blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2 values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313 values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107 values | src_encoding stringclasses 20 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 4 6.02M | extension stringclasses 78 values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
de2842493d93698702a7a39dfd9f732b96f0356d | 41566721eba27cc128846306e5bbf8ed0fd1ec45 | /NNoptimizeTest.py | 0c21f14982c6d59a60cf1428eea45586bcf7e8e5 | [] | no_license | tbrownex/Fraud-Detection | 385e345b0c2bf0364c122182c0f06ea35a94c803 | 41d034686693592f92bf37d30b4865941bcc0d20 | refs/heads/master | 2021-06-16T13:41:53.993993 | 2021-04-07T00:07:34 | 2021-04-07T00:07:34 | 191,569,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,734 | py | import pandas as pd
import numpy as np
import time
import tensorflow as tf
from getConfig import getConfig
from getArgs import getArgs
from setLogging import setLogging
from getData import getData
from preProcess import preProcess
from createDataset import createDataset
from getModelParms import getParms
from NNtest import Model
from getClassScores import getClassScores
def printStats(dataDict):
    """Print and return the negative:positive class ratio of the training labels.

    Assumes dataDict["trainY"] is a 2-D array of one-hot labels where column 1
    marks the positive class -- TODO confirm against preProcess().

    :param dataDict: dict holding at least "trainY" (numpy array, shape [n, 2])
    :return: int ratio of total samples to positive samples (e.g. 50 means 50:1)
    """
    positives = dataDict["trainY"][:, 1].sum()
    ratio = int(dataDict["trainY"].shape[0] / positives)
    print("Positive ratio of {}:1".format(ratio))
    # NOTE: removed a leftover debug input() pause that blocked unattended runs
    return ratio
def loadParms(p, ratio):
    """Build the hyper-parameter dict for one grid-search combination.

    :param p: sequence of (l1Size, activation, learningRate, Lambda,
              dropout, optimizer) in that order
    :param ratio: positive-class weight (the negative:positive ratio)
    :return: dict of parameters consumed by the NN Model
    """
    return {
        "l1Size": p[0],
        "activation": p[1],
        "learningRate": p[2],
        "Lambda": p[3],
        "dropout": p[4],
        "optimizer": p[5],
        # class weighting compensates for the heavy class imbalance
        "weight": ratio,
        # number of input features fed to the network
        "featureCount": 2,
    }
def saveModel(model, config):
    """Pickle the trained model to <modelDir>/XGBmodel.

    :param model: any picklable model object
    :param config: dict with key "modelDir" (path ending in a separator)
    """
    # pickle was never imported at module level (original raised NameError at
    # call time); keep the fix local to this block
    import pickle
    # 'with' ensures the handle is closed (original leaked the open file)
    with open(config["modelDir"] + "XGBmodel", "wb") as f:
        pickle.dump(model, f)
def process(dataDict, parmList, config, args, ratio):
    """Grid-search driver: build one shared TF input pipeline, then train and
    evaluate a model per hyper-parameter combination in parmList.

    NOTE(review): mixes TF1-style tf.data.Iterator with tf.compat.v1 calls --
    assumes TensorFlow 1.x compatibility mode; confirm installed version.

    :param dataDict: dict of numpy arrays ("trainX", "trainY", "valX", ...)
    :param parmList: list of hyper-parameter tuples (see loadParms)
    :param config: dict with at least "batchSize"
    :param args: parsed command-line arguments (unused in the active code path)
    :param ratio: positive-class weight passed through to loadParms
    :return: None (result aggregation is currently commented out below)
    """
    with tf.name_scope("inputPipeline"):
        trainDS = createDataset(dataDict, config, "train")
        valDS = createDataset(dataDict, config, "val")
        # Reinitializable iterator: one handle re-pointed at either dataset
        # via trainInit / valInit below.
        iter = tf.data.Iterator.from_structure(trainDS.output_types, tf.compat.v1.data.get_output_shapes(trainDS))
        features, labels = iter.get_next()
        trainInit = iter.make_initializer(trainDS)
        valInit = iter.make_initializer(valDS)
    for p in parmList:
        parmDict = loadParms(p, ratio)
        nn = Model(parmDict, features, labels)
        cost = nn.cost
        train = nn.train
        epochs=10
        # full batches per epoch; remainder samples are dropped
        trainBatches = int(dataDict["trainX"].shape[0]/config["batchSize"])
        valBatches = int(dataDict["valX"].shape[0]/config["batchSize"])
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            for e in range(epochs):
                sess.run(trainInit)
                for _ in range(trainBatches):
                    _ = sess.run(train)
            # evaluate on the validation set once training completes
            sess.run(valInit)
            actualPositives = 0
            for _ in range(valBatches):
                l , preds = sess.run([labels, nn._output])
                scores = getClassScores(l, preds)
                print(scores)
                input()  # NOTE(review): debug pause -- blocks unattended runs
                #print("epoch ", e, " cost: ", valCost/valBatches)
                #np.savetxt("/home/tbrownex/labels.csv", l, delimiter=",")
                #np.savetxt("/home/tbrownex/entropy.csv", c, delimiter=",")
    # The block below is dead (commented-out) aggregation/model-saving code
    # kept by the author; it references undefined names (run, parm_dict, parms).
    '''bestScore = np.inf
    dfList = []
    count=1
    for p in parmList:
        parmDict = loadParms(p, ratio)
        lift = run(dataDict, parmDict, config)
        tup = (count, parm_dict, lift)
        results.append(tup)
        count +=1
        tmp = pd.DataFrame.from_records([parms])
        dfList.append(tmp)
        if args.save:
            if totalScore < bestScore:
                bestScore = totalScore
                saveModel(model, config)
        print("{} of {}".format(count, len(parmList)))
        count+=1'''
    return
    #return pd.concat(dfList)
if __name__ == "__main__":
    # Entry point: load config and data, derive the class-imbalance ratio,
    # then grid-search the NN hyper-parameters returned by getParms("NN").
    args = getArgs()
    config = getConfig()
    setLogging(config)
    df = getData(config)
    dataDict = preProcess(df, config, args)
    ratio = printStats(dataDict)
    parmList = getParms("NN")
    start = time.time()
    # NOTE(review): process() currently returns None (its aggregation code is
    # commented out), so `results` is unused below.
    results = process(dataDict, parmList, config, args, ratio)
    #results.to_csv("/home/tbrownex/NNresults.csv", index=False)
    elapsed = (time.time() - start)/60
print("Elapsed time: {:.1f} minutes".format(elapsed)) | [
"tbrownex@gmail.com"
] | tbrownex@gmail.com |
366cc839edcb25dc29627421e164b16ce338ed74 | 49aa27e9f1bf3efa1f6fdd54556124647fac972c | /src/main/python/geeksforgeeks/dp/path-in-matrix.py | 524fe5a79ddce3a86dac6cbf6344e919d9633248 | [
"Apache-2.0"
] | permissive | sonymoon/algorithm | 2cd699a61223740200d507edac8bff05b7ab7eb8 | cc2a9e0125fc64bdbf6549034bad6482d2027ea2 | refs/heads/master | 2020-06-11T07:06:32.161489 | 2019-03-25T03:58:22 | 2019-03-25T03:58:22 | 75,738,406 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,222 | py | def pathInMatrix(result, matrix, i, j, m, n):
if i >= 0 and j >= 0:
result.append(matrix[i][j])
if 0 <= i - 1 < m and 0 <= j < n and matrix[i - 1][j] - matrix[i][j] == 1:
pathInMatrix(result, matrix, i - 1, j, m, n)
if 0 <= i < m and 0 <= j + 1 < n and matrix[i][j + 1] - matrix[i][j] == 1:
pathInMatrix(result, matrix, i, j + 1, m, n)
if 0 <= i + 1 < m and 0 <= j < n and matrix[i + 1][j] - matrix[i][j] == 1:
pathInMatrix(result, matrix, i + 1, j, m, n)
if 0 <= i - 1 < m and 0 <= j - 1 < n and matrix[i - 1][j - 1] - matrix[i][j] == 1:
pathInMatrix(result, matrix, i - 1, j - 1, m, n)
return result
def main(matrix):
    """Print and return the length of the longest increasing-by-1 path.

    Runs pathInMatrix from every cell and reports the longest path found.
    Returns None for an empty matrix, otherwise the best path length.
    """
    rows = len(matrix)
    if rows == 0:
        return
    cols = len(matrix[0])
    # collect the path explored from every starting cell, row-major order
    paths = [pathInMatrix([], matrix, r, c, rows, cols)
             for r in range(rows) for c in range(cols)]
    best_idx = 0
    best_len = 0
    # strict '>' keeps the first of several equally long paths
    for idx, path in enumerate(paths):
        if len(path) > best_len:
            best_len = len(path)
            best_idx = idx
    if paths:
        print(paths[best_idx])
        return len(paths[best_idx])
    return 0
# Sample grid used by the demo run below.
matrix = [
    [1, 2, 9],
    [5, 3, 8],
    [4, 6, 7]
]
# Fixed: original used the Python 2 statement form `print main(matrix)`,
# a SyntaxError under Python 3 (the rest of this file already uses print()).
print(main(matrix))
| [
"bailei02@meituan.com"
] | bailei02@meituan.com |
03c7167733f235f4307297442c65882718598a6e | 7b97d6fef74b35d2f26a9fed79b5b15782f8f9a5 | /examples/basic_example.py | 6ea409ffd7dddf1566fbada82d53d23511f7979b | [
"MIT"
] | permissive | Edinburgh-Genome-Foundry/tatapov | 22ac8e1fc506267a5d85f6063596485e9fdba9e4 | 06c2aa13e49affc7419e16e853d31c835813fe04 | refs/heads/master | 2023-09-03T22:28:55.443170 | 2022-05-06T13:41:32 | 2022-05-06T13:41:32 | 150,324,605 | 12 | 2 | NOASSERTION | 2020-09-08T23:31:43 | 2018-09-25T20:22:46 | Python | UTF-8 | Python | false | false | 242 | py | import tatapov
# Select one annealing dataset -- presumably measured at 25C with a 1 hour
# incubation, keyed by temperature then duration; confirm against tatapov docs.
data = tatapov.annealing_data["25C"]["01h"]
# Restrict the data to three overhang sequences; add_reverse=True appears to
# also include their reverse complements -- TODO confirm.
subset = tatapov.data_subset(data, ["ACGA", "AAAT", "AGAG"], add_reverse=True)
# Render the subset and save the figure to disk.
ax, _ = tatapov.plot_data(subset, figwidth=5)
ax.figure.tight_layout()
ax.figure.savefig("example.png") | [
"valentin.zulkower@gmail.com"
] | valentin.zulkower@gmail.com |
b96a7143fbc39db7c6994da5da404829e8ed32a4 | 683249b0cf7d49aa936007ffe5efd78a56193e5f | /mysite/blog/models.py | eecbb4f9f0b59da0e17bc6a32dffc51434be91f8 | [] | no_license | aneeshvermalearning/django-learning | 8aacb1203bd1aabf3f2b8a6b66f9f0b28112b040 | 8c0547a9f45d659682d53fa8e4960896d261987c | refs/heads/master | 2021-09-12T16:32:05.186802 | 2018-04-18T17:44:45 | 2018-04-18T17:44:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | from django.db import models
# Blog post model: Django maps this class to a database table.
class Post(models.Model):
    # Each field below becomes a column of that table.
    title = models.CharField(max_length=140)
    body = models.TextField()
    date = models.DateTimeField()
def __str__(self):
return self.title | [
"aneesh.verma09@gmail.com"
] | aneesh.verma09@gmail.com |
4532065d8480f285b272c6786f055f16ace6dbe3 | 8c0e94f22b68959e06ca02c167c8109f5144a0c9 | /ExamSim/wsgi.py | 2757302d0fbaecf1dbadeac9fa92c753cc7d872c | [] | no_license | jithindk/ExamSim | 820e504ffac9ca9a02d5173d91bedb25d46cb694 | f808cb7ddddf6e09a83682aa53aafb8e1d83c60a | refs/heads/master | 2023-02-15T15:39:11.720028 | 2021-01-14T04:22:32 | 2021-01-14T04:22:32 | 327,716,672 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for ExamSim project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings module before building the WSGI app.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'ExamSim.settings')
# WSGI entry point used by application servers (gunicorn, mod_wsgi, etc.).
application = get_wsgi_application()
| [
"jithinloyolite@gmail.com"
] | jithinloyolite@gmail.com |
0eed8c89461eb6c3b8d3047d689917d934f242ea | edf125be37a40caeb14c7fe32bd9f7511cf0ce9b | /07-cleaning-data-in-python/5-case-study/checking_data_types.py | dbf0982d7a52005d2ab359bdf31bf73197f34252 | [] | no_license | vedpbharti/Datacamp | 1d3d2ca0722a3a19733e91fa054f64e0c3b7114a | b6d019efebe1b46765f19212ba2d8ebb9d90de57 | refs/heads/master | 2020-04-05T05:47:28.528088 | 2019-02-10T22:34:00 | 2019-02-10T22:34:00 | 156,610,704 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,221 | py | '''Checking the data types
Now that your data is in the proper shape, you need to ensure that the columns are of the proper data type. That is, you need to ensure that country is of type object, year is of type int64, and life_expectancy is of type float64.
The tidy DataFrame has been pre-loaded as gapminder. Explore it in the IPython Shell using the .info() method. Notice that the column 'year' is of type object. This is incorrect, so you'll need to use the pd.to_numeric() function to convert it to a numeric data type.
NumPy and pandas have been pre-imported as np and pd.
Instructions
100 XP
Convert the year column of gapminder using pd.to_numeric().
Assert that the country column is of type np.object. This has been done for you.
Assert that the year column is of type np.int64.
Assert that the life_expectancy column is of type np.float64.'''
# Convert the year column to numeric (invalid values become NaN via 'coerce').
# NOTE(review): `gapminder` is pre-loaded by the DataCamp exercise environment;
# it is not defined in this file.
gapminder.year = pd.to_numeric(gapminder['year'], errors='coerce')
# Test if country is of type object
assert gapminder.country.dtypes == np.object
# Test if year is of type int64
assert gapminder.year.dtypes == np.int64
# Test if life_expectancy is of type float64
assert gapminder.life_expectancy.dtypes == np.float64
| [
"ved.bhartig@gmail.com"
] | ved.bhartig@gmail.com |
868a24164d83973e964adf0c20daaf8ecca8e9a3 | c20d163f981aa753aaf1d51b5b49016e1d68ff61 | /api/urls.py | bb8b1a61de6cbfe7cc5c36cd2b65b0d99ceb1f8d | [] | no_license | shkarcot/ZZZtesting | b8afc4f0d79486f8d5d57492f1a4891233196684 | 924e390325e0b55a852dd6ec7587d4e61e7bf558 | refs/heads/master | 2020-04-24T02:39:07.853125 | 2019-02-20T10:48:47 | 2019-02-20T10:48:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,196 | py | from django.conf.urls import url
from api import views
from scripts.seed_data import seed_data
from config_vars import DASHBOARD_CONFIG
urlpatterns = [
# Case Queue Management APIs
url(r'^createqueue/$', views.case_queue_management, name="Queue Management"),
url(r'^getqueues/$', views.case_queue_management, name="Queue Management"),
url(r'^updatequeue/$', views.update_case_queue, name="Queue Management"),
url(r'^deletequeue/(?P<queue_id>[\w\-]+)/$',
views.delete_case_queue, name="Queue Management"),
url(r'^assigncase/$', views.assign_case, name="Queue Management"),
url(r'^fetchcasescount/(?P<sv_user>[\w\-]+)/$', views.fetch_cases_count,
name="Queue Management"),
url(r'^fetchqueuecases/(?P<queue_id>[\w\-]+)/(?P<status>[\w\-]+)/$',
views.fetch_cases, name="Queue Management"),
# url(r'^casestatusupdate/$', views.case_status_update, name="Queue Management"),
url(r'^dashboard/cases/$', views.dashboard_queues, name="Queue Management"),
url(r'^dashboard/docs/$', views.dashboard_docs, name="Queue Management"),
# AUTHORIZATION APIs
url(r'^auth/', views.auth, name="Authorization"),
url(r'^auth/info', views.auth_info, name="Get Logged-in User Info"),
# PLATFORM APIs
# 1. Solution Api
url(r'^soln/$', views.solution),
url(r'^soln/(?P<sol_info>\w+)/$', views.solution),
url(r'^activeTenant/', views.active_tenants),
url(r'^get/solnid/', views.get_solution),
url(r'^get/templatecount/', views.get_template_counts),
# 2. Entities Api
url(r'^solnConfig/definitions/', views.entity_defnitions),
url(r'^solnConfig/$', views.solution_config),
url(r'^solnConfig/(?P<endpoint>\w+)/(?P<file_type>\w+)/$', views.solution_config),
url(r'^solnConfig/(?P<endpoint>\w+)/$', views.solution_config),
url(r'^solnConfig/status/(?P<job_id>[\w\-]+)/$', views.check_import_status),
url(r'^entitylist/$', views.get_all_entities),
# 3. Resource library Api
url(r'^load_training/data/', views.load_training_data),
url(r'^download/efs/', views.download_training_data),
url(r'^download/(?P<doc_id>[\w\-]+)/$', views.download_training_data),
url(r'^data/upload/$', views.upload_data_files),
url(r'^data/upload/(?P<type>[\w\-]+)/$', views.upload_data_files),
url(r'^data/resource/get/$', views.resource_data),
url(r'^terms/$', views.process_term),
url(r'^get/hierarchy/$', views.hierarchy_info),
url(r'^tags/$', views.process_tag),
url(r'^data/resource/$', views.resource_data),
url(r'^camunda/workflow/$', views.camunda_workflow),
url(r'^spec/$', views.camunda_api_spec_uri),
url(r'^sftp/files/$', views.get_all_sftp_files),
url(r'^get/sftpinfo/(?P<solution_id>[\w\-]+)/$', views.get_sftp_info),
url(r'^get/sftpuser/$', views.get_sftp_user),
url(r'^get/sftpuser/(?P<solution_id>[\w\-]+)/$', views.get_sftp_user),
url(r'^presignedurl/$', views.get_preassigned_s3_url),
url(r'^getfiles/$', views.get_files),
url(r'^deletefile/$', views.delete_file),
# 4. Document template Api
url(r'^documentTemplates/list/$', views.document_templates, name="Document Templates List"),
url(r'^documentTemplates/(?P<template_id>[\w\-]+)/$', views.document_templates, name="Document Template"),
url(r'^documentTemplates/unknown/$', views.document_templates, name="Held Document Templates"),
url(r'^documentTemplates/$', views.document_templates, name="Held Document Templates"),
url(r'^documentTemplates/postprocess/(?P<template_id>[\w\-]+)/$', views.document_post_process,
name="Post processing rules"),
url(r'^documentTemplates/test/$', views.document_templates, name="Test Templates"),
url(r'^documentTemplates/test/(?P<template_id>[\w\-]+)/$', views.document_templates, name="Test Templates"),
url(r'^testdocuments/(?P<doc_id>[\w\-]+)/$', views.test_documents, name="Test Templates"),
url(r'^templateElements/(?P<template_id>[\w\-]+)/(?P<page_no>[\w\-]+)/$', views.document_templates_fields,
name="Document Template"),
url(r'^templateElements/$', views.document_templates_fields,
name="Document Template"),
url(r'^documentTemplates/train/(?P<endpoint>[\w\-]+)/$', views.templates_train,
name="template upload samples and trigger training"),
url(r'^documentTemplates/train/(?P<endpoint>[\w\-]+)/(?P<template_id>[\w\-]+)/$', views.templates_train,
name="get template samples"),
# 5. Services API
url(r'^services/ingest/$', views.services),
url(r'^services/test/$', views.services),
url(r'^services/$', views.services),
url(r'^services/trainupload/$', views.train_set),
url(r'^services/create/$', views.create_service),
url(r'^platformConfig/nlpEngine/processors/', views.platformConfig_nlpEngine_processors),
# 6. Rules API
url(r'^rules', views.rules),
url(r'^rules/test/$', views.rules),
url(r'^rulesConfig/operators/', views.rulesConfig_operators),
url(r'^insight/(?P<type>[\w\-]+)', views.insight_service),
url(r'^createTrainingSet/', views.create_training_set),
url(r'^trainingSetModels/(?P<type>[\w\-]+)/', views.training_set_models),
url(r'^jrules/$', views.jrules),
url(r'^jrules/test/$', views.jrules_test),
url(r'^jrules/customtest/$', views.jrules_custom_test),
url(r'^jrules/config/$', views.jrules_config),
url(r'^jrules/custom/$', views.jrules_custom),
url(r'^jrules/custom/(?P<type>[\w\-]+)/$', views.jrules_custom),
# 7. Learning models API
url(r'^models/$', views.models),
url(r'^models/ensembles/$', views.models),
url(r'^models/details/$', views.models),
url(r'^models/components/$', views.models),
url(r'^models/test/$', views.models),
url(r'^models/train/$', views.models),
url(r'^models/retrain/$', views.models),
url(r'^models/evaluate/$', views.models),
url(r'^models/config/$', views.models),
url(r'^models/flowupdate/$', views.models),
url(r'^models/save/$', views.models),
url(r'^models/dataset/upload/$', views.learning_datasets),
url(r'^models/dataset/list/$', views.learning_datasets),
url(r'^models/dataset/archive/$', views.learning_datasets),
url(r'^models/type/$', views.get_learning_config),
url(r'^models/dataset/type/$', views.get_learning_config),
url(r'^models/dataset/download/(?P<path>.*)$', views.dataset_download),
url(r'^models/session/get/$', views.notebook_session),
url(r'^models/binaries/list/$', views.learning_binaries),
url(r'^models/evaluationdetails/$', views.learning_evaluation),
url(r'^models/previousrundetails/$', views.previous_run_details),
url(r'^models/binary/upload/$', views.learning_binary),
url(r'^models/binary/download/(?P<path>.*)$', views.binary_download),
url(r'^models/version/download/(?P<path>.*)$', views.results_download),
url(r'^models/previousrun/download/(?P<path>.*)$', views.results_download),
url(r'^models/binary/archive/$', views.learning_binary_update),
# REFERENCE APP APIs
url(r'^getInsights/(?P<doc_id>[\w\-]+)/$', views.get_insights),
url(r'^dashboard/$', views.dashboard),
url(r'^getInsights/$', views.get_insights),
url(r'^documents/(?P<doc_id>[\w\-]+)/(?P<page_no>[\w\-]+)/$', views.document_details),
url(r'^feedback/$', views.feedback_service),
url(r'^feedback/entity/$', views.feedback_entity),
url(r'^retrain/$', views.retrain),
url(r'^review/$', views.review),
url(r'^download/json/(?P<doc_id>[\w\-]+)/$', views.download_json),
url(r'^intentreview/$', views.intent_review),
url(r'^documentTypes/', views.get_document_types),
url(r'^grouping/review/(?P<doc_id>[\w\-]+)/$', views.grouping_review),
url(r'^feedback/text/$', views.text_feedback),
url(r'^completeReview/text/$', views.complete_review),
url(r'^completeReview/entity/$', views.complete_review),
url(r'^completeReview/review/(?P<doc_id>[\w\-]+)/$', views.complete_review),
url(r'^change/state/$', views.change_state),
url(r'^childdocs/$', views.child_documents),
url(r'^getData/(?P<selector>[\w\-]+)/(?P<doc_type>[\w\-]+)/$', views.get_processed_data),
url(r'^getData/(?P<selector>[\w\-]+)/(?P<doc_type>[\w\-]+)/(?P<query>[\w\-]+)/$', views.get_processed_data),
url(r'^getRecord/(?P<selector>[\w\-]+)/(?P<doc_type>[\w\-]+)/(?P<file_flow_id>[\w\-]+)/(?P<direction>[\w\-]+)/$',
views.get_record_data),
url(r'^getRecord/(?P<selector>[\w\-]+)/(?P<doc_type>[\w\-]+)/(?P<file_flow_id>[\w\-]+)/(?P<direction>[\w\-]+)/'
r'(?P<query>[\w\-]+)/$', views.get_record_data),
url(r'^getJson/(?P<file_flow_id>[\w\-]+)', views.json_download),
url(r'^getReviewList/(?P<status>[\w\-]+)/(?P<doc_type>[\w\-]+)/$', views.review_list),
url(r'^getReviewList/(?P<status>[\w\-]+)/(?P<doc_type>[\w\-]+)/(?P<query>[\w\-]+)/$', views.review_list),
url(r'^getReview/(?P<status>[\w\-]+)/(?P<doc_type>[\w\-]+)/(?P<file_flow_id>[\w\-]+)/(?P<direction>[\w\-]+)/$',
views.get_review),
url(r'^getReview/(?P<status>[\w\-]+)/(?P<doc_type>[\w\-]+)/(?P<file_flow_id>[\w\-]+)/(?P<direction>[\w\-]+)/'
r'(?P<query>[\w\-]+)/$', views.get_review),
url(r'^intent/(?P<doc_id>[\w\-]+)/', views.intent),
url(r'^postReview/(?P<file_flow_id>[\w\-]+)/', views.post_review),
url(r'^chart/(?P<chart_id>[\w\-]+)/(?P<selector>[\w\-]+)/', views.get_chart),
url(r'^selectors/', views.get_selectors),
url(r'^extractiondata/(?P<doc_id>[\w\-]+)/$', views.fetch_section_data),
url(r'^entitylink/(?P<doc_id>[\w\-]+)/$', views.process_entity_data),
url(r'^thresholds/', views.threshold_data),
url(r'^mapping_entities/(?P<template_id>[\w\-]+)/$', views.get_mapping_entities),
# pipeline Api's
url(r'^pipeline/settings/', views.pipeline_settings),
url(r'^pipeline/email/', views.email_details),
url(r'^pipeline/upload/', views.pipeline_ingest),
url(r'^pipeline/status/', views.pipleline_status),
url(r'^pipeline/', views.update_s3),
url(r'^getpipeline/', views.get_pipeline),
# OTHERS
url(r'^jobStatus/(?P<job_id>[\w\-]+)/$', views.job_status),
# Workflow APIs
url(r'^workflows/$', views.workflow_management, name="Workflow Management"),
url(r'^wfvariables/$', views.workflow_variables),
url(r'^workflowcasequeues/$', views.workflow_case_queues),
url(r'^caseobject/$', views.get_case_object),
url(r'^casevariables/$', views.get_case_variables),
url(r'^casequeuerules/(?P<rule_id>[\w\-]+)/$', views.get_case_queue_rules),
# Custom Functions
url(r'^customfunctions/(?P<function_name>[\w\-]+)/$', views.custom_function,
name='FAAS'),
url(r'^customfunctions/$', views.custom_function, name='FAAS'),
url(r'^customfunction/enable/$', views.custom_function, name='FAAS'),
url(r'^customfunction/open/$', views.custom_function, name='FAAS'),
url(r'^customfunction/create/$', views.custom_function, name='FAAS'),
url(r'^customfunction/save/$', views.custom_function, name='FAAS'),
url(r'^customfunction/publish/$', views.custom_function, name='FAAS'),
url(r'^customfunction/test/$', views.custom_function, name='FAAS'),
url(r'^customfunction/logs/$', views.custom_function, name='FAAS'),
url(r'^customfunction/enable_version/$', views.custom_function, name='FAAS'),
# User Groups APIs
url(r'^usergroups/$', views.user_groups),
url(r'^usergroups/(?P<ug_id>[\w\-]+)/$', views.user_groups),
url(r'^nestedusergroups/(?P<ug_id>[\w\-]+)/$', views.user_groups),
url(r'^userroles/$', views.user_roles),
url(r'^userroles/(?P<role_id>[\w\-]+)/$', views.user_roles),
url(r'^userroles/(?P<role_id>[\w\-]+)/users/(?P<user_id>[\w\-]+)/$', views.user_roles),
url(r'^linkuserstorole/$', views.user_roles_linkusers),
url(r'^users/$', views.implement_users),
url(r'^users/(?P<user_id>[\w\-]+)/$', views.implement_users),
url(r'^solutions/$', views.handle_user_solutions),
url(r'^linkusers/$', views.link_users),
url(r'^linkusers/(?P<ug_id>[\w\-]+)/(?P<user_id>[\w\-]+)/$',
views.link_users),
# Dashboard case management
url(r'^dashboard/getqueues/$', views.dashboard_queues_docs_mapping, name="Queue Management"),
url(r'^dashboard/getdocs/$', views.dashboard_queues_docs_mapping, name="Queue Management"),
# Sources APIs
url(r'^solutions/(?P<solution_id>[\w\-]+)/sources/$',
views.process_sources, name="Source Management"),
url(r'^solutions/(?P<solution_id>[\w\-]+)/sources/(?P<source_id>[\w\-]+)/$',
views.process_sources, name="Source Management"),
url(r'^solutions/(?P<solution_id>[\w\-]+)/filesources/$',
views.process_file_sources, name="Source Management"),
url(r'^solutions/(?P<solution_id>[\w\-]+)/filesources/(?P<source_id>[\w\-]+)/$',
views.process_file_sources, name="Source Management"),
url(r'^s3bucketurl/$', views.get_s3_bucket_url, name="Source Management"),
url(r'^testemailconnection/$', views.test_email_connection, name="Source Management"),
url(r'^docvars/$', views.doc_vars, name="Queue Management"),
# NER service labels
url(r'^nerservice/$', views.ner_service, name="NER Label"),
# New Template URLS
url(r'^ingest/template/$', views.ingest_template_request, name="Ingest Template"),
url(r'^get/template/$', views.get_template_request, name="Get Template"),
url(r'^get/template/(?P<template_id>[\w\-]+)/$', views.get_template_request, name="Get Template with id"),
url(r'^publish/template/$', views.update_template_request, name="Publish Template"),
url(r'^delete/template/$', views.update_template_request, name="Delete Template"),
url(r'^save/template/elements/$', views.save_template_element_request, name="Save Template Elements"),
url(r'^delete/template/elements/$', views.delete_template_element_request, name="Delete Template Elements"),
url(r'^save/template/unknown/$', views.save_unknown_template_request, name="Update Unknown Template"),
# GET, POST, PUT methods of all thresholds
url(r'^solution/(?P<solution_id>[\w\-]+)/workflow/(?P<workflow_id>[\w\-]+)/task/(?P<task_id>[\w\-]+)/threshold/$',
views.threshold, name="update threshold values"),
url(r'^solution/(?P<solution_id>[\w\-]+)/workflow/(?P<workflow_id>[\w\-]+)/task/(?P<task_id>[\w\-]+)/'
r'threshold/(?P<threshold_id>[\w\-]+)/$',
views.threshold_update, name="insert threshold values to existing Data"),
url(r'^queue/agents', views.get_agents_list, name="Queue Management"),
url(r'^workflow/*', views.cm_workflow_management, name="Workflow Management"),
url(r'^queue/*', views.cm_workflow_management, name="Workflow Management"),
url(r'^case/*', views.cm_dashboard_management, name="Workflow Management"),
# Ontologies
url(r'^solution/(?P<solution_id>[\w\-]+)/ontologies/$', views.ontology_service, name="ontologies"),
url(r'^solution/(?P<solution_id>[\w\-]+)/ontologies/(?P<id>[\w\-]+)/$', views.ontology_service, name="ontologies"),
url(r'^solution/(?P<solution_id>[\w\-]+)/ontologies/(?P<id>[\w\-]+)/enable/$', views.ontology_service, name="ontologies"),
]
# Import-time side effect: seeds the dashboard configuration whenever this
# URLconf module is loaded (i.e. on every Django startup).
seed_data("dashboard_config.json", DASHBOARD_CONFIG)
| [
"manjukannaj@gmail.com"
] | manjukannaj@gmail.com |
b6b37405bb24e259a4963554b3502d97de09ecd1 | c5a4f782af777ac012ab3d6fce72588b7cdd3272 | /Day05/perfect.py | 770764a9e54eccf9485c628cd145ad6377da8740 | [] | no_license | makejun168/Python-study | 5606b68247e145de3019432dba76625d1093b82c | 8f741fa5081b4af61a9160ea49d81e981ab3b4e5 | refs/heads/master | 2020-07-01T19:36:35.367234 | 2020-03-18T10:23:47 | 2020-03-18T10:23:47 | 201,275,193 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | # 完美数字
# 找出1~9999之间的所有完美数
# 完美数是除自身外其他所有因子的和正好等于这个数本身的数
# 例如: 6 = 1 + 2 + 3, 28 = 1 + 2 + 4 + 7 + 14
import time
import math
start = time.clock()
for num in range(1, 10000):
sum = 0
for factor in range(1, int(math.sqrt(num)) + 1):
if num % factor == 0:
sum += factor
if factor > 1 and num / factor != factor:
sum += num / factor
if sum == num:
print(num)
end = time.clock()
print("执行时间:", (end - start), "秒")
| [
"873800030@qq.com"
] | 873800030@qq.com |
4e20e0113f12ca54ff93d7de62bcc9e2d82234cf | bca56a70984c620d0e86be6c03a1e18ce3af2c2c | /gym/envs/__init__.py | 44b07343068c4b239f537eb49b6fd7f81f1eb732 | [
"MIT"
] | permissive | davidsonic/self_brewed_gym | 9f44f1fc22a7dcf3cfb4a6850cb10dee8c2c2e17 | 4e0cffb3a6aad1f570aa748c22bf6a289aaa1ab3 | refs/heads/master | 2020-04-15T13:23:34.797762 | 2019-01-12T22:46:53 | 2019-01-12T22:46:53 | 164,715,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,852 | py | from gym.envs.registration import registry, register, make, spec
# Algorithmic
# ----------------------------------------
register(
id='Copy-v0',
entry_point='gym.envs.algorithmic:CopyEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='RepeatCopy-v0',
entry_point='gym.envs.algorithmic:RepeatCopyEnv',
max_episode_steps=200,
reward_threshold=75.0,
)
register(
id='ReversedAddition-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 2},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='ReversedAddition3-v0',
entry_point='gym.envs.algorithmic:ReversedAdditionEnv',
kwargs={'rows' : 3},
max_episode_steps=200,
reward_threshold=25.0,
)
register(
id='DuplicatedInput-v0',
entry_point='gym.envs.algorithmic:DuplicatedInputEnv',
max_episode_steps=200,
reward_threshold=9.0,
)
register(
id='Reverse-v0',
entry_point='gym.envs.algorithmic:ReverseEnv',
max_episode_steps=200,
reward_threshold=25.0,
)
# Classic
# ----------------------------------------
register(
id='CartPole-v0',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=200,
reward_threshold=195.0,
)
register(
id='CartPole-v1',
entry_point='gym.envs.classic_control:CartPoleEnv',
max_episode_steps=500,
reward_threshold=475.0,
)
register(
id='MountainCar-v0',
entry_point='gym.envs.classic_control:MountainCarEnv',
max_episode_steps=200,
reward_threshold=-110.0,
)
register(
id='MountainCarContinuous-v0',
entry_point='gym.envs.classic_control:Continuous_MountainCarEnv',
max_episode_steps=999,
reward_threshold=90.0,
)
register(
id='Pendulum-v0',
entry_point='gym.envs.classic_control:PendulumEnv',
max_episode_steps=200,
)
register(
id='Acrobot-v1',
entry_point='gym.envs.classic_control:AcrobotEnv',
max_episode_steps=500,
)
# Box2d
# ----------------------------------------
register(
id='LunarLander-v2',
entry_point='gym.envs.box2d:LunarLander',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='LunarLanderContinuous-v2',
entry_point='gym.envs.box2d:LunarLanderContinuous',
max_episode_steps=1000,
reward_threshold=200,
)
register(
id='BipedalWalker-v2',
entry_point='gym.envs.box2d:BipedalWalker',
max_episode_steps=1600,
reward_threshold=300,
)
register(
id='BipedalWalkerHardcore-v2',
entry_point='gym.envs.box2d:BipedalWalkerHardcore',
max_episode_steps=2000,
reward_threshold=300,
)
register(
id='CarRacing-v0',
entry_point='gym.envs.box2d:CarRacing',
max_episode_steps=1000,
reward_threshold=900,
)
# Toy Text
# ----------------------------------------
register(
id='Blackjack-v0',
entry_point='gym.envs.toy_text:BlackjackEnv',
)
register(
id='KellyCoinflip-v0',
entry_point='gym.envs.toy_text:KellyCoinflipEnv',
reward_threshold=246.61,
)
register(
id='KellyCoinflipGeneralized-v0',
entry_point='gym.envs.toy_text:KellyCoinflipGeneralizedEnv',
)
register(
id='FrozenLake-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '4x4'},
max_episode_steps=100,
reward_threshold=0.78, # optimum = .8196
)
register(
id='FrozenLake8x8-v0',
entry_point='gym.envs.toy_text:FrozenLakeEnv',
kwargs={'map_name' : '8x8'},
max_episode_steps=200,
reward_threshold=0.99, # optimum = 1
)
register(
id='CliffWalking-v0',
entry_point='gym.envs.toy_text:CliffWalkingEnv',
)
register(
id='NChain-v0',
entry_point='gym.envs.toy_text:NChainEnv',
max_episode_steps=1000,
)
register(
id='Roulette-v0',
entry_point='gym.envs.toy_text:RouletteEnv',
max_episode_steps=100,
)
register(
id='Taxi-v2',
entry_point='gym.envs.toy_text.taxi:TaxiEnv',
reward_threshold=8, # optimum = 8.46
max_episode_steps=200,
)
register(
id='GuessingGame-v0',
entry_point='gym.envs.toy_text.guessing_game:GuessingGame',
max_episode_steps=200,
)
register(
id='HotterColder-v0',
entry_point='gym.envs.toy_text.hotter_colder:HotterColder',
max_episode_steps=200,
)
# Mujoco
# ----------------------------------------
# 2D
register(
id='Reacher-v2',
entry_point='gym.envs.mujoco:ReacherEnv',
max_episode_steps=50,
reward_threshold=-3.75,
)
register(
id='Pusher-v2',
entry_point='gym.envs.mujoco:PusherEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Thrower-v2',
entry_point='gym.envs.mujoco:ThrowerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='Striker-v2',
entry_point='gym.envs.mujoco:StrikerEnv',
max_episode_steps=100,
reward_threshold=0.0,
)
register(
id='InvertedPendulum-v2',
entry_point='gym.envs.mujoco:InvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
# debug
register(
id='MyInvertedPendulum-v2',
entry_point='gym.envs.mujoco:MyInvertedPendulumEnv',
max_episode_steps=1000,
reward_threshold=950.0,
)
register(
id='InvertedDoublePendulum-v2',
entry_point='gym.envs.mujoco:InvertedDoublePendulumEnv',
max_episode_steps=1000,
reward_threshold=9100.0,
)
register(
id='HalfCheetah-v2',
entry_point='gym.envs.mujoco:HalfCheetahEnv',
max_episode_steps=1000,
reward_threshold=4800.0,
)
register(
id='Hopper-v2',
entry_point='gym.envs.mujoco:HopperEnv',
max_episode_steps=1000,
reward_threshold=3800.0,
)
register(
id='Swimmer-v2',
entry_point='gym.envs.mujoco:SwimmerEnv',
max_episode_steps=1000,
reward_threshold=360.0,
)
register(
id='Walker2d-v2',
max_episode_steps=1000,
entry_point='gym.envs.mujoco:Walker2dEnv',
)
register(
id='Ant-v2',
entry_point='gym.envs.mujoco:AntEnv',
max_episode_steps=1000,
reward_threshold=6000.0,
)
register(
id='Humanoid-v2',
entry_point='gym.envs.mujoco:HumanoidEnv',
max_episode_steps=1000,
)
register(
id='HumanoidStandup-v2',
entry_point='gym.envs.mujoco:HumanoidStandupEnv',
max_episode_steps=1000,
)
# debug
register(
id='HumanoidStandup-v1',
entry_point='gym.envs.mujoco:HumanoidStandEnv',
max_episode_steps=1000,
)
# Robotics
# ----------------------------------------
def _merge(a, b):
a.update(b)
return a
for reward_type in ['sparse', 'dense']:
suffix = 'Dense' if reward_type == 'dense' else ''
kwargs = {
'reward_type': reward_type,
}
# Fetch
register(
id='FetchSlide{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchSlideEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchPickAndPlace{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPickAndPlaceEnv',
kwargs=kwargs,
max_episode_steps=50,
)
# debug
register(
id='FetchAdv{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchAdvEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchReach{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='FetchPush{}-v1'.format(suffix),
entry_point='gym.envs.robotics:FetchPushEnv',
kwargs=kwargs,
max_episode_steps=50,
)
# Hand
register(
id='HandReach{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandReachEnv',
kwargs=kwargs,
max_episode_steps=50,
)
register(
id='HandManipulateBlockRotateZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'z'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateParallel{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'parallel'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockRotateXYZ{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateBlockFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateBlock{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandBlockEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulateEggFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulateEgg{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandEggEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenRotate{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'ignore', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
register(
id='HandManipulatePenFull{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Alias for "Full"
register(
id='HandManipulatePen{}-v0'.format(suffix),
entry_point='gym.envs.robotics:HandPenEnv',
kwargs=_merge({'target_position': 'random', 'target_rotation': 'xyz'}, kwargs),
max_episode_steps=100,
)
# Atari
# ----------------------------------------
# Each game is registered in six variants per observation type:
#   v0 / v4, Deterministic-v0 / Deterministic-v4, NoFrameskip-v0 / NoFrameskip-v4.
# The v0 variants pass repeat_action_probability=0.25 ("sticky actions");
# the v4 variants omit it and are therefore less stochastic.
# # print ', '.join(["'{}'".format(name.split('.')[0]) for name in atari_py.list_games()])
for game in ['air_raid', 'alien', 'amidar', 'assault', 'asterix', 'asteroids', 'atlantis',
    'bank_heist', 'battle_zone', 'beam_rider', 'berzerk', 'bowling', 'boxing', 'breakout', 'carnival',
    'centipede', 'chopper_command', 'crazy_climber', 'demon_attack', 'double_dunk',
    'elevator_action', 'enduro', 'fishing_derby', 'freeway', 'frostbite', 'gopher', 'gravitar',
    'hero', 'ice_hockey', 'jamesbond', 'journey_escape', 'kangaroo', 'krull', 'kung_fu_master',
    'montezuma_revenge', 'ms_pacman', 'name_this_game', 'phoenix', 'pitfall', 'pong', 'pooyan',
    'private_eye', 'qbert', 'riverraid', 'road_runner', 'robotank', 'seaquest', 'skiing',
    'solaris', 'space_invaders', 'star_gunner', 'tennis', 'time_pilot', 'tutankham', 'up_n_down',
    'venture', 'video_pinball', 'wizard_of_wor', 'yars_revenge', 'zaxxon']:
    for obs_type in ['image', 'ram']:
        # space_invaders should yield SpaceInvaders-v0 and SpaceInvaders-ram-v0
        name = ''.join([g.capitalize() for g in game.split('_')])
        if obs_type == 'ram':
            name = '{}-ram'.format(name)
        nondeterministic = False
        if game == 'elevator_action' and obs_type == 'ram':
            # ElevatorAction-ram-v0 seems to yield slightly
            # non-deterministic observations about 10% of the time. We
            # should track this down eventually, but for now we just
            # mark it as nondeterministic.
            nondeterministic = True
        # v0: stochastic frameskip plus 25% chance of repeating the last action.
        register(
            id='{}-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'repeat_action_probability': 0.25},
            max_episode_steps=10000,
            nondeterministic=nondeterministic,
        )
        # v4: same environment without the action-repeat stochasticity.
        register(
            id='{}-v4'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type},
            max_episode_steps=100000,
            nondeterministic=nondeterministic,
        )
        # Standard Deterministic (as in the original DeepMind paper)
        if game == 'space_invaders':
            frameskip = 3
        else:
            frameskip = 4
        # Use a deterministic frame skip.
        register(
            id='{}Deterministic-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip, 'repeat_action_probability': 0.25},
            max_episode_steps=100000,
            nondeterministic=nondeterministic,
        )
        register(
            id='{}Deterministic-v4'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': frameskip},
            max_episode_steps=100000,
            nondeterministic=nondeterministic,
        )
        register(
            id='{}NoFrameskip-v0'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1, 'repeat_action_probability': 0.25}, # A frameskip of 1 means we get every frame
            max_episode_steps=frameskip * 100000,
            nondeterministic=nondeterministic,
        )
        # No frameskip. (Atari has no entropy source, so these are
        # deterministic environments.)
        register(
            id='{}NoFrameskip-v4'.format(name),
            entry_point='gym.envs.atari:AtariEnv',
            kwargs={'game': game, 'obs_type': obs_type, 'frameskip': 1}, # A frameskip of 1 means we get every frame
            max_episode_steps=frameskip * 100000,
            nondeterministic=nondeterministic,
        )
# Unit test
# ---------
# Tiny synthetic environments used by gym's own test-suite.
register(
    id='CubeCrash-v0',
    entry_point='gym.envs.unittest:CubeCrash',
    reward_threshold=0.9,
)
register(
    id='CubeCrashSparse-v0',
    entry_point='gym.envs.unittest:CubeCrashSparse',
    reward_threshold=0.9,
)
register(
    id='CubeCrashScreenBecomesBlack-v0',
    entry_point='gym.envs.unittest:CubeCrashScreenBecomesBlack',
    reward_threshold=0.9,
)
register(
    id='MemorizeDigits-v0',
    entry_point='gym.envs.unittest:MemorizeDigits',
    reward_threshold=20,
)
| [
"davidsonic@163.com"
] | davidsonic@163.com |
ccf5c767f217b6f4a942cf7108ea3b85349e14e5 | f2418f2bc6015cc157e45a03aedaaf8cc6706bf8 | /Advanced_ML/Text_Generation/toy_rnn.py | 00c6fd5f860a3cf261301d8d068280fc80b73fb8 | [
"MIT"
] | permissive | imsrgadich/Projects_shang | dcaa9411e13866a8e4f3a449405473adda329401 | a9d4395a98a79fb0a700a99168cd358ab7494fdf | refs/heads/master | 2021-01-12T09:09:47.281055 | 2016-12-09T17:18:59 | 2016-12-09T17:18:59 | 76,779,772 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,660 | py | import numpy as np
import theano
import theano.tensor as T
#rnn parameters
learning_rate = 1
input = T.matrix()
W1 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(2,10)),dtype=theano.config.floatX))
Wh = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(10,10)),dtype=theano.config.floatX))
W2 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(10,1)),dtype=theano.config.floatX))
b1 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(10)),dtype=theano.config.floatX))
b2 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(1)),dtype=theano.config.floatX))
h0 = theano.shared(np.asarray(np.random.uniform(low=-0.1,high=0.1,size=(10)),dtype=theano.config.floatX))
target = T.vector()
def step(input,h0,W1,Wh,W2,b1,b2):
    """One recurrent step: new hidden state from the current input column and
    the previous hidden state (simple sigmoid RNN cell).

    NOTE(review): `input` shadows the builtin of the same name; harmless here
    but renaming would be cleaner.
    """
    h_out = T.nnet.sigmoid(T.dot(input,W1)+T.dot(h0,Wh)+b1)
    return h_out
params = [W1,Wh,W2,b1,b2,h0]
h_output,_ = theano.scan(fn=step,sequences=input,outputs_info=h0,non_sequences=params[:-1])
output = T.nnet.sigmoid(T.dot(h_output,W2)+b2)
cost = T.nnet.binary_crossentropy(output.flatten(1),target).mean()
grads = [T.grad(cost, param) for param in params]
train = theano.function([input,target],cost,updates=[(param,param-learning_rate*grad) for param,grad in zip(params,grads)])
predict = theano.function([input],output)
#train
for i in range(25000):
i1 = np.random.randint(100000000,1000000000)
i2 = np.random.randint(100000000,1000000000)
o1 = i1+i2
i1 = map(int,bin(i1)[2:])
i2 = map(int,bin(i2)[2:])
o1 = map(int,bin(o1)[2:])
if len(i1)>len(i2):
diff = len(i1)-len(i2)
i2 = [0]*diff+i2
elif len(i2)>len(i1):
diff = len(i2)-len(i1)
i1 = [0]*diff+i1
if len(o1)>len(i1):
i1 = [0]+i1
i2 = [0]+i2
i1.reverse()
i2.reverse()
o1.reverse()
X = np.vstack((i1,i2)).T
y = np.array(o1)
print "step %i training error:" % (i+1), train(X,y)
#test
for i in range(100):
in1 = np.random.randint(100000000,1000000000)
in2 = np.random.randint(100000000,1000000000)
actual = in1+in2
i1 = map(int,bin(in1)[2:])
i2 = map(int,bin(in2)[2:])
o1 = map(int,bin(actual)[2:])
if len(i1)>len(i2):
diff = len(i1)-len(i2)
i2 = [0]*diff+i2
elif len(i2)>len(i1):
diff = len(i2)-len(i1)
i1 = [0]*diff+i1
if len(o1)>len(i1):
i1 = [0]+i1
i2 = [0]+i2
i1.reverse()
i2.reverse()
X = np.vstack((i1,i2)).T
pred = predict(X)
pred = int(''.join(list(reversed([str(int(round(p))) for p in pred]))),2)
print "%i + %i: pred: %i actual: %i " % (in1, in2, pred, actual) | [
"emailshang@gmail.com"
] | emailshang@gmail.com |
f09890027a800d9d6ae27200e1657aa0879826f8 | adb5b909e8f1e6568b6e11b53002715b95616262 | /git_2.py | 6c442e6d8a78c58955dfbb9d039991f1f0c0a598 | [] | no_license | saalome/github_demo | 586e4fc656aa019f2e3307614cfad9e870b038a4 | e1ac212d13cac1f96f3d7df0f4c6e5e9940e2d7e | refs/heads/master | 2020-04-02T10:24:32.231016 | 2018-11-06T15:36:31 | 2018-11-06T15:36:31 | 154,338,277 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 748 | py | # # 使用while循环输入 1 2 3 4 5 6 8 9 10
# while True:
# number = int(input('请输入一个数字:'))
# if number > 10:
# break
# elif number == 7:
# continue
# print(number)
# 求1-100的所有数的和
'''
num = list(range(1,101))
sum = 0
for i in num:
sum = sum +i
print(sum)
'''
# 3、输出 1-100 内的所有奇数
# for i in range(1,101,2):
# print(i)
# a = 1
# while True:
# if a%2 == 1:
# print(a)
# if a == 100:
# break
# a = a + 1
# #4、输出 1-100 内的所有偶数
# for i in range(2,101,2):
# print(i)
# Compute 1 - 2 + 3 - 4 + 5 ... + 99 (odd terms added, even terms subtracted).
def alternating_sum(limit=99):
    """Return 1 - 2 + 3 - 4 + ... up to `limit` (sign alternates, odds positive)."""
    return sum(i if i % 2 else -i for i in range(1, limit + 1))


# The original looped over an undefined name `c` (NameError); print the
# alternating sum described by the comment above instead.
print(alternating_sum())
| [
"saijp8@gmail.com"
] | saijp8@gmail.com |
57a654eaabd0cdc0ff0f03b8648123453f6a3110 | adb1bc3cba42861fdaa80dd110c497dce6443f18 | /triangle/triangle.py | e05948ee37c142700c68894f9d519857c1dbf6a1 | [] | no_license | duongvt55/ktpm2013 | 69dbd37487adf1bd86a9199ccdb76badfa9ad747 | 74c5bef219b33af104f824300ff962f58bb22072 | refs/heads/master | 2020-05-01T06:07:38.040508 | 2013-10-18T16:10:50 | 2013-10-18T16:10:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 792 | py | import math
import decimal
def detect_triangle(a, b, c):
    """Classify the triangle with side lengths a, b, c.

    All messages are returned as Vietnamese strings: wrong data type,
    value out of range, not a triangle, isosceles, equilateral, right
    isosceles, right, or scalene.
    """
    # Tolerance for floating-point comparisons of squared side lengths.
    e = 1e-9
    if type(a)!=float or type(b)!=float or type(c)!=float :
        # Only exact floats are accepted; ints and other numerics are rejected.
        return "Nhap sai kieu du lieu."
    elif a<0 or b<0 or c<0 or a>2**32-1 or b>2**32-1 or c>2**32-1 :
        # Sides must be in [0, 2^32 - 1].
        return "Nhap cac gia tri >= 0 va <= 2^32-1."
    else:
        # Swap so that `a` holds the largest side; `b` and `c` keep the others.
        temp = a
        a = max(a,b,c)
        if b==a : b = temp
        elif c==a : c = temp
        if (decimal.Decimal(b+c)<=decimal.Decimal(a)) :
            # Triangle inequality fails (degenerate triangles included).
            return "Day khong phai tam giac"
        elif (a==b or a==c) and b!=c:
            # Exactly two equal sides involving the largest side -> isosceles.
            return "Day la tam giac can"
        elif b==c :
            if a==b :
                # All three sides equal -> equilateral.
                return "Day la tam giac deu"
            elif math.fabs(b*b*2 - a*a)<e :
                # b == c and a^2 == 2*b^2 -> right isosceles.
                return "Day la tam giac vuong can"
            else :
                return "Day la tam giac can"
        elif math.fabs(b*b + c*c - a*a)<e :
            # Pythagorean check against the largest side -> right triangle.
            return "Day la tam giac vuong"
        else :
            return "Day la tam giac thuong"
"duongvt.dtg@gmail.com"
] | duongvt.dtg@gmail.com |
fa618fe007c45ce2dff0cb5199b05bd54f170dce | 7fd66e6df78b45aaa82c4857b335f2d8b2bb8326 | /item.py | d00609d646213c572b7dcfa5de199fbf995e54d5 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | maiiku/python-google-merchant-api | fd655ef5a2648d7f224f8824df89ff86fe2836b9 | b689940a9525eeda21372cb67d7e76a5d77b37bd | refs/heads/master | 2021-01-22T13:48:14.000688 | 2012-08-16T13:48:16 | 2012-08-16T13:48:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,546 | py | from lxml import etree
from lxml.builder import ElementMaker # lxml only !
from datetime import datetime,timedelta
class Item():
    """One product entry for a Google Merchant structured-content feed.

    Feed fields are held as plain attributes; ``create()`` serializes the
    item into an Atom ``<entry>`` XML document via lxml.

    Fixes vs. the original:
      * ``add_subelement`` now appends to its ``root`` parameter instead of
        always appending to ``entry``,
      * two ``nsmp`` typos (NameError) corrected to ``nsmap``,
      * the shipping section used an undefined name and ``self.s[...]``;
        it now emits one ``<scp:shipping>`` element per entry added with
        ``add_shipping()``,
      * the ``"shipping_weight "`` tag had a trailing space (invalid XML
        element name).
    """

    def __init__(self, **kwargs):
        # Required feed fields
        self.title = kwargs.get("title", None)
        self.description = kwargs.get("description", None)
        self.link = kwargs.get("link", None)
        self.uid = kwargs.get("uid", None)
        self.image_link = kwargs.get("image_link", None)
        self.language = kwargs.get("language", None)
        self.country = kwargs.get("country", None)
        self.google_product_category = kwargs.get("google_product_category", None)
        self.price = kwargs.get("price", None)
        self.price_unit = kwargs.get("price_unit", None)
        self.shippingprice = kwargs.get("shippingprice", None)
        self.availability = kwargs.get("availability", None)
        self.condition = kwargs.get("condition", None)
        # Fields with defaults (exposed through the properties below)
        self._channel = kwargs.get("channel", "online")
        self._expiration_date = kwargs.get(
            "expiration_date",
            (datetime.now() + timedelta(days=30)).isoformat().split("T")[0])
        self._adult = kwargs.get("adult", False)
        # Optional fields
        self.additional_image_link = kwargs.get("additional_image_link", None)
        self.age_group = kwargs.get("age_group", None)
        self.author = kwargs.get("author", None)
        self.brand = kwargs.get("brand", None)
        self.color = kwargs.get("color", None)
        self.edition = kwargs.get("edition", None)
        self.feature = kwargs.get("feature", None)
        self.featured_product = kwargs.get("featured_product", None)
        self.gender = kwargs.get("gender", None)
        self.genre = kwargs.get("genre", None)
        self.gtin = kwargs.get("gtin", None)
        self.item_group_id = kwargs.get("item_group_id", None)
        self.manufacturer = kwargs.get("manufacturer", None)
        self.material = kwargs.get("material", None)
        self.mpn = kwargs.get("mpn", None)
        self.pattern = kwargs.get("pattern", None)
        self.product_review_average = kwargs.get("product_review_average", None)
        self.product_review_count = kwargs.get("product_review_count", None)
        self.product_type = kwargs.get("product_type", None)
        self.quantity = kwargs.get("quantity", None)
        self.shipping_weight = kwargs.get("shipping_weight", None)
        self.size = kwargs.get("size", None)
        self.year = kwargs.get("year", None)
        # Shipping options added via add_shipping(); serialized by create().
        self.shipping = []
        '''
        TODO: SHIPPING/TAX
        '''

    # Fields with defaults: property triplets so that `del item.channel`
    # (etc.) restores the documented default value.
    def channel():
        doc = "The channel property, default to online"
        def fget(self):
            return self._channel
        def fset(self, value):
            self._channel = value
        def fdel(self):
            self._channel = "online"
        return locals()
    channel = property(**channel())

    def expiration_date():
        doc = "The expiration_date property, default 30 days"
        def fget(self):
            return self._expiration_date
        def fset(self, value):
            self._expiration_date = value
        def fdel(self):
            self._expiration_date = (datetime.now() + timedelta(days=30)).isoformat().split("T")[0]
        return locals()
    expiration_date = property(**expiration_date())

    def adult():
        doc = "The adult property."
        def fget(self):
            return self._adult
        def fset(self, value):
            self._adult = value
        def fdel(self):
            self._adult = False
        return locals()
    adult = property(**adult())

    def add_shipping(self, country, service, price):
        """Append one shipping option (country/service/price) to the item."""
        self.shipping.append({"country": country, "service": service, "price": price})

    def create(self):
        """Serialize this item to an Atom <entry> XML document (lxml string)."""
        def nsmap(key):
            # Clark-notation namespace prefix, e.g. "{http://...}tag".
            return "{%s}" % NSMAP[key]

        def add_subelement(root, ns, tag, value):
            # Only emit the element when a value was supplied (falsy skipped).
            if value:
                etree.SubElement(root, nsmap(ns) + tag).text = str(value)

        NSMAP = {None: "http://www.w3.org/2005/Atom",
                 "app": "http://www.w3.org/2007/app",
                 "gd": "http://schemas.google.com/g/2005",
                 "sc": "http://schemas.google.com/structuredcontent/2009",
                 "scp": "http://schemas.google.com/structuredcontent/2009/products"}

        entry = etree.Element("entry", nsmap=NSMAP)  # lxml only!
        add_subelement(entry, None, "title", self.title)
        add_subelement(entry, None, "content", self.description)
        add_subelement(entry, None, "link", self.link)
        add_subelement(entry, "sc", "id", self.uid)
        add_subelement(entry, "sc", "image_link", self.image_link)
        add_subelement(entry, "sc", "additional_image_link", self.additional_image_link)
        add_subelement(entry, "sc", "content_language", self.language)
        add_subelement(entry, "sc", "target_country", self.country)
        add_subelement(entry, "sc", "channel", self.channel)
        add_subelement(entry, "sc", "expiration_date", self.expiration_date)
        add_subelement(entry, "sc", "adult", self.adult)
        add_subelement(entry, "scp", "age_group", self.age_group)
        add_subelement(entry, "scp", "author", self.author)
        add_subelement(entry, "scp", "availability", self.availability)
        add_subelement(entry, "scp", "brand", self.brand)
        add_subelement(entry, "scp", "color", self.color)
        add_subelement(entry, "scp", "condition", self.condition)
        add_subelement(entry, "scp", "edition", self.edition)
        add_subelement(entry, "scp", "feature", self.feature)
        add_subelement(entry, "scp", "featured_product", self.featured_product)
        add_subelement(entry, "scp", "gender", self.gender)
        add_subelement(entry, "scp", "genre", self.genre)
        add_subelement(entry, "scp", "google_product_category", self.google_product_category)
        add_subelement(entry, "scp", "gtin", self.gtin)
        add_subelement(entry, "scp", "item_group_id", self.item_group_id)
        add_subelement(entry, "scp", "manufacturer", self.manufacturer)
        add_subelement(entry, "scp", "material", self.material)
        add_subelement(entry, "scp", "mpn", self.mpn)
        add_subelement(entry, "scp", "pattern", self.pattern)
        if self.price:
            # Price carries its currency as the `unit` attribute.
            etree.SubElement(entry, nsmap("scp") + "price",
                             unit=str(self.price_unit)).text = str(self.price)
        add_subelement(entry, "scp", "product_review_average", self.product_review_average)
        add_subelement(entry, "scp", "product_review_count", self.product_review_count)
        add_subelement(entry, "scp", "product_type", self.product_type)
        add_subelement(entry, "scp", "quantity", self.quantity)
        add_subelement(entry, "scp", "shipping_weight", self.shipping_weight)
        add_subelement(entry, "scp", "size", self.size)
        add_subelement(entry, "scp", "year", self.year)
        if self.shipping:
            # One <scp:shipping> element per option added with add_shipping().
            for option in self.shipping:
                shipping_el = etree.SubElement(entry, nsmap("scp") + "shipping")
                etree.SubElement(shipping_el, nsmap("scp") + "shipping_price",
                                 unit=str(self.price_unit)).text = str(option["price"])
                add_subelement(shipping_el, "scp", "shipping_service", option["service"])
                add_subelement(shipping_el, "scp", "shipping_country", option["country"])
        return etree.tostring(entry, pretty_print=True, xml_declaration=True)
| [
"w4rp3d.1@gmail.com"
] | w4rp3d.1@gmail.com |
c942e49a25ef232d0f68b857272e6f54e5fd63e7 | 458b5c6bc28145e6e9128401c95e6a204c5a797c | /desafiobhaskara.py | aa94e9c2e010bb85c0fc98736b349d1ecfd1b2bd | [] | no_license | milenabaiao/python-1 | 85a1a2d7ab4c8c9b44ee6dd6038c86552a5a6ba4 | c32945348151843c03ca93657749c276becd5102 | refs/heads/main | 2023-02-05T20:17:23.005295 | 2020-12-30T22:45:05 | 2020-12-30T22:45:05 | 325,660,591 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 546 | py | import math
a = float(input("Digite o valor de a: "))
b = float(input("Digite o valor de b: "))
c = float(input("Digite o valor de c: "))
delta = b ** 2 - 4 * a * c
if delta ==0:
raiz1 = (-b + math.sqrt(delta)) / (2 * a)
print("A única raiz é: ", raiz1)
else:
if delta < 0:
print("Esta equaçaõ não possui raizes reais")
else:
raiz1 = (-b + math.sqrt(delta)) / (2 * a)
raiz2 = (-b - math.sqrt(delta)) / (2 * a)
print("A primeira raiz é: ", raiz1)
print("A segunda raiz é: ", raiz2)
| [
"milenabaiao@gmail.com"
] | milenabaiao@gmail.com |
1ade444ef58061b442ce23d0b5caa2f07fe5c91b | 44376a053aedc3c4d106b4ff8005e44b2e3462e1 | /coding-old/Codechef/S10E.py | 0709ff62b9f11326be3aceef35d2edf8c3e3ea63 | [] | no_license | zed1025/coding | e273cecfc1fe111dd8e466c5825c9ff153e96e71 | 3e2a8765628f31c9ac620fcb9b473ac93bfe7697 | refs/heads/main | 2023-02-01T19:20:53.972484 | 2020-12-18T05:03:56 | 2020-12-18T05:03:56 | 320,640,232 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 646 | py | '''
https://www.codechef.com/OCT19B/problems/S10E
October 2019 Long Chanllenge Problem 1
Problem Code S10E
'''
def checkGood(arr, x):
    """Return True when `x` is strictly smaller than every value in `arr`."""
    return x < min(arr)
try:
    T = int(input())
except:
    # NOTE(review): this bare except leaves T undefined when the first line
    # is not an integer; the `while` below would then raise NameError.
    pass
while T > 0:
    N = int(input())
    arr = list(map(int, input().split()))
    # The first price is always counted; each later price is "good" when it
    # is strictly below the minimum of the (up to) five preceding prices.
    count = 1
    fiveNums = [arr[0]]
    for i in range(1, N):
        x = checkGood(fiveNums, arr[i])
        if x:
            count = count + 1
        fiveNums.append(arr[i])
        # Keep only the last five prices in the sliding window.
        if len(fiveNums) > 5:
            del fiveNums[0]
    print(count)
    T = T-1
"amit251098@yahoo.in"
] | amit251098@yahoo.in |
79cca0e194c552ce99963bdc4ef488907defd3f5 | 89b055f0e77379b915f1342c8d82ba12efbd6a5d | /hiddenbyte.py | b26cca493533b42c1573506a0c7be213cad3f316 | [] | no_license | Moonba/GCPPlay | 292f61357ea29e998b804f1defb619776be464f3 | 2e5530596c12c578035fc9e9b3881b1930ffdb49 | refs/heads/master | 2020-06-12T16:36:05.697021 | 2016-12-08T02:17:33 | 2016-12-08T02:17:33 | 75,793,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | with open("BigQueryScript.py") as fp:
for i, line in enumerate(fp):
if "\xe2" in line:
print i, repr(line) | [
"mouna.balghouthi@gmail.com"
] | mouna.balghouthi@gmail.com |
0ddc64aeec1a3bcaa9bb78223741a02a4ab908f2 | 8c8ea797b0821400c3176add36dd59f866b8ac3d | /atcoder/abc/abc014/a.py | 30a599af79c00136ed66cfabfacd1cde704515a3 | [] | no_license | fushime2/competitive | d3d6d8e095842a97d4cad9ca1246ee120d21789f | b2a0f5957d8ae758330f5450306b629006651ad5 | refs/heads/master | 2021-01-21T16:00:57.337828 | 2017-05-20T06:45:46 | 2017-05-20T06:45:46 | 78,257,409 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33 | py | for i in xrange(12):
print i
| [
"okmt52@gmail.com"
] | okmt52@gmail.com |
0397651efe03748abb39dcc12c9ba33634d4df78 | 9ffeed117709b6d0f88bbbe0269685fbdf4eff3e | /lab1/lab1.py | 7b3f52c5e77f6d3ca757a1315844bf799b4391ce | [] | no_license | Kaiel96/MyPythonClass | 081753eec4fa54c8b86e5f39b3d73ffa6c9f95bb | 666552ac14802d291344e34e5fa2b50374091154 | refs/heads/master | 2021-01-10T15:57:08.450872 | 2015-12-22T19:11:28 | 2015-12-22T19:11:28 | 48,448,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | #
#
# This is a header block example for lab 1.
#
# You will need to supply the following information.
#
# Name:Brannden Moss
# Instructor: Professor Hataskey
# Section:7
#
# Greet the student by name.
print ("Hello Brannden.")
| [
"branndenmoss@Branndens-MacBook-Pro.local"
] | branndenmoss@Branndens-MacBook-Pro.local |
12598f014099deae48cea3f0402d007713858b5a | 1fc45a47f0e540941c87b04616f3b4019da9f9a0 | /tests/sentry/api/serializers/test_grouptagvalue.py | d328f092a3656452391add7aa2fc5d6ee396d281 | [
"BSD-2-Clause"
] | permissive | seukjung/sentry-8.15.0 | febc11864a74a68ddb97b146cc1d2438ef019241 | fd3cab65c64fcbc32817885fa44df65534844793 | refs/heads/master | 2022-10-28T06:39:17.063333 | 2018-01-17T12:31:55 | 2018-01-17T12:31:55 | 117,833,103 | 0 | 0 | BSD-3-Clause | 2022-10-05T18:09:54 | 2018-01-17T12:28:13 | Python | UTF-8 | Python | false | false | 1,697 | py | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import six
from sentry.api.serializers import serialize
from sentry.models import EventUser, GroupTagValue, TagValue
from sentry.testutils import TestCase
class GroupTagValueSerializerTest(TestCase):
    """Serialization of GroupTagValue rows whose key is `sentry:user`."""

    def test_with_user(self):
        # With a backing TagValue/EventUser pair, the serializer resolves the
        # human-readable `name` from the EventUser label.
        user = self.create_user()
        project = self.create_project()
        euser = EventUser.objects.create(
            project=project,
            email='foo@example.com',
        )
        tagvalue = TagValue.objects.create(
            project=project,
            key='sentry:user',
            value=euser.tag_value,
        )
        grouptagvalue = GroupTagValue.objects.create(
            project=project,
            group=self.create_group(project=project),
            key=tagvalue.key,
            value=tagvalue.value,
        )
        result = serialize(grouptagvalue, user)
        assert result['id'] == six.text_type(grouptagvalue.id)
        # The internal `sentry:` prefix is stripped from the serialized key.
        assert result['key'] == 'user'
        assert result['value'] == grouptagvalue.value
        assert result['name'] == euser.get_label()

    def test_with_no_tagvalue(self):
        # Without a backing TagValue/EventUser, `name` falls back to the raw
        # tag value.
        user = self.create_user()
        project = self.create_project()
        grouptagvalue = GroupTagValue.objects.create(
            project=project,
            group=self.create_group(project=project),
            key='sentry:user',
            value='email:foo@example.com',
        )
        result = serialize(grouptagvalue, user)
        assert result['id'] == six.text_type(grouptagvalue.id)
        assert result['key'] == 'user'
        assert result['value'] == grouptagvalue.value
        assert result['name'] == grouptagvalue.value
| [
"jeyce@github.com"
] | jeyce@github.com |
7dc51dcc8666cdd82ee4b318accd3854ae6784a0 | 8f14eda8e82a2348d1462f35674e85e8765355ad | /cpp/target_module/target_module_v1/setup.py | 8ec30c98b010e40f5437e8df7d9023cd7feb0ef8 | [] | no_license | SaulLu/SBCLL | 4cf064d3d3a01b25c3a324b83460f4d410401fec | ed5bb14aa1489793b399b0ad830ddf398e4cbe06 | refs/heads/master | 2020-12-30T06:42:29.895472 | 2020-04-01T08:25:24 | 2020-04-01T08:25:24 | 238,895,367 | 0 | 0 | null | 2020-03-20T08:39:31 | 2020-02-07T10:19:38 | Python | UTF-8 | Python | false | false | 378 | py | from distutils.core import setup, Extension
# cmd python3 setup.py build_ext --inplace
# cmd python setup.py build_ext --inplace
c_ext = Extension("_target_module", ["_target_module.cpp","Attacker.cpp","Attribution.cpp","Attributor.cpp","Checks.cpp","Target.cpp","Geometry.cpp"],
extra_compile_args=['-std=c++11'], language = 'c++')
setup(
ext_modules=[c_ext]
)
| [
"eti1ww@gmail.com"
] | eti1ww@gmail.com |
21e24d1a03ad52908faf36d6b79230d636e0564e | 59ae642e4e2918d00e75ce755a8b1f531342c7ec | /p40-1.1.py | 34e0449b0b80f02901f6a0b54f6a81f3bb647947 | [] | no_license | yintian710/- | b5f34a71e2b6b67deca594c43e0eea29a4b11f04 | de2043a9ffe76692ff55ec7e8f064c6769f9c5e7 | refs/heads/main | 2023-01-13T16:54:53.302594 | 2020-11-24T15:46:48 | 2020-11-24T15:46:48 | 305,124,656 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 977 | py | # coding:utf-8
import math
def f(x, y):
return 1 / (1 + x * x) - 2 * y * y
# return -y + x + 1
def Euler(x, y, h, n):
E = [y]
for i in range(0, n):
y = y + h * f(x, y)
E.append(y)
x += h
return E
def trapezoid(x, y, h, n):
T = [y]
for i in range(0, n):
# k1 = h * f(x, y)
# k2 = h * f(x+h, k1)
# y = y + (k1 + k2) / 2
y1 = y + h*f(x, y)
y = y + h/2 * (f(x, y) + f(x+h, y1))
x += h
T.append(y)
return T
def solution(x, h, n):
S = []
for i in range(0, n):
# y = x + math.e ** (-x)
y = x/(1 + x*x)
S.append(y)
x += h
return S
if __name__ == '__main__':
x0 = 0
y0 = 0
h = 0.1
n = 11
E = Euler(x0, y0, h, n)
T = trapezoid(x0, y0, h, n)
S = solution(x0, h, n)
for i in range(1, n):
print(i, "%.6f" % E[i], "%.6f" % T[i], "%.6f" % S[i],"%.6f" %(E[i]-S[i]),"%.6f" %(T[i]-S[i]))
| [
"noreply@github.com"
] | noreply@github.com |
578c6c754e089392abb4a324f8487938a2f25f1d | 50474276d52b4734994679dccbe8d731949d80b6 | /class5_tratando_txt.py | 9006f132f1805c66aff763059a02043e194750fc | [] | no_license | Mauricio1xtra/Revisar_Python_JCAVI | b21a46ab52732223b43c37bb2a0ae31a9054d39e | d93288e919b43e2fe8c011f90034f350cb2bad7f | refs/heads/master | 2022-12-05T14:51:56.725824 | 2020-08-20T21:55:46 | 2020-08-20T21:55:46 | 285,844,959 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,146 | py | ##Abrir o arquivo com o atributo open e read
arqu1 = open("Files/arquivo.txt",'r')
##Ler o arquivo
print(arqu1.read())
## Seek voltar o cursor
arqu1.seek(0,0)
print(arqu1.read())
## Fechar o arquivo
arqu1.close()
##Atrituto w+ permite gravar(substitui, criar o conteúdo e arquivo) e ler o arquivo
arqu2 = open("Files/arquivo.txt","w+")
arqu2.write("Gravei uma linha\n") #\n faz a quebra de linha
arqu2.write("Gravei segunda linha\n")
#Ler o arquivo
arqu2.seek(0,0)
print(arqu2.read())
#Alterar um arquivo existente e adicionar dados
#Utilizar o modelo a+
arqu2 = open("Files/arquivo.txt","a+")
arqu2.write("Nova linha adicionada\n")
#Ler o arquivo
arqu2.seek(0,0)
print(arqu2.read())
arqu2.close()
##Gerenciando o Contexto do uso dos arquivos
with open("Files/arquivo.txt","w+") as f:
f.read
f.write("Grava primeira linha\n")
f.write("Grava segunda linha\n")
f.seek(0,0)
##Transformar o conteudo do arquivo em uma string (estrutura de dados)
gravar = str(f.read())
##Gravar conteudo de um arquivo em outro
with open("Files/arquivo2.txt","w+") as f2:
f2.write(gravar)
f2.seek(0,0)
print(f2.read()) | [
"bestsound2u@gmail.com"
] | bestsound2u@gmail.com |
7ae787bca6218c85ab763ed18d5cc546dd7a9f72 | 866418a05db550487e5eb6f5063f04f1241ccb4a | /example/11/╡┌11.3_1.py | 39c0355ca01e72cc70a9202afd65d0b91cafe609 | [] | no_license | Freshield/LEARN_Python_Crawler | 37cd552de8fb3f30157326a22a6b5cd62bd74703 | 53406cac38c27960e863c7bd5366bd1ae01ecd6c | refs/heads/main | 2023-02-19T00:36:52.548150 | 2021-01-23T03:40:20 | 2021-01-23T03:40:20 | 326,993,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | import pymongo
# 创建对象,连接本地数据库。
# 方法一:
client = pymongo.MongoClient()
# 方法二:
client = pymongo.MongoClient('localhost', 27017)
# 方法三:
client = pymongo.MongoClient('mongodb://localhost:27017/')
# 连接DB数据库
db = client['DB']
# 连接集合user,集合类似关系数据库的数据表
# 如果集合不存在,会新建集合user
user_collection = db.user
# 设置文档格式(文档即我们常说的数据)
| [
"zxdsw199182@gmail.com"
] | zxdsw199182@gmail.com |
162b837283cce0e9eff9926ea2e027c9fceccae5 | 1e5ae9d745dc6992774217b2c5c32e34e7b7635e | /tiny_url/IdentifierStream.py | 354a2c00c615e3ec3810349207a7656c2f2ab9ff | [] | no_license | QuentinDuval/TinyURL | d9bb4a12fde32da4ee4e7d57fa2b5e05179957d3 | e9460f81c630ff6a3b60025aa68ea687b12b4cf6 | refs/heads/master | 2020-06-15T20:36:24.058529 | 2019-07-05T10:13:51 | 2019-07-05T10:13:51 | 195,387,281 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 645 | py | from kazoo.client import KazooClient
class IdentifierStream:
def __init__(self, zk: KazooClient, reservation_size: int):
self.zk = zk
self.reservation_size = reservation_size
self.current_id = None
self.last_id = None
self.counter = zk.Counter("/tiny_id", default=1)
def __next__(self):
if self.current_id is None or self.current_id >= self.last_id:
self.counter += self.reservation_size
self.current_id = self.counter.pre_value + 1
self.last_id = self.counter.post_value
else:
self.current_id += 1
return self.current_id
| [
"qduval@murex.com"
] | qduval@murex.com |
317dc6c84fdba8998fcffc9c392442f5d580cda9 | f573ff2229e238a0e4e6fa659945432d86e6ed30 | /magma-api.py | f5b03d3381c2ae093d107f1924883cd5295e1ece | [] | no_license | berkanegrice/Non-Official-MagmaAPI | 995f7188cb3b81c50b57a19234c0ffad7dbb75c5 | 0dd9bdcb6f8352ac903ac82274c6f87a091056b5 | refs/heads/main | 2023-08-25T01:31:57.688196 | 2021-11-02T15:46:34 | 2021-11-02T15:46:34 | 334,471,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,097 | py | import xml.etree.ElementTree as ET
import urllib.parse
import os
import sys
# the magma file to be executed.
fn = sys.argv[1]
if os.path.exists(fn):
query_file = open(fn, "r");
raw_str = query_file.read();
query_file.close()
# To encode the given magma file.
encoded = urllib.parse.quote(raw_str, safe='')
# add necesssary part to POST method.
final_str = "input="+encoded
# write into a file encoded final magma file.
out = open("magmaquery.txt", "w+")
out.write(final_str)
out.close()
# call a magma api post method.
run_method = os.popen("curl -XPOST --data @magmaquery.txt http://magma.maths.usyd.edu.au/xml/calculator.xml")
# to get magma api result
magma_res = run_method.read()
# write into a file encoded magma result file.
out_enc_magma = open("magmares.xml", "w+")
out_enc_magma.write(magma_res)
out_enc_magma.close()
# decode the coming result from magma.
root_node = ET.parse("magmares.xml").getroot()
# find the result line/s in the resutlt xml file.
for tag in root_node.findall('results/line'):
print(tag.text)
os.remove("magmaquery.txt")
os.remove("magmares.xml")
| [
"noreply@github.com"
] | noreply@github.com |
8c8b6f461d38545390e7f485d6c1698d70748e2a | 870f8c7d8f3f916df5c6f4537cf31d35b48f6f2f | /create_RRT.py | eafbbaf80bed56128d5cf4a098fbd68e8f4a85a9 | [] | no_license | lauraw7/catkin_ws_user | 56776da03e5006ff60e3cd1f29e8293785d71d89 | 10851e75770f8ea2f5ab0a00a32e1ee6cd0c32c5 | refs/heads/master | 2021-09-05T16:36:39.067752 | 2018-01-29T17:10:32 | 2018-01-29T17:10:32 | 109,979,145 | 0 | 0 | null | 2017-11-08T13:26:06 | 2017-11-08T13:26:05 | null | UTF-8 | Python | false | false | 2,095 | py | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
def create_RTT(num_samples, samples, start, step_length):
    """Grow a rapidly-exploring random tree (RRT).

    Parameters
    ----------
    num_samples : int
        Number of entries of *samples* to process.
    samples : list of (x, y) tuples
        Candidate points used to expand the tree.
    start : list of (x, y) tuples
        Initial vertex list (typically a single start point).
    step_length : float
        Maximum edge length; samples further away are truncated so the
        new vertex lies exactly *step_length* from its nearest neighbour.

    Returns
    -------
    tuple(list, list)
        (vertices, edges) where each edge is (child, parent).
    """
    # Copy so the caller's start list is not mutated in place.
    vertices = list(start)
    edges = []
    for i in range(num_samples):
        sample = samples[i]
        # Distance from the sample to every existing vertex.
        dists = [np.hypot(v[0] - sample[0], v[1] - sample[1]) for v in vertices]
        ind = dists.index(min(dists))  # nearest vertex
        nearest = vertices[ind]
        if dists[ind] <= step_length:
            # Close enough: adopt the sample directly as a new vertex.
            vertices.append(sample)
            edges.append((sample, nearest))
        else:
            # Step from the nearest vertex towards the sample.
            # arctan2 handles all four quadrants; the original
            # arcsin/sqrt construction could only step in +x direction.
            alpha = np.arctan2(sample[1] - nearest[1], sample[0] - nearest[0])
            new_sample = (nearest[0] + np.cos(alpha) * step_length,
                          nearest[1] + np.sin(alpha) * step_length)
            vertices.append(new_sample)
            edges.append((new_sample, nearest))
    return vertices, edges
def plot_RTT(vertices, edges):
    """Draw the RRT: edges as red lines, vertices as filled circles.

    The first vertex is treated as the start point (drawn green with a
    'start' label); all other vertices are red and labelled by index.
    """
    for e in range(len(edges)):  # plot all edges as red lines
        plt.plot([edges[e][0][0], edges[e][1][0]], [edges[e][0][1], edges[e][1][1]], color='r')  # ([x1,x2],[y1,y2])
    for v in range(len(vertices)):  # plot all vertices as filled circles
        if v == 0:
            plt.scatter(*(vertices[v][0], vertices[v][1]), s=500, color='g')  # start in green
            plt.text(vertices[v][0]-0.15, vertices[v][1]-0.05, 'start')
        else:
            plt.scatter(*(vertices[v][0], vertices[v][1]), s=500, color='r')  # other vertices in red
            plt.text(vertices[v][0], vertices[v][1], v)
    # Blocks until the figure window is closed.
    plt.show()
plt.show()
def main():
    """Demo: grow a small RRT from the origin over five fixed samples and plot it."""
    step_length = 2
    # NOTE(review): 'start' is defined but unused; the literal [(0,0)] is
    # passed to create_RTT below instead.
    start = [(0, 0)]
    points = [(3, 4), (2, 1), (5, 4), (2, 2), (8, 1)]
    vertices, edges = create_RTT(5, points, [(0, 0)], step_length)
    plot_RTT(vertices, edges)
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
f5c6427fa6733d2fab752c262dac8616d209e53a | 36874f6e03921c2103d7b11631286ac8a4562725 | /battery.py | 810f68739ee9e0d9bba2a52607680cf97b0cfbf4 | [] | no_license | neternefer/Python | 9f98e6712b521b8f233efc6a056c0c5620c1a91b | e625444dff65a16f7fd04fca19b433b45f890054 | refs/heads/master | 2021-06-13T13:22:05.659463 | 2019-11-24T18:29:20 | 2019-11-24T18:29:20 | 136,168,913 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 539 | py |
class Battery():
    """A battery for an electric car, sized in kWh."""

    def __init__(self, battery_size=70):
        """Initialize the battery; *battery_size* is in kWh (default 70)."""
        self.battery_size = battery_size

    def describe_battery(self):
        """Print the battery size."""
        print("Battery size is " + str(self.battery_size))

    def get_range(self):
        """Print the approximate range (miles) for this battery size."""
        if self.battery_size == 70:
            crange = 240
        elif self.battery_size == 85:
            crange = 270
        else:
            # The original left ``crange`` unbound for any other size and
            # crashed with UnboundLocalError; report the unknown size instead.
            print("Range unknown for a " + str(self.battery_size)
                  + " kWh battery.")
            return
        message = "This car can go approx " + str(crange)
        message += " miles on a full charge."
        print(message)

    def upgrade_battery(self):
        """Upgrade to the 85 kWh battery unless already at least that size."""
        if self.battery_size < 85:
            self.battery_size = 85
| [
"noreply@github.com"
] | noreply@github.com |
fbab3ace3bd4c2b48ce96f6434fa96c702fdb9c8 | ca25fd64d291b58ad1d95c8a6d207b1d7f2546a9 | /applications/sumdiff/sumdiff_1.py | cd063c6b7a42a1e0539b5c3605b586f00de19e71 | [] | no_license | jasimrashid/cs-module-project-hash-tables | 3070e10f3acf1d97ee9ca53c2adcc5260ea6c2bc | 4622876a49990da4f477b939f2055cb1cce4c348 | refs/heads/master | 2022-12-06T21:56:59.414837 | 2020-09-04T06:54:19 | 2020-09-04T06:54:19 | 291,763,058 | 0 | 0 | null | 2020-08-31T16:09:56 | 2020-08-31T16:09:55 | null | UTF-8 | Python | false | false | 4,924 | py | """
find all a, b, c, d in q such that
f(a) + f(b) = f(c) - f(d)
"""
from pprint import pprint
# Search space: f is evaluated on every element of q.
q = list(range(1, 80))


def f(x):
    """The function under study: f(x) = 4*x + 6."""
    return x * 4 + 6


def find_quadruples(values):
    """Return every (a, b, c, d) from *values* with f(a)+f(b) == f(c)-f(d).

    (a, b) ranges over unordered pairs of distinct positions and (c, d)
    over ordered pairs of distinct positions.  The pair sums are indexed
    in a dict so the search runs in roughly O(n^2), replacing the
    original scan over string-encoded index keys, which the author's own
    comment noted breaks for indices >= 10.
    """
    # Map each pair sum f(a)+f(b) to the (a, b) pairs producing it.
    sums = {}
    for i, a in enumerate(values):
        for b in values[i + 1:]:
            sums.setdefault(f(a) + f(b), []).append((a, b))
    # For every ordered (c, d), collect the matching left-hand pairs.
    quadruples = []
    for c in values:
        for d in values:
            if c == d:
                continue
            for a, b in sums.get(f(c) - f(d), ()):
                quadruples.append((a, b, c, d))
    return quadruples


if __name__ == '__main__':
    for a, b, c, d in find_quadruples(q):
        print(f"f({a}) + f({b}) = f({c}) - f({d}) "
              f"{f(a)} + {f(b)} = {f(c)} - {f(d)}")
"jasim.rashid@gmail.com"
] | jasim.rashid@gmail.com |
d96347873e9e35694bfbbc5d8c3adf35d0c11a59 | 157d0810d40bbb165889f946566346663cf5b22f | /Python-For-Everyone-Horstmann/Chapter9-Objects-and-Classes/P9_25.py | 4f372df40188975237fd47929d3e6603486ef014 | [] | no_license | dg5921096/Books-solutions | e6ccdcaba0294bdc95e2267723a02d2ba090cb10 | 31bb4bba240bf95aafeb6d189eade62c66a1765a | refs/heads/master | 2021-12-09T16:07:47.756390 | 2021-11-14T07:09:25 | 2021-11-14T07:09:25 | 255,447,147 | 0 | 0 | null | 2020-04-13T21:39:02 | 2020-04-13T21:39:01 | null | UTF-8 | Python | false | false | 794 | py | # Design a class Mailbox that stores e-mail messages, using the Message class of Exercise
# P9.24. Implement the following methods:
# • def addMessage(self, message)
# • def getMessage(self, index)
# • def removeMessage(self, index)
class Mailbox():
def __init__(self):
self._mails = []
def list_messages(self):
output = []
for i, message in enumerate(self._mails):
output.append("[{}] From: {}, To: {}".format(i, message.get_sender(), message.get_recipient()))
return "\n".join(output)
def add_message(self, message_object):
self._mails.append(message_object)
def get_message(self, index):
return self._mails[index].to_string()
def remove_message(self, index):
del self._mails[index]
| [
"syndbe@gmail.com"
] | syndbe@gmail.com |
d12a5cb3005e46bb74e9b4b87694dd8bd09241f6 | aa2fbf0825a1ce9fd02cf017b787ed8a64f31fc2 | /neuronal/classNeurona.py | b5fd4b9eb715ff9b3088f9334fc771c797faf1b2 | [
"MIT"
] | permissive | systemgregorypc/Agatha-inteligencia-artificial- | dc9d7bc8fe4021086c06c40a335b3d3a363777a4 | 0fd1b14b6d9dc3c21c59f333de10f54c589a35a3 | refs/heads/master | 2023-08-07T04:51:45.100697 | 2023-08-01T17:10:04 | 2023-08-01T17:10:04 | 120,570,631 | 0 | 0 | MIT | 2023-07-14T16:12:25 | 2018-02-07T06:02:32 | C# | UTF-8 | Python | false | false | 1,125 | py | from math import exp
class Neurona():
    """A single neuron: weighted sum of inputs plus activation functions.

    The constructor reads alpha, the inputs and the weights interactively.
    """

    def __init__(self):
        # alpha: activation parameter (slope / threshold).
        self.a = float(input("Ingrese valor de alpha:\n"))
        # Parse the comma-separated entries into numbers.  The original
        # kept the raw input strings, so suma() crashed on str * str.
        self.x = [float(v) for v in input(
            "Ingrese los valores de entradas con una coma despues de cada dato: \n").split(",")]
        self.w = [float(v) for v in input(
            "Ingrese los valores de peso con una coma despues de cada dato: \n").split(",")]

    def suma(self, x, w):
        """Return the dot product of inputs *x* and weights *w*."""
        total = 0
        for k in range(len(x)):
            total = total + w[k] * x[k]
        return total

    def sigmoide(self, a, suma):
        """Logistic sigmoid 1 / (1 + e^(-a*suma))."""
        return 1 / (1 + exp(-a * suma))

    def lineal(self, a, suma):
        """Linear activation: a * suma."""
        return suma * a

    def stepFunction(self, a, suma):
        """Unit step: 0 if suma is below the threshold *a*, else 1."""
        if suma < a:
            return 0
        return 1
# Interactive demo: build a neuron from user input and show the weighted
# sum followed by each activation function applied to it.
neurona = Neurona()
v = neurona.suma(neurona.x, neurona.w)
print("Sumatoria: %.2f" % v)
sigmo = neurona.sigmoide(neurona.a, v)
print("Resultando de aplicar a Sigmoide: %.4f" % sigmo)
line = neurona.lineal(neurona.a, v)
print("Resultado de aplicar a Lineal: %.4f" % line)
step = neurona.stepFunction(neurona.a, v)
print("Resultado de aplicar a Escalon Unitario: %.4f" % step)
| [
"noreply@github.com"
] | noreply@github.com |
1da774ed56935754ec237bf680af606d094710d0 | a4d2077522ba85c46d3ba9e34fa9bc885c60ce4d | /python_learn/arithmatic_operation_exercise.py | c761b354b46665a3234d5024a0dc89a8d9d6bf10 | [] | no_license | svnas1994/python_Learn | 18fcf2d22729b54aa5076c99fc05d329072e0758 | 66bb47d85ae291efde429674931c440f1cfa1348 | refs/heads/master | 2023-06-10T03:22:16.516636 | 2021-06-25T20:54:29 | 2021-06-25T20:54:29 | 367,152,530 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 803 | py | """This programme is for arithmatic operation of two numbers"""
# Read the two integer operands.
a = int(input("enter the number"))
b = int(input("enter the number"))
# Show the menu of supported operations (one string literal spanning
# several source lines via backslash continuations).
print ("for addtion enter add\n\
for subtraction etner sub\n\
for multipliction enter mul\n\
for remainder of division add rem")
# Normalise the chosen operation to lower case.
c=input("enter your required operation").lower()
def add(a, b):
    """Print the sum of the two numbers."""
    total = a + b
    print(total)
def sub(a, b):
    """Print the difference of the two numbers."""
    difference = a - b
    print(difference)
def mul(a, b):
    """Print the product of the two numbers."""
    product = a * b
    print(product)
def rem(a, b):
    """Print the remainder of dividing a by b."""
    remainder = a % b
    print(remainder)
# Dispatch to the requested operation; anything else is rejected.
if c == "add":
    add(a, b)
elif c == "sub":
    sub(a, b)
elif c == "mul":
    mul(a, b)
elif c == "rem":
    rem(a,b)
else:
    print ("the requested operation is not supported")
print ("adding some extra lllinasdfe")
"sriramsairam347@gmail.com"
] | sriramsairam347@gmail.com |
cff6408a60681e0ad93e24e0618f5fbb169d4684 | 71142e0638354a5025b378693ce5d0129fc833ba | /chapter03/decisionTree.py | 147efbebae695053d576e3ba97e1272aee34fa80 | [] | no_license | 928060115/machineLearning | b43cbb5ed732c04d78f7c62162405cc3f8f0a609 | a50b31340b56e287d3c615966ad11b707e354b6a | refs/heads/master | 2020-03-21T06:54:14.857248 | 2018-06-25T11:03:24 | 2018-06-25T11:03:24 | 138,248,553 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,276 | py | # -*- coding:utf-8 -*-
"""
@author:ly
@file: decisionTree.py
@time: 2018/6/2510:37
@version: v1.0
@Dec: 实现决策树
"""
from math import log
import operator
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import pickle
import os
import time
from sklearn import tree
"""
函数说明:计算给定数据集的经验熵(香农熵)
:parameter
dataSet - 数据集
:returns
shannonEnt - 经验熵(香农熵)
"""
def calcShannonEnt(dataSet):
# 返回数据集的行数
numEntires = len(dataSet)
# 保存每个标签(label)出现次数的字典
labelCounts = {}
# 对每组特征向量进行统计
for featVec in dataSet:
# 当前标签(label)信息
currentLabel = featVec[-1]
# 如果标签没有放入到统计次数的字典,添加进去
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
# label计数
labelCounts[currentLabel] += 1
# 初始设置经验熵
shannonEnt = 0.0
for key in labelCounts:
# 选择该标签的概率
prod = float(labelCounts[key]) / numEntires
# 利用公式计算
shannonEnt -= prod * log(prod, 2)
# 返回经验熵
return shannonEnt
"""
函数说明:创建测试数据集
:parameter
无
:returns
dataSet - 数据集
labels - 特征标签
"""
def createDataSet():
# 数据集
dataSet = [[0, 0, 0, 0, 'no'],
[0, 0, 0, 1, 'no'],
[0, 1, 0, 1, 'yes'],
[0, 1, 1, 0, 'yes'],
[0, 0, 0, 0, 'no'],
[1, 0, 0, 0, 'no'],
[1, 0, 0, 1, 'no'],
[1, 1, 1, 1, 'yes'],
[1, 0, 1, 2, 'yes'],
[1, 0, 1, 2, 'yes'],
[2, 0, 1, 2, 'yes'],
[2, 0, 1, 1, 'yes'],
[2, 1, 0, 1, 'yes'],
[2, 1, 0, 2, 'yes'],
[2, 0, 0, 0, 'no']]
labels = ['年龄','有工作','有自己的房子','信贷情况']
return dataSet,labels
"""
函数说明:按照给定特征划分数据集
:parameter
dataSet - 待划分的数据集
axis - 划分数据集的特征
value - 需要返回的特征的值
:returns
无
"""
def splitDataSet(dataSet,axis,value):
# 创建返回的数据集列表
retDataSet = []
# 遍历数据集
for featVec in dataSet:
if featVec[axis] == value:
# 去掉axis特征
reducedFeatVec = featVec[:axis]
# 将符合条件的添加到返回的数据集
reducedFeatVec.extend(featVec[axis+1:])
retDataSet.append(reducedFeatVec)
# 返回划分后的数据集
return retDataSet
"""
函数说明:选择最优特征
:parameter
dataSet - 数据集
:returns
bestFeature - 信息增益最大的(最优)特征的索引值
"""
def chooseBestFeatureToSplit(dataSet):
# 特征数量
numFeatures = len(dataSet[0]) - 1
# 计算数据集的香农熵
baseEntropy = calcShannonEnt(dataSet)
# 信息增益
bestInfoGain = 0.0
# 最优特征的索引值
bestFeature = -1
# 遍历所有特征
for i in range(numFeatures):
# 获取dataSet的第i个特征
featlist = [example[i] for example in dataSet]
# 创建set集合,去重,元素不可重复
uniquevals = set(featlist)
# 经验条件熵
newEntropy = 0.0
# 计算信息增益
for value in uniquevals:
# subDataSet划分后的子集
subDataSet = splitDataSet(dataSet,i,value)
# 计算子集的概率
prob = len(subDataSet) / float(len(dataSet))
# 根据公式计算经验条件熵
newEntropy += prob * calcShannonEnt(subDataSet)
# 信息增益
infoGain = baseEntropy - newEntropy
# 打印每个特征的信息增益
print("第%d个特征的增益为%.3f" %(i,infoGain))
if (infoGain > bestInfoGain):
# 更新信息增益,找到最大信息增益
bestInfoGain = infoGain
# 记录最大信息增益的特征的索引值
bestFeature = i
# 返回信息增益最大的特征的索引值
return bestFeature
"""
函数说明:统计classList中出现此处最多的元素(类标签)
:parameter
classList - 类标签列表
:returns
sortedClassCount[0][0] - 出现此处最多的元素(类标签)
"""
def majorityCnt(classList):
classCount = {}
# 统计classList中每个元素出现的次数
for vote in classList:
if vote not in classCount.keys():
classCount[vote] = 0
classCount[vote] += 1
sortedClassCount = sorted(classCount.items(),key=operator.itemgetter(1),reverse=True)
# 返回classList中出现次数最多的元素
return sortedClassCount[0][0]
"""
函数说明:创建决策树
:parameter
dataSet - 训练数据集
labels - 分类属性标签
featLabels - 存储选择的最优特征标签
:returns
myTree - 决策树
"""
def createTree(dataSet,labels,featLabels):
# 获取分类标签(是否放贷:yes or no)
classList = [example[-1] for example in dataSet]
# 如果类别完全相同则停止继续划分
if classList.count(classList[0]) == len(classList):
return classList[0]
# 遍历完所有特征时返回出现次数最多的类标签
if len(dataSet[0]) == 1:
return majorityCnt(classList)
# 选择最优特征
bestFeat = chooseBestFeatureToSplit(dataSet)
# 最优特征的标签
bestFeatLabel = labels[bestFeat]
featLabels.append(bestFeatLabel)
# 根据最优特征的标签生成树
myTree = {bestFeatLabel:{}}
# 删除已经使用特征标签
del(labels[bestFeat])
# 得到训练集中所有最优特征的属性值
featValues = [example[bestFeat] for example in dataSet]
#去掉重复的属性值
uniqueVlas = set(featValues)
# 遍历特征,创建决策树
for value in uniqueVlas:
myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet,bestFeat,value),labels,featLabels)
# 返回决策树
return myTree
"""
函数说明:获取决策树叶子节点的数目
:parameter
myTree - 决策树
:returns
numLeafs - 决策树的叶子节点数目
"""
def getNumLeafs(myTree):
# 初始化叶子节点数目
numLeafs = 0
firstStr = next(iter(myTree))
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
numLeafs += getNumLeafs(secondDict[key])
else:
numLeafs += 1
return numLeafs
"""
函数说明:获取决策树层数
:parameter
myTree - 决策树
:returns
maxDepth - 决策树的层数
"""
def getTreeDepth(myTree):
maxDepth = 0
firstStr = next(iter(myTree))
secondDict = myTree[firstStr]
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
thisDepth = 1 + getTreeDepth(secondDict[key])
else:
thisDepth = 1
if thisDepth > maxDepth:
maxDepth = thisDepth
return maxDepth
"""
函数说明:绘制结点
:parameter
nodeTxt - 结点名
centerPt - 文本位置
parentPt - 标注箭头的位置
nodeType - 结点格式
:returns
无
"""
def plotNode(nodeTxt,centerPt,parentPt,nodeType):
arrow_args = dict(arrowstyle="<-")
# 设置汉字格式
font = FontProperties(fname="C:\Windows\Fonts\simsun.ttc", size=14)
# 绘制结点
createPlot.ax1.annotate(nodeTxt,xy=parentPt,xycoords='axes fraction',xytext=centerPt,
textcoords='axes fraction',va='center',ha='center',bbox=nodeType,
arrowprops=arrow_args,FontProperties=font)
"""
函数说明:标注有向边属性值
:parameter
cntrPt、parentPt - 用于计算标注位置
txtString - 标注的内容
:returns
无
"""
def plotMidText(cntrPt,parentPt,txtString):
xMid = (parentPt[0]-cntrPt[0])/2.0 + cntrPt[0]
yMid = (parentPt[1]-cntrPt[1])/2.0 + cntrPt[1]
createPlot.ax1.text(xMid,yMid,txtString,va='center',ha='center',rotation=30)
"""
函数说明:绘制决策树
:parameter
myTree - 决策树(字典)
parentPt - 标注的内容
nodeTxt - 结点名
:returns
无
"""
def plotTree(myTree,parentPt,nodeTxt):
decisionNode = dict(boxstyle="sawtooth",fc="0.8")
leafNode = dict(boxstyle="round4",fc="0.8")
numLeafs = getNumLeafs(myTree)
depth = getTreeDepth(myTree)
# 跟结点
firstStr = next(iter(myTree))
# 中心位置
cntrPt = (plotTree.xOff + (1.0 + float(numLeafs))/2.0/plotTree.totalW,plotTree.yOff)
plotMidText(cntrPt,parentPt,nodeTxt)
plotNode(firstStr,cntrPt,parentPt,decisionNode)
secondDict = myTree[firstStr]
# y偏移
plotTree.yOff = plotTree.yOff - 1.0/plotTree.totalD
for key in secondDict.keys():
if type(secondDict[key]).__name__ == 'dict':
plotTree(secondDict[key],cntrPt,str(key))
else:
plotTree.xOff = plotTree.xOff + 1.0 /plotTree.totalW
plotNode(secondDict[key],(plotTree.xOff,plotTree.yOff),cntrPt,leafNode)
plotMidText((plotTree.xOff,plotTree.yOff),cntrPt,str(key))
plotTree.yOff = plotTree.yOff + 1.0/plotTree.totalD
"""
函数说明:创建绘制面板
:parameter
inTree - 决策树(字典)
:returns
无
"""
def createPlot(inTree):
# 创建fig
fig = plt.figure(1,facecolor="white")
# 清空fig
fig.clf()
# 去掉x,y轴
axprops = dict(xticks=[],yticks=[])
createPlot.ax1 = plt.subplot(111,frameon=False,**axprops)
plotTree.totalW = float(getNumLeafs(inTree))
plotTree.totalD = float(getTreeDepth(inTree))
plotTree.xOff = -0.5/plotTree.totalW
plotTree.yOff = 1.0
plotTree(inTree,(0.5,1.0),'')
plt.show()
"""
函数说明:使用决策树分类
:parameter
inputTree - 已经生成的决策树
feaLabels - 存储选择的最优特征标签
testVec - 测试数据列表,顺序对应最优特征标签
:returns
classLabel - 分类结果
"""
def classify(inputTree,featLabels,testVec):
firstStr = next(iter(inputTree)) # 获取决策树结点
secondDict = inputTree[firstStr] # 下一个字典
featIndex = featLabels.index(firstStr)
for key in secondDict.keys():
if testVec[featIndex] == key:
if type(secondDict[key]).__name__ == 'dict':
classLabel = classify(secondDict[key], featLabels, testVec)
else:
classLabel = secondDict[key]
return classLabel
"""
函数说明:存储决策树
:parameter
inputTree - 已经生成的决策树
filename - 决策树的存储文件名
:returns
无
"""
def storeTree(inputTree,filename):
if not os.path.exists(filename.split('/')[0]):
os.mkdir('data')
if not os.path.isfile(filename):
with open(filename,'wb+') as fw:
pickle.dump(inputTree,fw)
"""
函数说明:读取决策树
:parameter
filename - 决策树的存储文件名
:returns
pickle.load(fr) - 决策树字典
"""
def grabTree(filename):
with open(filename,'rb') as fr:
return pickle.load(fr)
if __name__ == '__main__':
    # NOTE(review): time.clock() was removed in Python 3.8; this only runs
    # on older interpreters. time.perf_counter() is the replacement.
    start = time.clock()
    dataSet, labels = createDataSet()
    featLabels = []
    myTree = createTree(dataSet, labels, featLabels)
    # Persist the tree, then load it back to demonstrate round-tripping.
    storeTree(myTree, 'data/classifierStorage')
    myTree = grabTree('data/classifierStorage')
    print(myTree)
    # createPlot(myTree)
    # Feature values for one sample, ordered to match featLabels.
    testVec = [0, 1]
    result = classify(myTree, featLabels, testVec)
    if result == 'yes':
        print("可以放贷")       # grant the loan
    elif result == 'no':
        print("不能放贷")       # deny the loan
    else:
        print('未知结果,不放贷')  # unknown result: deny
    # Commented-out sklearn comparison kept for reference:
    # clf = tree.DecisionTreeClassifier(criterion='entropy')
    # label_value = []
    # for i in range(len(dataSet)):
    #     label_value.append(dataSet[i][-1])
    #     dataSet[i] = dataSet[i][:-1]
    #
    # print(dataSet,label_value)
    # clf = clf.fit(dataSet,label_value)
    # print(clf.predict([[0,0,0,2]]))
    end = time.clock()
    print("final is in", end-start)
"liuyang@chengyiwm.com"
] | liuyang@chengyiwm.com |
fa396f3d732869e588ebacde8dbe60d664cd334c | a2839839c4d7ab8d673942aa7b616a9206f2658f | /models/lime_nlp_utils.py | c9f0ead6e9c79ef8c282cce07d8cd65c2bfd155f | [] | no_license | Nanjangpan/XAI | fa1b5216ffd774e5cb83741d74ba984a991d35fd | 829f1e108aa2e291f17a2b18fcf01aa86ddf0922 | refs/heads/master | 2022-12-27T14:01:35.011200 | 2020-10-19T05:39:52 | 2020-10-19T05:39:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,365 | py | from keras.preprocessing.sequence import pad_sequences
from keras.preprocessing.text import Tokenizer
from keras.models import Sequential
from keras.layers import Dense, Embedding, Bidirectional, LSTM
from sklearn.pipeline import TransformerMixin
from sklearn.base import BaseEstimator
import numpy as np
class TextsToSequences(Tokenizer, BaseEstimator, TransformerMixin):
    """sklearn-style transformer wrapping a Keras Tokenizer: fit() builds
    a vocabulary from raw texts, transform() maps texts to id sequences."""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def fit(self, texts, y=None):
        # Build the word index from the training texts.
        self.fit_on_texts(texts)
        return self

    def transform(self, texts, y=None):
        # NOTE(review): the sequences are ragged, so np.array() yields a
        # dtype=object array until padded - confirm downstream handling.
        return np.array(self.texts_to_sequences(texts))
class Padder(BaseEstimator, TransformerMixin):
    """Pad/truncate integer sequences to a fixed length *maxlen* and zero
    out token ids that were not seen during fit (ids above max_index)."""
    def __init__(self, maxlen=500):
        self.maxlen = maxlen
        self.max_index = None  # largest token id seen at fit time

    def fit(self, X, y=None):
        # Remember the largest id present in the padded training data.
        self.max_index = pad_sequences(X, maxlen=self.maxlen).max()
        return self

    def transform(self, X, y=None):
        X = pad_sequences(X, maxlen=self.maxlen)
        # Map out-of-vocabulary ids to the padding id 0.
        X[X > self.max_index] = 0
        return X
def create_model(max_features):
    """Build a binary text classifier: embedding -> bidirectional LSTM ->
    single sigmoid output, compiled with Adam and binary cross-entropy.

    :param max_features: vocabulary size for the embedding layer
    :return: the compiled Keras Sequential model
    """
    model = Sequential()
    model.add(Embedding(max_features, 128))
    model.add(Bidirectional(LSTM(128, dropout=0.5, recurrent_dropout=0.5)))
    model.add(Dense(1, activation='sigmoid'))
    model.compile('adam', 'binary_crossentropy', metrics=['accuracy'])
    return model
| [
"hamacojr@yonsei.ac.kr"
] | hamacojr@yonsei.ac.kr |
f7fa5c987441f976dde82360c889c4b93fddc09c | 5085579e6b58108b9a3ecd9740b5f2c01f4333c3 | /LinearRegression/batch_gradient_descent.py | 6670aee240537094eac2b0c4746bb5fc978aed9a | [] | no_license | Paul-Wissler/cs-6350-hw4 | b08f8eecb6251b4f39257d90e3c890d6534a7721 | 4745e1acf813c9238da284d0f0f5422f191d2794 | refs/heads/main | 2023-09-06T07:50:27.144412 | 2021-11-20T06:38:12 | 2021-11-20T06:38:12 | 424,767,427 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,220 | py | import numpy as np
import pandas as pd
class BatchGradientDescentModel:
def __init__(self, X: pd.DataFrame, y: pd.Series, rate=0.1, convergence_threshold=1e-6, max_rounds=100, bias=0):
X['MODEL_BIAS'] = 1
self.X = X.copy()
self.y = y.copy()
self.max_rounds = max_rounds
self.rate = rate
self.convergence_threshold = convergence_threshold
self.cost_of_each_step = pd.Series()
self.weights = self.create_model(X.copy(), y.copy(), bias)
def create_model(self, X: pd.DataFrame, y: pd.Series, bias: float):
w = pd.Series([0] * len(X.columns), index=X.columns)
self.cost_of_each_step = pd.Series(
[self.compute_cost(self.X.copy(), self.y.copy(), w)]
).reset_index(drop=True)
self.convergence_of_weights = pd.Series()
w.loc['MODEL_BIAS'] = bias
i = 0
while i <= self.max_rounds + 1:
if i == self.max_rounds:
print('WARNING: Model failed to converge')
return w
i += 1
w = self.compute_new_weights(self.compute_gradient(w), w)
if self.convergence_of_weights.iloc[-1] < self.convergence_threshold:
return w
return w
def compute_new_weights(self, gradient: pd.Series, weights: pd.Series) -> pd.Series:
new_weights = weights - self.rate * gradient
new_weights.name = 'weights'
self.convergence_of_weights = (
self.convergence_of_weights.append(
pd.Series([self.compute_norm(new_weights - weights)]),
ignore_index=True
).reset_index(drop=True)
)
self.cost_of_each_step = (
self.cost_of_each_step.append(
pd.Series([self.compute_cost(self.X.copy(), self.y.copy(), new_weights)]),
ignore_index=True
).reset_index(drop=True)
)
return new_weights
def compute_gradient(self, weights: pd.Series) -> pd.Series:
gradient = pd.Series(index=weights.index, name='gradient')
for col, _ in gradient.iteritems():
gradient[col] = self.compute_dJ_dw_j(weights.copy(), col)
return gradient
def compute_dJ_dw_j(self, weights: pd.Series, col: str) -> float:
x_i_multiply_w = np.dot(self.X.to_numpy(), weights.to_numpy())
error = self.y.to_numpy() - x_i_multiply_w
return -np.dot(error, self.X[col].to_numpy())
def compute_cost(self, X, y, weights: pd.Series) -> float:
X['MODEL_BIAS'] = weights['MODEL_BIAS']
x_i_multiply_w = np.dot(X.to_numpy(), weights.to_numpy())
error = np.square(y.to_numpy() - x_i_multiply_w)
return np.sum(0.5 * np.square(error))
def compute_mean_error(self, weights: pd.Series) -> np.ndarray:
e = self.compute_point_error(weights)
return np.abs(np.mean(e))
# TODO: Implement in other functions?
def compute_point_error(self, weights: pd.Series) -> np.ndarray:
x_i_multiply_w = np.dot(self.X.to_numpy(), weights.to_numpy())
return self.y.to_numpy() - x_i_multiply_w
@staticmethod
def compute_norm(x) -> float:
return np.linalg.norm(x)
| [
"u0791342@utah.edu"
] | u0791342@utah.edu |
e409354eda49035b536afe57d5e5b4b5a86b643f | 2321ea62872af285a494119662af95808fd20f58 | /gribi/src/util/util.py | e9f160cd2e7832fffa6256fdf41204c2c1355d21 | [] | no_license | nanog75/code-samples | 45f47eb9038dfb428686cdae8f0063dfaa437262 | a64e8ab62844abc2b32f1a168ebbba31b35ad43d | refs/heads/master | 2020-04-21T19:49:46.102278 | 2019-02-17T16:50:44 | 2019-02-17T16:50:44 | 169,821,203 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,666 | py | #
# Copyright (c) 2016 by cisco Systems, Inc.
# All rights reserved.
#
# Standard python libs
import os
import ipaddress
#
# Get the GRPC Server IP address and port number
#
def get_server_ip_port():
    """Read the gRPC server address from the SERVER_IP / SERVER_PORT
    environment variables.

    Prints a usage hint and exits the process when either variable is
    missing.  Returns a (ip, port) tuple with port converted to int.
    """
    env = os.environ
    if 'SERVER_IP' not in env:
        print("Need to set the SERVER_IP env variable e.g.")
        print("export SERVER_IP='10.30.110.214'")
        os._exit(0)
    if 'SERVER_PORT' not in env:
        print("Need to set the SERVER_PORT env variable e.g.")
        print("export SERVER_PORT='57777'")
        os._exit(0)
    return env['SERVER_IP'], int(env['SERVER_PORT'])
#
# Increment a v4 or v6 prefix
#
def util_inc_prefix(prefix, prefix_len, num=1, af=4):
    """Increment an integer-encoded v4/v6 prefix by *num* subnets.

    :param prefix: integer form of the address (int(ipaddress.ip_address(...)))
    :param prefix_len: prefix length in bits
    :param num: how many prefix-sized increments to apply; the original
                ignored this argument, so num=1 keeps old behaviour
    :param af: address family, 4 or 6
    :return: the incremented integer prefix

    Exits the process when prefix_len exceeds the family maximum,
    matching the original behaviour.
    """
    prefix_max_len = 32 if af == 4 else 128
    if prefix_len > prefix_max_len:
        print("prefix_len %d > max %d" %(prefix_len, prefix_max_len))
        os._exit(0)
    # Size of one prefix-aligned block.
    step = 1 << (prefix_max_len - prefix_len)
    return prefix + num * step
if __name__ == '__main__':
    # Demo: increment a /24 v4 prefix and a /24 v6 prefix four times each.
    prefix_len = 24
    prefix4 = int(ipaddress.ip_address("10.0.0.0"))
    prefix6 = int(ipaddress.ip_address("10::"))
    for i in range(4):
        prefix4 = util_inc_prefix(prefix4, prefix_len, 1, 4)
        print("Prefix %s" %(str(ipaddress.ip_address(prefix4))))
        prefix6 = util_inc_prefix(prefix6, prefix_len, 1, 6)
        print("Prefix %s" %(str(ipaddress.ip_address(prefix6))))
    # Show the configured gRPC endpoint from the environment.
    server_ip, server_port = get_server_ip_port()
    print("Using GRPC Server IP(%s) Port(%s)" %(server_ip, server_port))
| [
"echen102@gmail.com"
] | echen102@gmail.com |
69125d0d670089b391b47812638b43f7c459c0b5 | 396f93d8e73c419ef82a94174815a2cecbb8334b | /.history/tester2_20200322174510.py | 7aa63cf5264177dcd88e4e6fbb297d2a899aa036 | [] | no_license | mirfarzam/ArtificialIntelligence-HeuristicAlgorithm-TabuSearch | 8c73d9448b916009c9431526864a4441fdeb682a | 90b2dca920c85cddd7c1b3335344ac7b10a9b061 | refs/heads/master | 2021-03-26T21:16:42.561068 | 2020-04-17T21:44:26 | 2020-04-17T21:44:26 | 247,750,502 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,365 | py | import os
import subprocess
import re
from datetime import datetime
import time
from statistics import mean

# Benchmark harness for the tabu-search TSP executable: run it repeatedly
# and summarise how often/when it reaches the known optimum (5644 km).
numberOfTests = 100
tabuIteration = '1000'   # iterations per run
tabuDuration = '40'      # tabu memory length
numberOfCities = '50'

final_solution = []   # best tour length of each run
list_coverage = []    # iteration at which the best solution appeared
local_minimum = []    # suboptimal results (runs that got stuck)

print(f"\n\nTest for Tabu Search with this config: \n\tIterations : {tabuIteration} \n\tDuration(Tabu Memory): {tabuDuration} \n\tNumber of Cities: {numberOfCities}")
for i in range(0, numberOfTests):
    # Run one tabu-search trial and capture its stdout.
    process = subprocess.Popen(['./algo_tabou.exe', tabuIteration, tabuDuration, numberOfCities, 'distances_entre_villes_{}.txt'.format(numberOfCities)], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout, stderr = process.communicate()
    result = stdout
    # Flatten whitespace so the regexes match across line breaks.
    result = re.sub(r'\s', ' ', str(result))
    # The last reported distance is the best tour length of the run.
    solution = (re.findall(r'([0-9]{4,7}) km', result))[-1]
    final_solution.append(int(solution))
    # Iteration where the search reported a local minimum, if any.
    coverage = re.findall(r'On est dans un minimum local a l\'iteration ([0-9]+) ->', result)
    if coverage != []:
        coverage = int(coverage[0])+ 1
        if int(solution) != 5644:  # 5644 km = known optimum for this instance
            local_minimum.append(int(solution))
    else:
        coverage = int(tabuIteration)
    number_of_solution_before_coverage = coverage
    list_coverage.append(coverage)
    print('{} : best found solution is {} and found in interation {}, number of solutions before coverage : {}'.format(i, solution, coverage, number_of_solution_before_coverage))
    time.sleep( 1 )

print("Summary:")
optimum_result = len(list(filter(lambda x: x == 5644, final_solution)))
print(f'number of optimum solution found is {optimum_result}, so in {numberOfTests} runs of test we faced {(optimum_result/numberOfTests)*100}% coverage')
# NOTE(review): the "best case" below also prints max(list_coverage);
# min(list_coverage) was probably intended (string left unchanged here).
print(f'in average this test shows that we found the global optimum solution in iteration {mean(list_coverage)}\nand in worst we found it in iteration {max(list_coverage)} \nand in best case in iteration {max(list_coverage)}')
print(f'Totally, {sum(list_coverage)} cities visited before finding the global optimum in {numberOfTests} runs of this test\n\n\n')
# De-duplicate while preserving first-seen order.
unique_local_minimum = list(dict.fromkeys(local_minimum))
print(f'totally the algorithm was stuck in local optimum {len(local_minimum)} times \nthere are {len(unique_local_minimum)} unique local minimum \nthe best local optimum is {min(unique_local_minimum)} \nthe worst local optimum is {max(unique_local_minimum)}')
"farzam.mirmoeini@gmail.com"
] | farzam.mirmoeini@gmail.com |
39594fdee131460d72ed27b8368bb94bb8fdcf98 | 50203b4a349dcb2ed1e72c9f5463d84db8a6e983 | /skyline/webapp/luminosity_cloudbursts.py | 0048fa8b6fb7fcecf903537da9ede688c880d75c | [
"MIT"
] | permissive | earthgecko/skyline | 97e43df824d7c92d68086f529f0f3d051a7debb0 | c2edc451e63d5eb57117ddcfbc6e79100e706460 | refs/heads/master | 2023-08-30T08:36:50.740285 | 2023-06-28T15:33:47 | 2023-06-28T15:33:47 | 20,475,900 | 482 | 74 | NOASSERTION | 2023-06-28T15:33:49 | 2014-06-04T08:33:15 | Python | UTF-8 | Python | false | false | 12,756 | py | import logging
import os
from ast import literal_eval
import traceback
import settings
import skyline_version
from skyline_functions import get_redis_conn_decoded
from matched_or_regexed_in_list import matched_or_regexed_in_list
from sqlalchemy.sql import select
from database import get_engine, cloudburst_table_meta
# @added 20221103 - Task #2732: Prometheus to Skyline
# Branch #4300: prometheus
from functions.metrics.get_base_name_from_labelled_metrics_name import get_base_name_from_labelled_metrics_name
skyline_version = skyline_version.__absolute_version__
skyline_app = 'webapp'
skyline_app_logger = '%sLog' % skyline_app
logger = logging.getLogger(skyline_app_logger)
skyline_app_logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
logfile = '%s/%s.log' % (settings.LOG_PATH, skyline_app)
try:
ENABLE_WEBAPP_DEBUG = settings.ENABLE_WEBAPP_DEBUG
except EnvironmentError as outer_err:
logger.error('error :: cannot determine ENABLE_WEBAPP_DEBUG from settings - %s' % outer_err)
ENABLE_WEBAPP_DEBUG = False
this_host = str(os.uname()[1])
def get_filtered_metrics(redis_conn_decoded, namespaces):
    """
    Return the list of known metric base names matching any of the passed
    namespaces.

    :param redis_conn_decoded: a decoded Redis connection object
    :param namespaces: the namespace strings/patterns to match against
    :type namespaces: list
    :return: list of matching metric base names
    :rtype: list
    """
    function_str = 'get_cloudbursts :: get_filtered_metrics'
    filter_by_metrics = []
    redis_key = 'analyzer.metrics_manager.db.metric_names'
    unique_base_names = []
    try:
        unique_base_names = list(redis_conn_decoded.smembers(redis_key))
        if unique_base_names:
            logger.info('%s :: got %s unique_base_names' % (
                function_str, str(len(unique_base_names))))
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed to get Redis key %s - %s' % (
            function_str, redis_key, err))
        raise
    for base_name in unique_base_names:
        try:
            # Second tuple element (the matching pattern) is not needed here.
            pattern_match, _ = matched_or_regexed_in_list(skyline_app, base_name, namespaces)
            if pattern_match:
                filter_by_metrics.append(base_name)
        except Exception as err:
            logger.error(traceback.format_exc())
            # Fix: the previous message wrongly reported a Redis key failure;
            # the failure here is the namespace pattern match.
            logger.error('error :: %s :: matched_or_regexed_in_list failed for %s - %s' % (
                function_str, base_name, err))
    return filter_by_metrics
def get_metric_ids(redis_conn_decoded, filter_by_metrics):
    """
    Resolve the passed metric base names to their ids and return the ids
    together with the full name -> id mapping from Redis.

    Fix: the previous docstring documented parameters (namespaces,
    from_timestamp, until_timestamp) that this function does not take.

    :param redis_conn_decoded: a decoded Redis connection object
    :param filter_by_metrics: metric base names to resolve to ids
    :type filter_by_metrics: list
    :return: (list of metric ids, dict of metric_names_with_ids)
    :rtype: (list, dict)
    """
    function_str = 'get_cloudbursts :: get_metric_ids'
    metric_ids = []
    try:
        metric_names_with_ids = redis_conn_decoded.hgetall('aet.metrics_manager.metric_names_with_ids')
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed to get Redis hash aet.metrics_manager.metric_names_with_ids - %s' % (
            function_str, err))
        raise
    for base_name in filter_by_metrics:
        try:
            metric_ids.append(int(metric_names_with_ids[base_name]))
        except Exception as err:
            # A metric missing from the hash is logged and skipped rather
            # than aborting the whole lookup.
            logger.error(traceback.format_exc())
            logger.error('error :: %s :: failed to add metric id to metric_ids for %s from metric_names_with_ids - %s' % (
                function_str, base_name, err))
    return (metric_ids, metric_names_with_ids)
def get_cloudbursts(metric, namespaces, from_timestamp, until_timestamp):
    """
    Get create a dict of all the cloudbursts.

    :param metric: the name of the metric
    :param namespaces: the namespaces to match
    :param from_timestamp: the from_timestamp
    :param until_timestamp: the until_timestamp
    :type metric: str
    :type namespaces: list
    :type from_timestamp: int
    :type until_timestamp: int
    :return: dict of cloudbursts
    :rtype: {}

    Returns a dict of cloudbursts
    {
        "cloudbursts": {
            <id>: {
                'metric_id': <int>,
                'metric': <str>,
                'timestamp': <int>,
                'end': <int>,
                'duration': <int>,
                'from_timestamp': <int>,
                'resolution': <int>,
                'full_duration': <int>,
                'anomaly_id': <int>,
                'match_id': <int>,
                'fp_id': <int>,
                'layer_id': <int>,
                'added_at': <int>,
            },
        }
    }
    """
    function_str = 'get_cloudbursts'
    cloudbursts_dict = {}
    engine = None
    metric_ids = []
    use_filter_by_metrics = False
    filter_by_metrics = []
    metric_names_with_ids = {}
    ids_with_metric_names = {}
    logger.info(
        'get_cloudbursts - metric: %s, namespaces: %s, from_timestamp: %s, until_timestamp: %s' % (
            str(metric), str(namespaces), str(from_timestamp),
            str(until_timestamp)))
    # A specific metric narrows the query to that single metric; 'all'
    # queries every known metric (optionally filtered by namespaces).
    if metric != 'all':
        # @added 20221103 - Task #2732: Prometheus to Skyline
        # Branch #4300: prometheus
        use_metric = str(metric)
        if metric.startswith('labelled_metrics.'):
            # Labelled (Prometheus-style) names must be resolved back to
            # their base name before any Redis/DB lookups.
            use_metric = str(metric)
            try:
                metric = get_base_name_from_labelled_metrics_name(skyline_app, use_metric)
            except Exception as err:
                logger.error(traceback.format_exc())
                logger.error('error :: %s :: get_base_name_from_labelled_metrics_name failed with %s - %s' % (
                    function_str, metric, err))
                raise
            logger.info(
                'get_cloudbursts - looked up %s to metric: %s' % (
                    use_metric, str(metric)))
            if not metric:
                logger.error('error :: %s :: failed to look up metric' % function_str)
                raise ValueError('failed to look up metric')
        filter_by_metrics = [metric]
        use_filter_by_metrics = True
        if not namespaces:
            namespaces = [metric]
        if namespaces == ['all']:
            namespaces = [metric]
    try:
        redis_conn_decoded = get_redis_conn_decoded(skyline_app)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: get_redis_conn_decoded failed - %s' % (
            function_str, err))
        raise
    # Expand the namespaces into the concrete list of metrics to query.
    filter_by_metrics = get_filtered_metrics(redis_conn_decoded, namespaces)
    if namespaces:
        use_filter_by_metrics = True
    if metric != 'all':
        try:
            metric_id = int(redis_conn_decoded.hget('aet.metrics_manager.metric_names_with_ids', metric))
            if metric_id:
                metric_names_with_ids[metric] = metric_id
                metric_ids.append(metric_id)
        except Exception as err:
            logger.error(traceback.format_exc())
            logger.error('error :: %s :: failed to get %s from Redis hash aet.metrics_manager.metric_names_with_ids - %s' % (
                function_str, metric, err))
            raise
    metric_ids, metric_names_with_ids = get_metric_ids(redis_conn_decoded, filter_by_metrics)
    if len(filter_by_metrics) > 1:
        use_filter_by_metrics = True
    # Build the reverse id -> name map used to label each result row.
    for base_name in list(metric_names_with_ids.keys()):
        metric_id = int(metric_names_with_ids[base_name])
        ids_with_metric_names[metric_id] = base_name
    try:
        engine, log_msg, trace = get_engine(skyline_app)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed to get engine - %s' % (
            function_str, err))
        raise
    try:
        cloudburst_table, log_msg, trace = cloudburst_table_meta(skyline_app, engine)
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed to get cloudburst_table - %s' % (
            function_str, err))
        raise
    try:
        connection = engine.connect()
        # Compose the SELECT: optionally restricted to the resolved metric
        # ids and to the timestamp window (0 means unbounded on that side).
        if use_filter_by_metrics:
            stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids))
            if from_timestamp > 0 and until_timestamp == 0:
                stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids)).\
                    where(cloudburst_table.c.timestamp >= from_timestamp)
            if from_timestamp == 0 and until_timestamp > 0:
                stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids)).\
                    where(cloudburst_table.c.timestamp <= until_timestamp)
            if from_timestamp > 0 and until_timestamp > 0:
                stmt = select([cloudburst_table], cloudburst_table.c.metric_id.in_(metric_ids)).\
                    where(cloudburst_table.c.timestamp >= from_timestamp).\
                    where(cloudburst_table.c.timestamp <= until_timestamp)
        else:
            stmt = select([cloudburst_table])
            if from_timestamp > 0 and until_timestamp == 0:
                stmt = select([cloudburst_table]).\
                    where(cloudburst_table.c.timestamp >= from_timestamp)
            if from_timestamp == 0 and until_timestamp > 0:
                stmt = select([cloudburst_table]).\
                    where(cloudburst_table.c.timestamp <= until_timestamp)
            if from_timestamp > 0 and until_timestamp > 0:
                stmt = select([cloudburst_table]).\
                    where(cloudburst_table.c.timestamp >= from_timestamp).\
                    where(cloudburst_table.c.timestamp <= until_timestamp)
        results = connection.execute(stmt)
        for row in results:
            # @modified 20230126
            # Wrapped in try except
            try:
                cloudburst_id = row['id']
                metric_id = row['metric_id']
                cloudbursts_dict[cloudburst_id] = dict(row)
                try:
                    cloudbursts_dict[cloudburst_id]['metric'] = ids_with_metric_names[metric_id]
                except KeyError:
                    # Unknown id: fall back to a synthetic labelled name.
                    use_metric_name = 'labelled_metrics.%s' % str(metric_id)
                    logger.warning('warning :: %s :: failed to find metric name for metric id in ids_with_metric_names, using %s' % (
                        function_str, use_metric_name))
                    cloudbursts_dict[cloudburst_id]['metric'] = use_metric_name
            except Exception as err:
                logger.error('error :: %s :: failed to iterate row from cloudburst_table - %s' % (
                    function_str, err))
        connection.close()
    except Exception as err:
        logger.error(traceback.format_exc())
        logger.error('error :: %s :: failed to get cloudburst_table - %s' % (
            function_str, err))
        raise
    # Reorder the dict keys for the page
    # Put 'id' then 'metric' first in each entry, then present the entries
    # newest-first for the UI.
    cloudbursts_dict_keys = []
    key_ordered_cloudbursts_dict = {}
    if cloudbursts_dict:
        cloudburst_ids = list(cloudbursts_dict.keys())
        first_cloudburst_id = cloudburst_ids[0]
        for key in list(cloudbursts_dict[first_cloudburst_id].keys()):
            cloudbursts_dict_keys.append(key)
        for cloudburst_id in cloudburst_ids:
            key_ordered_cloudbursts_dict[cloudburst_id] = {}
            for key in cloudbursts_dict[cloudburst_id]:
                if key == 'id':
                    key_ordered_cloudbursts_dict[cloudburst_id][key] = cloudbursts_dict[cloudburst_id][key]
                    key_ordered_cloudbursts_dict[cloudburst_id]['metric'] = cloudbursts_dict[cloudburst_id]['metric']
            for key in cloudbursts_dict[cloudburst_id]:
                if key not in ['id', 'metric']:
                    key_ordered_cloudbursts_dict[cloudburst_id][key] = cloudbursts_dict[cloudburst_id][key]
        cloudbursts_dict = key_ordered_cloudbursts_dict
        cloudburst_ids = list(cloudbursts_dict.keys())
        cloudburst_ids.reverse()
        desc_cloudbursts_dict = {}
        for c_id in cloudburst_ids:
            desc_cloudbursts_dict[c_id] = cloudbursts_dict[c_id]
        cloudbursts_dict = desc_cloudbursts_dict
    logger.info('%s :: found %s cloudbursts' % (
        function_str, str(len(list(cloudbursts_dict.keys())))))
    return cloudbursts_dict
| [
"gary.wilson@of-networks.co.uk"
] | gary.wilson@of-networks.co.uk |
48304405d3ebc5b199122ef7bc0b215800dc6568 | a1cc77c55a2fc8a87c9ff72372dfd9ae2bf6e525 | /testandoSocket.py | 95a85234a3d9a7a4c8c1b192ab6323b3dd6edee6 | [] | no_license | vitorrios1001/projetoIntegrador | f591116c8eec4ea8909a043012085ed7d40191c2 | 6872b1465e2337ccdc4f519af5a354d35cb35b8c | refs/heads/master | 2021-08-23T12:31:31.163245 | 2017-12-04T22:32:25 | 2017-12-04T22:32:25 | 111,944,801 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,589 | py | import random
from time import sleep
from Queue import *
from threading import Thread, Lock
import httplib, urllib, base64, json
from flask import Flask, request
import requests
import socket
import os
import sys
import pickle
import backports
class Node(object):
    """A unit of work pairing a processing host endpoint with the URL it
    should process."""

    def __init__(self, host, url):
        """Store the payload URL and the target host for this job."""
        self.url = url
        self.host = host
# NOTE: `global` at module level is a no-op; these statements only document
# that the names below are module-level state shared with the worker threads.
global processamento
global fila
global processado
app = Flask(__name__)
@app.route("/processamento/", methods=['GET', 'POST'])
def processamento():
    """POST handler: fan the posted URL out to a fixed list of processing
    hosts via worker threads and return the first JSON response.

    NOTE(review): the shared global `processamento` (same name as this view)
    is set by ProcessaImagem to the winning host's JSON string; a GET request
    falls through and implicitly returns None.
    """
    if request.method == "POST":
        global processado
        global processamento
        global fila
        fila= Queue(Node)
        url = request.data
        urlStr = str(url)
        print(urlStr)
        #r = requests.post('http://localhost:5000/node/', data = teste)
        host1 = 'http://172.10.10.100:5000/node/'
        host2 = 'http://172.10.10.102:5000/node/'
        listaDeHosts = [host1,host2]
        # Queue one Node job per processing host.
        for host in listaDeHosts:
            print 'Colocando o host '+host+' na fila'
            node = Node(host,urlStr)
            fila.put(node)
        print(fila)
        processado = False
        # Start one daemon worker thread per host to consume the queue.
        for node in listaDeHosts:
            teste = Thread(target=ProcessaImagem)
            teste.setDaemon(True)
            teste.start()
            print 'Eu startei a thread com host: '+node
        i = 0
        # NOTE(review): `i` is never incremented, so this loops on join()
        # until the queue drains.
        while i < fila.qsize():
            fila.join()
        print "Finalizou"
        processou = False
        # Busy-wait until a worker flags the first completed response.
        while not processou:
            if processado == True:
                r = processamento
                processou = True
                print(processamento)
        return processamento
#funcao para consumir a fila
def ProcessaImagem():
global processado
global processamento
job = fila.get()
sleep(random.randint(1,3))
print 'Sou o host: '+job.host
r = requests.post(job.host, data = job.url )
if processado == False:
processado = True
resp = r.json()
jstr = json.dumps(resp,ensure_ascii=True, indent=2)
processamento = jstr
fila.queue.clear
print 'Pronto'
fila.task_done() #finaliza o job
if __name__ == "__main__":
    # Run the Flask dispatcher bound to this node's LAN address.
    app.run(debug=True, port=3000,host="172.10.10.102")
| [
"vitorluizrios@hotmail.com"
] | vitorluizrios@hotmail.com |
29fda71564cb74768b4e54cdb0a2d771dcd29ecd | 8767db24eb71eb0fa45a3c8ad6724d453ef8516c | /accounts/accounts/urls.py | 8652fae07013aec153295105377a73e3ece99435 | [] | no_license | grilhami/Django-Simple-Login-Signup | ea2c90915c9ecd1eefcc445bc542faff240a6d41 | bfa1c4a5de632f1d25de8c52cdd2197c641df136 | refs/heads/master | 2022-05-03T06:49:26.292363 | 2019-06-12T19:53:45 | 2019-06-12T19:53:45 | 191,609,026 | 0 | 0 | null | 2022-04-22T21:32:15 | 2019-06-12T16:37:44 | Python | UTF-8 | Python | false | false | 1,001 | py | """accounts URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.views.generic.base import TemplateView
# URL routes: admin site, the local signup app, Django's built-in auth views
# (login/logout/password management) and a static home page.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('accounts/', include('account_signup.urls')),
    path('accounts/', include('django.contrib.auth.urls')),
    path('', TemplateView.as_view(template_name='home.html'), name='home'),
]
| [
"gigilangilhami@gmail.com"
] | gigilangilhami@gmail.com |
8920f1e323513222adafbd77853f23a6f87ca1e2 | d3efc82dfa61fb82e47c82d52c838b38b076084c | /Autocase_Result/GPMM/YW_GPMM_SZSJ_287.py | dd5bdfe7fc378edf0289633f7e8debff4b950338 | [] | no_license | nantongzyg/xtp_test | 58ce9f328f62a3ea5904e6ed907a169ef2df9258 | ca9ab5cee03d7a2f457a95fb0f4762013caa5f9f | refs/heads/master | 2022-11-30T08:57:45.345460 | 2020-07-30T01:43:30 | 2020-07-30T01:43:30 | 280,388,441 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,063 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.append("/home/yhl2/workspace/xtp_test/xtp/api")
from xtp_test_case import *
sys.path.append("/home/yhl2/workspace/xtp_test/service")
from ServiceConfig import *
from mainService import *
from QueryStkPriceQty import *
from log import *
sys.path.append("/home/yhl2/workspace/xtp_test/mysql")
from CaseParmInsertMysql import *
sys.path.append("/home/yhl2/workspace/xtp_test/utils")
from QueryOrderErrorMsg import queryOrderErrorMsg
class YW_GPMM_SZSJ_287(xtp_test_case):
    # YW_GPMM_SZSJ_287
    def test_YW_GPMM_SZSJ_287(self):
        """Shenzhen A-share forward-best sell order with quantity =
        available shares + 100; the order is expected to be rejected."""
        title = '深A本方最优卖(卖出数量=可用股份数+100)'
        # Expected values for this test case.
        # Possible order states: initial, unfilled, partially filled, fully
        # filled, partial-cancel reported, partially cancelled, reported
        # pending cancel, cancelled, rejected, cancel-rejected, internal cancel.
        # xtp_ID and cancel_xtpID default to 0 and need not be changed.
        case_goal = {
            '期望状态': '废单',
            'errorID': 11010121,
            'errorMSG': queryOrderErrorMsg(11010121),
            '是否生成报单': '是',
            '是否是撤废': '否',
            'xtp_ID': 0,
            'cancel_xtpID': 0,
        }
        logger.warning(title)
        # Build the order parameters ------------------------------------------
        # Args: ticker, market, security type, security status, trading
        # status, side (B buy / S sell), expected state, Api.
        stkparm = QueryStkPriceQty('003123', '2', '0', '2', '0', 'S', case_goal['期望状态'], Api)
        # If fetching the order parameters fails, the test case fails.
        if stkparm['返回结果'] is False:
            rs = {
                '用例测试结果': stkparm['返回结果'],
                '测试错误原因': '获取下单参数失败,' + stkparm['错误原因'],
            }
            self.assertEqual(rs['用例测试结果'], True)
        else:
            wt_reqs = {
                'business_type': Api.const.XTP_BUSINESS_TYPE['XTP_BUSINESS_TYPE_CASH'],
                'order_client_id':trade_type + 1,
                'market': Api.const.XTP_MARKET_TYPE['XTP_MKT_SZ_A'],
                'ticker': stkparm['证券代码'],
                'side': Api.const.XTP_SIDE_TYPE['XTP_SIDE_SELL'],
                'price_type': Api.const.XTP_PRICE_TYPE['XTP_PRICE_FORWARD_BEST'],
                'price': stkparm['涨停价'],
                'quantity': 100100,
                'position_effect': Api.const.XTP_POSITION_EFFECT_TYPE['XTP_POSITION_EFFECT_INIT']
            }
            ParmIni(Api, case_goal['期望状态'], wt_reqs['price_type'])
            CaseParmInsertMysql(case_goal, wt_reqs)
            rs = serviceTest(Api, case_goal, wt_reqs)
            logger.warning('执行结果为' + str(rs['用例测试结果']) + ','
                           + str(rs['用例错误源']) + ',' + str(rs['用例错误原因']))
            self.assertEqual(rs['用例测试结果'], True)  # 0
if __name__ == '__main__':
unittest.main()
| [
"418033945@qq.com"
] | 418033945@qq.com |
7ddfbc1bd0cd3d5dbdcf9c8efc5b97a8801d55d8 | a3f27a123e0161b21baf1a077f10ae2114e88371 | /DataCleaner.py | 05412ae5cfc98b1765d4f31ed61c907ab382d7cf | [] | no_license | davidschulte/bads2021 | a7446a0939e765630f86043f80880eafab2437e8 | 385e3fe665ea905397e01748d8116255ceb54cf1 | refs/heads/main | 2023-08-17T17:24:05.465080 | 2021-10-01T14:23:16 | 2021-10-01T14:23:16 | 412,450,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,286 | py | import pandas as pd
import numpy as np
class DataCleaner:
def __init__(self):
self.cat_features = ['item_id', 'item_size', 'item_color', 'brand_id', 'user_id', 'user_title', 'user_state']
self.int_features = []
self.float_features = ['item_price']
self.date_features = ['order_date', 'delivery_date', 'user_dob', 'user_reg_date']
self.target = 'return'
def convert_data_types(self, data):
for feature in self.cat_features:
data[feature] = data[feature].astype('category')
for feature in self.date_features:
data[feature] = pd.to_datetime(data[feature])
#data[self.target] = data[self.target].astype('bool')
return data
def create_features_oneline(self, data):
data['delivery_span'] = (data['delivery_date'] - data['order_date']).dt.days
data['user_age'] = (data['order_date'] - data['user_dob']).dt.days / 365
month_days = []
for day in data['order_date']:
month_days.append(day.day)
data['order_month_days'] = month_days
#weekday!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
return data
def create_features_multiline(self, data):
data['order_num_items'] = data.groupby(['user_id', 'order_date'])['user_id'].transform('size').astype(
np.int32)
data['order_num_spec_item'] = data.groupby(['user_id', 'order_date', 'item_id'])['user_id'].transform(
'size').astype(np.int32)
data['order_sum'] = data.groupby(['user_id', 'order_date'])['item_price'].transform('sum').astype(np.float32)
data['user_orders'] = data.groupby(['user_id'])['user_id'].transform('size').astype(np.float32)
data['user_return_rate'] = data.groupby(['user_id'])['return'].transform('mean').astype(np.float32)
data['item_popularity'] = data.groupby(['item_id'])['item_id'].transform('size').astype(np.int32)
data['brand_popularity'] = data.groupby(['brand_id'])['brand_id'].transform('size').astype(np.int32)
data['brand_discount'] = data.groupby(['brand_id'])['item_price'].transform('mean').astype(np.float32) - data[
'item_price']
data['item_color_popularity'] = data.groupby(['item_color'])['item_color'].transform('size').astype(np.int32)
data['item_return_rate'] = data.groupby(['item_id'])['return'].transform('mean').astype(np.float32)
data['color_return_rate'] = data.groupby(['item_color'])['return'].transform('mean').astype(np.float32)
return data
def clean(self, data):
data.loc[data['user_dob'] < "1920-01-01", 'user_dob'] = np.nan
data.loc[data['user_dob'] > "2010-01-01", 'user_dob'] = np.nan
data.loc[data['user_dob'].isnull(), 'user_dob'] = data['user_dob'].quantile(0.5, interpolation="midpoint")
data.loc[data['delivery_span'].isnull(), 'delivery_span'] = data['delivery_span'].quantile(0.5, interpolation="midpoint")
return data
def change_after_multiline(self, data):
data.drop(self.cat_features+self.date_features, axis=1, inplace=True)
return data
from scipy.stats import chi2_contingency
def optimize_grouping(self, cat_feature, target_feature):
'''
Compares differenct encodings of a categorical variable using Chi^2 test.
Input:
- cat_feature: categorical feature to be encoded
- target_feature: target feature
Output:
- vector of Chi^2 statistic values
'''
# Copying features to avoid editing the original DataFrame
cat_feature = cat_feature.copy()
target_feature = target_feature.copy()
# Checking if feature is categorical
if cat_feature.dtype != 'category':
print('Input feature is not categorical. Received feature of type:', cat_feature.dtype)
return
# Placeholders for Chi^2 values and categories
stats = []
cats = []
cats_num = []
# Storing number and values of categories
n_unique = cat_feature.nunique()
cats_num.append(n_unique)
cats.append(cat_feature.cat.categories)
# Performing chi2 test
ct = pd.crosstab(cat_feature, target_feature)
stat, _, _, _ = chi2_contingency(ct)
stats.append(stat)
# Iteratively trying different groupings
for i in range(n_unique - 1):
# Computing odds ratio
ct = pd.crosstab(cat_feature, target_feature)
ct['odds_ratio'] = ct[0] / ct[1]
# Finding min odds ratio difference
ct = ct.sort_values('odds_ratio')
ct['odds_ratio_diff'] = ct['odds_ratio'].diff()
min_idx = np.where(ct['odds_ratio_diff'] == ct['odds_ratio_diff'].min())[0][0]
# Storing levels to merge
levels_to_merge = ct.iloc[(min_idx - 1):(min_idx + 1)].index.values
# Merging two categories with add_categories()
cat_feature.cat.add_categories(['+'.join(str(levels_to_merge))], inplace=True)
for level in levels_to_merge:
cat_feature.loc[cat_feature == level] = '+'.join(str(levels_to_merge))
cat_feature.cat.remove_categories([level], inplace=True)
# Storing number and values of categories after encoding
cats_num.append(cat_feature.nunique())
cats.append(cat_feature.cat.categories)
# Performing chi2 test
ct = pd.crosstab(cat_feature, target_feature)
stat, _, _, _ = chi2_contingency(ct)
stats.append(stat)
# Plotting results
import matplotlib.pyplot as plt
plt.plot(cats_num, stats)
plt.title('Chi^2 Elbow Curve')
plt.ylabel('Chi^2 statistic')
plt.xlabel('Number of categories')
plt.show()
# Printing encodings
for i in range(len(cats)):
print('- {} categories: {}'.format(cats_num[i], cats[i].values))
# Returning Chi^2 values and encodings
return stats, cats | [
"noreply@github.com"
] | noreply@github.com |
89bf67b45eaebe9ba7f531e31d809e0c6a5feea7 | 5d93e2a138b7fc111a68252c036534bf749a956b | /manage.py | ea51b8134ea6da9d8bfc232e7f3f4e11bca729e0 | [] | no_license | amanraj209/tweet-me | ea58b64689028cf8608a2b7284c8500ffe57ef2f | 118a2cb983fa903120e7a9a9e35ec17d13431554 | refs/heads/master | 2021-08-23T22:05:19.142048 | 2017-12-06T19:37:07 | 2017-12-06T19:37:07 | 112,243,448 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
    # Standard Django management entry point for the TweetMe project.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "TweetMe.settings")
    try:
        from django.core.management import execute_from_command_line
    except ImportError:
        # The above import may fail for some other reason. Ensure that the
        # issue is really that Django is missing to avoid masking other
        # exceptions on Python 2.
        try:
            import django
        except ImportError:
            raise ImportError(
                "Couldn't import Django. Are you sure it's installed and "
                "available on your PYTHONPATH environment variable? Did you "
                "forget to activate a virtual environment?"
            )
        raise
    execute_from_command_line(sys.argv)
| [
"amanraj20996@gmail.com"
] | amanraj20996@gmail.com |
0ef8ccc6560e9f1ebf5c4a870ac133a9b3aa372a | 8d31e001f8b053f3083bf0c69566d32d9474072e | /article/migrations/0017_likearticle_likecomment.py | 430488f48f0dd550663bccc266bcb9f75db96ed4 | [] | no_license | Kunduzha/Labwork_LikeForArticle | 1eff23e7bbcf1cc19a9c85be9f5a9a5ffbe6569e | c0e2855a4fea204ec94e2256750b9ba20cfbd7be | refs/heads/master | 2023-04-20T10:48:24.239504 | 2021-05-24T11:47:31 | 2021-05-24T11:47:31 | 368,871,864 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,860 | py | # Generated by Django 3.1.6 on 2021-05-18 10:35
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration: creates the LikeComment and
    # LikeArticle join tables linking users to the comments/articles they
    # liked. Do not edit by hand beyond comments.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('article', '0016_auto_20210412_1456'),
    ]

    operations = [
        migrations.CreateModel(
            name='LikeComment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='like_comment', to='article.comment', verbose_name='комментарий')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_like_comment', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'лайк',
                'verbose_name_plural': 'лайки',
                'db_table': 'like_comment',
            },
        ),
        migrations.CreateModel(
            name='LikeArticle',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('article', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='like_article', to='article.article', verbose_name='Статья')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='user_like_article', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'verbose_name': 'лайк',
                'verbose_name_plural': 'лайки',
                'db_table': 'like_article',
            },
        ),
    ]
| [
"kuzanai@mail.ru"
] | kuzanai@mail.ru |
dfd2c821a4904ec8151b78f22aacd5f87e3ee632 | b97eba3a0b24d2393a517eb19fad0136edcee174 | /dataset/iterator_wrapper.py | 1147681e73d6899d1e91a83a87886601d4011543 | [] | no_license | Z-K-Zhong/Detection | f093c0ef18edff7ed64e11dbcab827539d502be6 | 04a2c319b7412be6ba20ed904f661d69b91361f0 | refs/heads/master | 2020-06-04T15:03:07.771943 | 2019-06-15T11:59:32 | 2019-06-15T11:59:32 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,170 | py | """For loading data into Faster-RCNN models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import tensorflow as tf
__all__ = ["get_infer_iterator_wrapper", "get_iterator_wrapper",
"process_image"]
class DataWrapper(
        collections.namedtuple("DataWrapper",
                               ("initializer",
                                "images_data",
                                "images_size",
                                "bbox_locations"))):
    """Named bundle of dataset-iterator tensors: the iterator initializer
    op, the image tensor, the image size tensor and the ground-truth bbox
    locations (None in inference mode)."""
    pass
def process_image(image, image_format="jpeg"):
    """Decode an encoded image string tensor ("jpeg" or "png"; anything
    else is passed through undecoded) and convert it to float32 in [0, 1]."""
    decoders = {
        "jpeg": tf.image.decode_jpeg,
        "png": tf.image.decode_png,
    }
    decode = decoders.get(image_format)
    if decode is not None:
        image = decode(image)
    return tf.image.convert_image_dtype(image, dtype=tf.float32)
def get_iterator_wrapper(src_dataset,
                         batch_size,
                         image_format="jpeg",
                         num_parallel_calls=4,
                         output_buffer_size=10,
                         image_feature="image/data",
                         size_feature="image/size",
                         coord_xl_feature="bbox/locations/x_l",
                         coord_yt_feature="bbox/locations/y_t",
                         coord_xr_feature="bbox/locations/x_r",
                         coord_yb_feature="bbox/locations/y_b",
                         categories_feature="bbox/categories"):
    """Build a training/eval DataWrapper from a dataset of serialized
    SequenceExamples: parse, decode the image, assemble per-box
    [x_l, y_t, x_r, y_b, category] rows and batch (batch size forced to 1)."""

    def get_feature_description(image_feature_, size_feature_,
                                coord_xl_feature_, coord_yt_feature_,
                                coord_xr_feature_, coord_yb_feature_,
                                categories_feature_):
        # Schemas for tf.parse_single_sequence_example: image bytes in the
        # context, per-box coordinates/categories and image size as sequences.
        context_features_proto = {
            image_feature_: tf.FixedLenFeature([], dtype=tf.string)
        }
        sequence_features_proto = {
            coord_xl_feature_: tf.FixedLenSequenceFeature([], dtype=tf.float32),
            coord_yt_feature_: tf.FixedLenSequenceFeature([], dtype=tf.float32),
            coord_xr_feature_: tf.FixedLenSequenceFeature([], dtype=tf.float32),
            coord_yb_feature_: tf.FixedLenSequenceFeature([], dtype=tf.float32),
            categories_feature_: tf.FixedLenSequenceFeature([], dtype=tf.int64),
            size_feature_: tf.FixedLenSequenceFeature([], dtype=tf.int64),
        }
        return context_features_proto, sequence_features_proto

    context_features, sequence_features = get_feature_description(
        image_feature, size_feature,
        coord_xl_feature, coord_yt_feature,
        coord_xr_feature, coord_yb_feature, categories_feature)
    # fetch context and sequence from record
    src_dataset = src_dataset.map(
        lambda x: (tf.parse_single_sequence_example(
            x, context_features, sequence_features)),
        num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
    # Data argument or normalize?
    # fetch image data, height, width, bbox category, overlap, coordinate from record
    src_dataset = src_dataset.map(
        lambda context, sequence: (
            process_image(context[image_feature], image_format),
            tf.to_int32(sequence[size_feature]),
            tf.transpose([sequence[coord_xl_feature], sequence[coord_yt_feature],
                          sequence[coord_xr_feature], sequence[coord_yb_feature],
                          tf.to_float(sequence[categories_feature])])),
        num_parallel_calls=num_parallel_calls).prefetch(output_buffer_size)
    if batch_size > 1:
        print("Model only train and test in batch_size is 1")
        batch_size = 1

    def batching_func(x):
        # Pad image to [-1, -1, 3] and boxes to [-1, 5]; padding values mark
        # invalid pixels/boxes with -1 and size with 0.
        return x.padded_batch(
            batch_size,
            padded_shapes=(
                [-1, -1, 3],
                [3],
                [-1, 5]),
            padding_values=(
                -1., 0, -1.))

    src_dataset = batching_func(src_dataset)
    # src_dataset = src_dataset.batch(batch_size)
    batched_iter = src_dataset.make_initializable_iterator()
    (img_data, img_size, bbox_locations) = batched_iter.get_next()
    # bbox_locations = tf.transpose(bbox_locations)
    return DataWrapper(
        initializer=batched_iter.initializer,
        images_data=img_data,
        images_size=tf.squeeze(img_size, axis=0),
        # Contain bbox categories in last dim
        bbox_locations=bbox_locations)
def get_infer_iterator_wrapper(src_dataset,
                               batch_size,
                               image_format="jpeg"):
    """Build an inference DataWrapper from a dataset of (image, size) pairs;
    batch size is forced to 1 and bbox_locations is None."""
    # Data argument or normalize?
    # batch
    if batch_size > 1:
        print("Model only train and test in batch_size is 1")
        batch_size = 1
    batched_dataset = src_dataset.batch(batch_size)
    batched_iter = batched_dataset.make_initializable_iterator()
    img_data, img_size = batched_iter.get_next()
    img_data = process_image(img_data, image_format)
    return DataWrapper(
        initializer=batched_iter.initializer,
        images_data=img_data,
        images_size=tf.squeeze(img_size, axis=0),
        bbox_locations=None)
| [
"noreply@github.com"
] | noreply@github.com |
0f5bdbf5ebeef0d600a6606d1ba3dee3a270cb8d | 2337c000ff2710460e7b65729c57d8469e200aa6 | /apps/portals/applications/account/urls.py | 5f0ea306f0c7ad62a6d82458b51cb8139cf93977 | [] | no_license | Phoenix25/fest-api | 9b7ad42b843dc1e2e65b6753e21cf88c8925c5c1 | ddb3110b160f83f4a6d8a8606874217778b99069 | refs/heads/master | 2021-01-18T13:36:02.366482 | 2014-04-18T12:50:50 | 2014-04-18T12:50:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
from django.conf.urls import patterns, include, url
from apps.portals.applications.account.views import *
from django.contrib.auth.views import password_change
from django.conf import settings
urlpatterns = patterns('',
url(r'^password_change/$', password_change,{'post_change_redirect':settings.SITE_URL}),
url(r'^login/$', 'apps.portals.applications.account.views.login', name='login'),
url(r'^register/$', 'apps.portals.applications.account.views.register', name='home'),
url(r'^logout/$', 'apps.portals.applications.account.views.logout', name='logout'),
url(r'^editprofile/$', 'apps.portals.applications.account.views.editprofile', name='editprofile'),
)
| [
"saipraveenb25@gmail.com"
] | saipraveenb25@gmail.com |
96177942765b2f52d5e55fed685ed759a48ced11 | 90ad3f487bb9d7b8a59aee1a2591bfa338ecf63e | /Block_Chain/Define/Account.py | 7eff69306f30ceef0221ae487c867e2d887e0bf1 | [] | no_license | ajax-2/BlockChain_tornado | 2d25a8ce1f4f2ced005ef10dfb550ee039cd3c98 | 797bb122c7f279f586403af5348a53f831de201a | refs/heads/master | 2020-03-18T23:31:25.721648 | 2018-06-01T07:23:08 | 2018-06-01T07:23:08 | 135,407,911 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | import hashlib
class Account(object):
    """An account record whose identity digest is the SHA-256 of its
    concatenated fields (name, signature, timestamp, data, balance)."""

    def __init__(self, name, signature, timestamp, data):
        """Initialise the account; balance starts at 0 and hash is unset."""
        self.name = name
        self.signature = signature
        self.timestamp = timestamp
        self.data = data
        self.balance = 0
        self.hash = None

    def user_hash(self):
        """Return the hex SHA-256 digest of the concatenated fields."""
        parts = (self.name, self.signature, self.timestamp,
                 self.data, self.balance)
        payload = "".join(str(part) for part in parts)
        return hashlib.sha256(payload.encode()).hexdigest()
| [
"Allence@xxx.com"
] | Allence@xxx.com |
16f8ed72f0a4fee3f6f9d76191a5fc8ccb024581 | 78435005fd4ddf72579eef4d253b66128120745c | /srs_human_sensing/src/srs_human_sensing/hs.py | 6a8d75a2622b1c2ed0220fd829b49fee316d132d | [] | no_license | Mazet/srs_public | 57290f60dd9e3de2a11a6ef8774c3cf27dfbd342 | 4342d93ae44de2ef0dd9450679c667cad1701615 | refs/heads/master | 2021-01-17T22:25:07.604843 | 2012-07-19T08:50:36 | 2012-07-19T08:50:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,621 | py | #!/usr/bin/env python
import roslib; roslib.load_manifest('srs_human_sensing')
import rospy
import math
from std_msgs.msg import String
from sensor_msgs.msg import PointCloud
from geometry_msgs.msg import Point32
from cob_people_detection_msgs.msg import PeopleDetectionArray
import tf
from std_msgs.msg import Header
def callback_LD(data):
    """Leg-detector callback: replace the module-level `legs` cache with the
    incoming PointCloud points and re-run face/leg matching."""
    rospy.loginfo("I heard from LD:")
    leg_detections = data.points
    global legs
    legs = []
    for i in range (len(leg_detections)):
        legs.append(leg_detections[i])
        print leg_detections[i].x,",",leg_detections[i].y
    match_detections()
def callback_FD(data):
    """ROS callback for the face-detection topic.

    Transforms every detected face pose into the ``map`` frame via the
    module-level tf listener, rebuilds the module-level ``faces`` list,
    then re-runs the leg/face matching.  Transform failures for a single
    detection are reported and skipped.
    """
    rospy.loginfo("I heard from FD:")
    face_detections = data.detections
    global faces
    faces = []
    for i in range (len(face_detections)):
        try:
            # Convert the detection pose into the map frame.
            detection1 = tflistener.transformPose('map', face_detections[i].pose)
            newpoint = Point32(detection1.pose.position.x,detection1.pose.position.y,detection1.pose.position.z)
            faces.append(newpoint)
        except (tf.LookupException, tf.ConnectivityException, tf.ExtrapolationException):
            print "tf transform error"
    match_detections ()
def match_detections ():
distances = []
global legs
global faces
global pub
pc = PointCloud()
pc.header = Header(0,rospy.get_rostime(),'/map')
for m in range (len(faces)):
pc.points.append(Point32(faces[m].x,faces[m].y,1.5))
for l in range (len(legs)):
pc.points.append(Point32(legs[l].x,legs[l].y,1.0))
distancecol = []
for m in range (len(faces)):
# measure the distance between the detections and store them
dist = (sqrt((faces[m].x-legs[l].x)**2+(faces[m].y-legs[l].y)**2))
distancecol.append (dist)
distances.append (distancecol)
print "distances"
print distances
pub.publish (pc)
def listener():
    """Subscribe to the leg- and face-detection topics and spin until
    the node is shut down."""
    rospy.Subscriber("leg_detections_cloud", PointCloud , callback_LD)
    rospy.Subscriber("/cob_people_detection/face_position_array", PeopleDetectionArray , callback_FD)
    rospy.spin()
if __name__ == '__main__':
    # Node entry point: initialise the ROS node, the shared state used by
    # the callbacks (legs/faces/tflistener/pub), then block in listener().
    print "Listening ..."
    rospy.init_node('SRS_HS_listener', anonymous=True)
    legs = []
    faces = []
    tflistener = tf.TransformListener()
    # Wait up to 5 s for the camera->map transform to become available
    # before the face callback starts transforming poses.
    tflistener.waitForTransform("/head_cam3d_link", "/map", rospy.Time(), rospy.Duration(5.0))
    pub = rospy.Publisher('people_detections_cloud', PointCloud)
    listener()
| [
"NoyvirtA@cf.ac.uk"
] | NoyvirtA@cf.ac.uk |
9cf4bcddbc8314500149e0a523d8dd5b7deec3b4 | db24008c2fb9691e5410acc6e587e70af431042e | /Modbus/ModbusConnector.py | a231a40c5043758e96091f204b7fadcdc9c62d83 | [] | no_license | gabrieldinse/package-cover-offset | 0dbd4254dceae109c455b8a2798ebe065ff0ffcc | 79b80d13ff2d14af396de921a281bb0a3150b96b | refs/heads/master | 2023-02-09T06:50:06.046198 | 2020-12-30T10:10:48 | 2020-12-30T10:10:48 | 322,862,679 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,232 | py | # Standard Library
import time
# Third party modules
import minimalmodbus
# Local application imports
class ModbusConnector(minimalmodbus.Instrument):
    """Modbus-RTU serial link used to exchange package-cover offsets.

    Configures the underlying :class:`minimalmodbus.Instrument` with the
    serial settings this project uses (19200 baud, 8 data bits, 1 stop
    bit, 1 s timeout).
    """

    def __init__(self, port: str='COM3', slaveaddress: int=1,
                 close_port_after_each_call: bool=False, debug: bool=False):
        minimalmodbus.Instrument.__init__(self, port, slaveaddress)
        self.serial.baudrate = 19200
        self.serial.bytesize = 8
        self.serial.stopbits = 1
        self.serial.timeout = 1  # segundos
        self.mode = 'rtu'
        self.close_port_after_each_call = close_port_after_each_call
        self.debug = debug
        self.usb_on = True
        self.register = 0
        if self.debug:
            print(self)
        # Unconditional settle delay after opening the serial port
        # (presumably to let the device become ready — TODO confirm).
        time.sleep(2)

    def send_offset(self, offset: float) -> None:
        """Write *offset* (mm) into holding register 0.

        Negative offsets are encoded as unsigned 16-bit two's complement.
        """
        if self.usb_on:
            # BUG FIX: the 16-bit two's-complement encoding of a negative
            # value v is v + 65536, not v + 65535 (the original was off by
            # one: -1 would encode as 65534, which decodes as -2).
            self.write_register(0, offset if offset >= 0 else offset + 65536)
            if self.debug:
                print(f'Offset {offset}mm enviado com sucesso')

    def read_offset(self):
        """Read one input register (function code 4) and return it.

        Returns the list produced by ``read_registers`` or ``None`` when
        the USB link is disabled.  (The original read the value and then
        discarded it; returning it makes the method usable by callers.)
        """
        if self.usb_on:
            test_reg = self.read_registers(0, 1, 4)
            if self.debug:
                print(f'Recebido offset {test_reg}')
            return test_reg
"gabriel_dinse@hotmail.com"
] | gabriel_dinse@hotmail.com |
297d73c6b6d28adb9888b309f6b1409185969185 | afc6872c108922e88451b55195e7d85883a88ba4 | /userop/urls.py | 106c4e8ae8f1e69547618439c30a487ef07a36cd | [] | no_license | prashantkrishna5/django-token-based | 41686b0b1b85213db2d401cff1f8a24f46bd92e1 | 60e5f0ab58cf9600a1b12f364d140190332f71b8 | refs/heads/master | 2022-11-18T17:10:34.698602 | 2020-07-12T14:13:29 | 2020-07-12T14:13:29 | 278,598,595 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | from django.contrib import admin
from django.urls import path, include
from . import views
from userop.views import(LoginView, LogoutView, Home, AddData)
# Route table for the userop app: a home page, a data-creation endpoint
# and authentication endpoints, all mapped to class-based views.
urlpatterns = [
    path('',Home.as_view()),
    path('api/v1/add', AddData.as_view()),
    path('api/v1/auth/login/',LoginView.as_view()),
    path('api/v1/auth/logout/',LogoutView.as_view())
]
| [
"krishnapuram.babu@I2LT385.systems.bgr.ionidea.com"
] | krishnapuram.babu@I2LT385.systems.bgr.ionidea.com |
fa793e4c86c96cb49ded7b33210f7a87f01a60d5 | f5625da9dd2f2f485c5be71ca578c5c2349d491c | /vendas/views.py | 05ec834d26caeec3136ccf2e7306a31c00199c5f | [] | no_license | PinneappleTech/api-loja | 3a3035a88096f0d93e9f7a83c57324be7ff99971 | 9e93603dffc186ec11503df550e2ae41a1c5fcfe | refs/heads/master | 2022-12-22T10:44:23.449671 | 2020-09-26T18:16:52 | 2020-09-26T18:16:52 | 271,883,890 | 0 | 0 | null | 2020-09-26T18:16:54 | 2020-06-12T20:24:13 | Python | UTF-8 | Python | false | false | 1,622 | py | from django.shortcuts import render
from django.shortcuts import get_object_or_404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from .serializers import VendaCreateSerializer, VendaSerializer
from .models import Venda, ItemVenda
# Create your views here.
class VendaList(APIView):
    """API resource to list all Venda records and create new ones."""

    def get(self, request):
        """Return every Venda, serialized."""
        queryset = Venda.objects.all()
        return Response(VendaSerializer(queryset, many=True).data)

    def post(self, request):
        """Create a Venda from the request payload; 400 on invalid data."""
        serializer = VendaCreateSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class VendaDetail(APIView):
    """API resource to fetch, partially update and delete a single Venda."""

    def get(self, request, pk):
        """Return the Venda identified by *pk* (404 when absent)."""
        instance = get_object_or_404(Venda, pk=pk)
        return Response(VendaSerializer(instance).data)

    def patch(self, request, pk):
        """Apply a partial update to the Venda identified by *pk*."""
        instance = get_object_or_404(Venda, pk=pk)
        serializer = VendaCreateSerializer(instance, data=request.data,
                                           partial=True)
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data)

    def delete(self, request, pk):
        """Delete the Venda identified by *pk* and return 204."""
        get_object_or_404(Venda, pk=pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
"christianoliveirati@gmail.com"
] | christianoliveirati@gmail.com |
44d44a091d0def58363d3444fd132d8c03fea72f | 8a5dc3ec41dcd8a40fc3636a2d92f2b0913f5a28 | /models.py | 60557b2e19c19936577bf7aa48f5cd56ca425bf7 | [] | no_license | himanshu7118/crud | a8427f34acdd8a4f1a27b2c80645015626f80420 | 64063a102dbf98f3c4a0f2f0725d1a5b90e2a279 | refs/heads/main | 2023-02-23T12:04:32.008728 | 2021-01-19T19:10:37 | 2021-01-19T19:10:37 | 331,081,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 413 | py | from django.db import models
from django.urls import reverse
# Create your models here.
class School(models.Model):
    """A school record: its name, head and location."""
    # NOTE(review): 'principle' looks like a misspelling of 'principal';
    # renaming it would require a DB migration, so it is kept as-is.
    name = models.CharField(max_length=255)
    principle = models.CharField(max_length=255)
    location = models.CharField(max_length=255)
    def __str__(self):
        # Human-readable representation (admin, shell, templates).
        return self.name
    def get_absolute_url(self):
        # Canonical URL of this record's detail view ('hello' URL namespace).
        return reverse('hello:detail',kwargs={'pk':self.pk})
"noreply@github.com"
] | noreply@github.com |
54ec4f1076e6c92ee51eb614731240891c4fef2c | 3fdbf2381b106636a1f1f6ad25985a4ab8dc9317 | /manage.py | 77fee6075fbb7036f67605fdeaf22a3e0684a263 | [] | no_license | alex-dsouza777/User-Authentication-Django | c1da52cca5cb152fb63ac9f30a08079cce4dbb23 | 3780b0a6bf7ff6943f2e01f87a2cfc5bd6bccebd | refs/heads/master | 2023-04-10T18:55:23.417678 | 2021-04-24T16:52:19 | 2021-04-24T16:52:19 | 343,179,991 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 665 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks."""
    # Point Django at this project's settings before importing it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Userlogin.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        message = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(message) from exc
    execute_from_command_line(sys.argv)


if __name__ == '__main__':
    main()
| [
"alexdsouza00777@gmail.com"
] | alexdsouza00777@gmail.com |
0e8c29073521b898bc3ef2206452c18f54e91d2e | b8e3363a40bc9928ae85c16232c5bf6240597a18 | /out/production/home-assistant/components/sensor/glances.py | eb38e3df265ff3d70494957f007ddc6013f2039d | [
"MIT"
] | permissive | LaurentTrk/home-assistant | 4cbffd5a71f914e003918542319bc6caa96dbb72 | 5a808d4e7df4d8d0f12cc5b7e6cff0ddf42b1d40 | refs/heads/dev | 2021-01-15T23:02:38.147063 | 2016-05-15T12:21:52 | 2016-05-15T12:21:52 | 51,471,180 | 2 | 0 | null | 2016-02-10T20:49:47 | 2016-02-10T20:49:47 | null | UTF-8 | Python | false | false | 6,117 | py | """
homeassistant.components.sensor.glances
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Gathers system information of hosts which running glances.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/sensor.glances/
"""
from datetime import timedelta
import logging
import requests
from homeassistant.util import Throttle
from homeassistant.helpers.entity import Entity
from homeassistant.const import STATE_UNKNOWN
_LOGGER = logging.getLogger(__name__)
# REST path of the Glances v2 "all stats" endpoint.
_RESOURCE = '/api/2/all'

# Configuration keys / defaults.  NOTE: despite the CONF_ prefix,
# CONF_PORT holds the default port *value*, while CONF_HOST and
# CONF_RESOURCES hold configuration *key* names.
CONF_HOST = 'host'
CONF_PORT = '61208'
CONF_RESOURCES = 'resources'

# Mapping: resource id -> [friendly name, unit of measurement].
SENSOR_TYPES = {
    'disk_use_percent': ['Disk Use', '%'],
    'disk_use': ['Disk Use', 'GiB'],
    'disk_free': ['Disk Free', 'GiB'],
    'memory_use_percent': ['RAM Use', '%'],
    'memory_use': ['RAM Use', 'MiB'],
    'memory_free': ['RAM Free', 'MiB'],
    'swap_use_percent': ['Swap Use', '%'],
    'swap_use': ['Swap Use', 'GiB'],
    'swap_free': ['Swap Free', 'GiB'],
    'processor_load': ['CPU Load', None],
    'process_running': ['Running', None],
    'process_total': ['Total', None],
    'process_thread': ['Thread', None],
    'process_sleeping': ['Sleeping', None]
}

# (A duplicate `_LOGGER = logging.getLogger(__name__)` assignment that
# redundantly shadowed the one defined above was removed here.)

# Return cached results if last scan was less than this time ago.
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
# pylint: disable=unused-variable
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the Glances sensors from the platform configuration.

    Validates the configuration, probes the Glances REST endpoint once,
    then registers one GlancesSensor per configured resource.
    """
    host = config.get(CONF_HOST)
    port = config.get('port', CONF_PORT)
    resources = config.get(CONF_RESOURCES)

    # Both the host and the resource list are mandatory.
    if host is None or resources is None:
        _LOGGER.error('Not all required config keys present: %s',
                      ', '.join((CONF_HOST, CONF_RESOURCES)))
        return False

    url = 'http://{}:{}{}'.format(host, port, _RESOURCE)
    try:
        # Probe the endpoint once so misconfiguration fails fast.
        response = requests.get(url, timeout=10)
        if not response.ok:
            _LOGGER.error('Response status is "%s"', response.status_code)
            return False
    except requests.exceptions.MissingSchema:
        _LOGGER.error("Missing resource or schema in configuration. "
                      "Please check the details in the configuration file.")
        return False
    except requests.exceptions.ConnectionError:
        _LOGGER.error("No route to resource/endpoint: '%s'. "
                      "Please check the details in the configuration file.",
                      url)
        return False

    rest = GlancesData(url)
    devices = []
    for resource in resources:
        if resource not in SENSOR_TYPES:
            _LOGGER.error('Sensor type: "%s" does not exist', resource)
        else:
            devices.append(GlancesSensor(rest, config.get('name'), resource))
    add_devices(devices)
class GlancesSensor(Entity):
    """Representation of one Glances resource as a Home Assistant sensor."""

    def __init__(self, rest, name, sensor_type):
        """Keep the shared data handle and fetch an initial state."""
        self.rest = rest
        self._name = name
        self.type = sensor_type
        self._state = STATE_UNKNOWN
        self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
        self.update()

    @property
    def name(self):
        """Return the sensor name, prefixed with the user-given name if any."""
        base = SENSOR_TYPES[self.type][0]
        return base if self._name is None else '{} {}'.format(self._name, base)

    @property
    def unit_of_measurement(self):
        """Return the unit this sensor's value is expressed in."""
        return self._unit_of_measurement

    @property
    def state(self):
        """Return the current value for the configured resource type.

        Returns None while no data has been fetched or for an unknown type.
        """
        data = self.rest.data
        if data is None:
            return None

        def disk_free(value):
            # 'free' may be missing from the payload; fall back to
            # size - used in that case.
            try:
                return round(value['fs'][0]['free'] / 1024**3, 1)
            except KeyError:
                return round((value['fs'][0]['size'] -
                              value['fs'][0]['used']) / 1024**3, 1)

        getters = {
            'disk_use_percent': lambda v: v['fs'][0]['percent'],
            'disk_use': lambda v: round(v['fs'][0]['used'] / 1024**3, 1),
            'disk_free': disk_free,
            'memory_use_percent': lambda v: v['mem']['percent'],
            'memory_use': lambda v: round(v['mem']['used'] / 1024**2, 1),
            'memory_free': lambda v: round(v['mem']['free'] / 1024**2, 1),
            'swap_use_percent': lambda v: v['memswap']['percent'],
            'swap_use': lambda v: round(v['memswap']['used'] / 1024**3, 1),
            'swap_free': lambda v: round(v['memswap']['free'] / 1024**3, 1),
            'processor_load': lambda v: v['load']['min15'],
            'process_running': lambda v: v['processcount']['running'],
            'process_total': lambda v: v['processcount']['total'],
            'process_thread': lambda v: v['processcount']['thread'],
            'process_sleeping': lambda v: v['processcount']['sleeping'],
        }
        getter = getters.get(self.type)
        return getter(data) if getter is not None else None

    def update(self):
        """Ask the shared data object to refresh itself from the REST API."""
        self.rest.update()
# pylint: disable=too-few-public-methods
class GlancesData(object):
    """Throttled fetcher for the Glances REST endpoint."""

    def __init__(self, resource):
        """Remember the endpoint URL and start with an empty payload."""
        self._resource = resource
        self.data = dict()

    @Throttle(MIN_TIME_BETWEEN_UPDATES)
    def update(self):
        """Fetch the latest JSON payload; data becomes None when unreachable."""
        try:
            self.data = requests.get(self._resource, timeout=10).json()
        except requests.exceptions.ConnectionError:
            _LOGGER.error("No route to host/endpoint '%s'. Is device offline?",
                          self._resource)
            self.data = None
| [
"laurent.turek_github@gadz.org"
] | laurent.turek_github@gadz.org |
6e7869217a349653b244c104e40f724127c521b0 | e24530d5201dd0062c8495c29ded4dfdceb0b40c | /KI-Ecommerce/TestMitArray.py | 17cb1b466021a8b39a7d116cbaedc9992309bdce | [] | no_license | LarsLemke/E-Comm-Abgabe | 382aa86c08b95209606874fce45c1e014f06386d | 492acbd1dba9c145141c473a75e708ebfafe2c05 | refs/heads/master | 2023-02-27T20:14:57.924071 | 2021-02-08T16:09:22 | 2021-02-08T16:09:22 | 337,105,575 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 999 | py | import numpy as np
from keras.models import Sequential
from keras.layers import Dense
# Toy binary-classification experiment: learn a mapping from 6-value
# feature rows to a 0/1 label with a tiny fully-connected network.
training_inputs = np.array([[0, 0.5, 1, 0, 1, 1],
                            [1, 1, 1, 0, 0, 0],
                            [1, 0, 1, 1, 1, 0],
                            [0, 1, 1, 0, 1, 1]])
training_outputs = np.array([[0, 1, 1, 0]]).T

test_inputs = np.array([[1, 0, 1, 1, 1, 1],
                        [1, 1, 1, 0, 0, 0],
                        [0, 0, 1, 1, 1, 1],
                        [1, 1, 1, 1, 1, 1]])
test_outputs = np.array([[1, 0, 0, 1]]).T

classifier = Sequential()  # initialise the ANN
classifier.add(Dense(units=6, activation='relu', input_dim=6))
classifier.add(Dense(units=3, activation='relu'))
classifier.add(Dense(units=1, activation='sigmoid'))
classifier.compile(optimizer='adam', loss='binary_crossentropy')
classifier.fit(training_inputs, training_outputs, batch_size=1, epochs=3)

predictions = classifier.predict(test_inputs)
# BUG FIX: the network has a single sigmoid output, so `predictions` has
# shape (n, 1) and np.argmax(..., axis=1) is always 0 regardless of the
# model.  Threshold the sigmoid probability at 0.5 to get the class.
print((predictions > 0.5).astype(int).ravel())
| [
"lemke.lars@outlook.de"
] | lemke.lars@outlook.de |
628c00eb06fb0f9dc84aeeeefb6f31cdcfea0e8f | 4695656c1810c87f27845616ff8ce7d918d847b5 | /euler/py/src/e004.py | e8a6215695c59ad65214c835bc1536b12deee1c8 | [
"MIT"
] | permissive | protago90/hacker-quest | 3b90180aeeade277b13ee2b01528e551a6a9d790 | 7593f7a6453c1305e7d37a55f2bc94fc7afc6773 | refs/heads/main | 2022-02-01T07:58:34.687916 | 2021-12-28T19:18:02 | 2021-12-28T19:18:02 | 186,469,475 | 0 | 0 | null | 2019-08-08T20:27:04 | 2019-05-13T17:52:21 | Python | UTF-8 | Python | false | false | 859 | py | #!/usr/bin/env python
# by protago90
from src.utils import promptify
def run_e4v1(x: int) -> int:
rec = 1
t, tt = 10**(x-1), 10**x - 1
for n in range(t, tt):
for m in range(n, tt):
p = n * m
if p == int(str(p)[::-1]) & p > rec:
rec = p
return rec
@promptify
def solve_e4() -> int:
'''
Largest polindrome product
A palindromic number reads the same both ways. The largest palindrome made from
the product of two 2-digit numbers is 9009 = 91 × 99.
> Find the largest palindrome made from the product of two 3-digit numbers.
'''
return run_e4v1(3)
if __name__ == '__main__':
solve_e4()
# >> the anwser for the #4 euler problem is >906609<; computed in 0.2108s ∎
# from src.utils import timeitfy
# timeitfy([run_e4v1], args=[3], i=10000)
| [
"mg.protago@gmail.com"
] | mg.protago@gmail.com |
79fb8ba6a83387a576801361beb2514c1856416d | 0c341392f4060acb4a7f95a55d6201d36cde94c4 | /codewars/kata/fundamentals/6-kyu/dubstep-551dc350bf4e526099000ae5/code.py | bf66a458ad774ef7c7d704ce9bd1a908c2b2b175 | [] | no_license | fernandoe/study | fe4692c6c3dfaa5b9775ee731ad78da3848a5014 | 23ec748994ccd9484a84653274c0a65997de1dbd | refs/heads/master | 2023-04-16T14:46:23.552739 | 2023-04-12T01:29:37 | 2023-04-12T01:29:37 | 134,962,022 | 0 | 0 | null | 2023-01-19T23:34:23 | 2018-05-26T13:05:35 | Python | UTF-8 | Python | false | false | 2,271 | py | def song_decoder(song):
decoded = " ".join(song.split("WUB"))
return " ".join(decoded.split()) # The second split remove the double spaces
if __name__== "__main__":
import codewars_test as test
# Sample tests
test.assert_equals(song_decoder("AWUBBWUBC"), "A B C","WUB should be replaced by 1 space")
test.assert_equals(song_decoder("AWUBWUBWUBBWUBWUBWUBC"), "A B C","multiples WUB should be replaced by only 1 space")
test.assert_equals(song_decoder("WUBAWUBBWUBCWUB"), "A B C","heading or trailing spaces should be removed")
# Test Cases
test.assert_equals(song_decoder("RWUBWUBWUBLWUB"), "R L")
test.assert_equals(song_decoder("WUBJKDWUBWUBWBIRAQKFWUBWUBYEWUBWUBWUBWVWUBWUB"), "JKD WBIRAQKF YE WV")
test.assert_equals(song_decoder("WUBKSDHEMIXUJWUBWUBRWUBWUBWUBSWUBWUBWUBHWUBWUBWUB"), "KSDHEMIXUJ R S H")
test.assert_equals(song_decoder("QWUBQQWUBWUBWUBIWUBWUBWWWUBWUBWUBJOPJPBRH"), "Q QQ I WW JOPJPBRH")
test.assert_equals(song_decoder("WUBWUBOWUBWUBWUBIPVCQAFWYWUBWUBWUBQWUBWUBWUBXHDKCPYKCTWWYWUBWUBWUBVWUBWUBWUBFZWUBWUB"), "O IPVCQAFWY Q XHDKCPYKCTWWY V FZ")
test.assert_equals(song_decoder("WUBYYRTSMNWUWUBWUBWUBCWUBWUBWUBCWUBWUBWUBFSYUINDWOBVWUBWUBWUBFWUBWUBWUBAUWUBWUBWUBVWUBWUBWUBJB"), "YYRTSMNWU C C FSYUINDWOBV F AU V JB")
test.assert_equals(song_decoder("WUBKSDHEMIXUJWUBWUBRWUBWUBWUBSWUBWUBWUBHWUBWUBWUB"), "KSDHEMIXUJ R S H")
test.assert_equals(song_decoder("AWUBWUBWUB"), "A")
test.assert_equals(song_decoder("AWUBBWUBCWUBD"), "A B C D")
test.assert_equals(song_decoder("WUBWWUBWUBWUBUWUBWUBBWUB"), "W U B")
test.assert_equals(song_decoder("WUWUBBWWUBUB"), "WU BW UB")
test.assert_equals(song_decoder("WUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUABWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUBWUB"), "WUAB")
test.assert_equals(song_decoder("U"), "U")
test.assert_equals(song_decoder("WUWUB"), "WU")
test.assert_equals(song_decoder("UBWUB"), "UB")
test.assert_equals(song_decoder("WUWUBUBWUBUWUB"), "WU UB U")
test.assert_equals(song_decoder("WUBWWUBAWUB"), "W A")
test.assert_equals(song_decoder("WUUUUU"), "WUUUUU")
test.assert_equals(song_decoder("WUBWUBA"), "A")
| [
"fer.esp@gmail.com"
] | fer.esp@gmail.com |
a7074be35fd200abbb3a212417a1f3e90fed2b0f | f856712772089ac9c9c3fa95995aef752a3219ea | /run.py | 5653c3f9133d077c69f7930414f1cd74b42fcc51 | [] | no_license | yuhangT/Pytorch_Bert_1 | bb6b3da2c91b428e2a325a12f2819219adf3bc13 | 68a6bb58bd748633211852354a9f3fb43fc5fc11 | refs/heads/master | 2021-04-02T03:07:53.883075 | 2020-04-05T09:56:24 | 2020-04-05T09:56:24 | 248,237,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 27,479 | py | import os
import shutil
import config
import time
import torch
from data_process import data_generator
from sklearn.metrics import accuracy_score, f1_score
from tqdm import tqdm, trange
from optimization import BERTAdam
import random
import numpy as np
from modeling import Discourage, DiscourageMask
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
####
# Name: print_model_result
# Function: 输出模型结果
####
def print_model_result(result, data_type='train'):
# print("***** Eval results in " + data_type + "*****")
# tmp = "\t"
for key in sorted(result.keys()):
# tmp += "%s = %s\t" % (key, str(result[key]).strip())
print(" \t %s = %-5.5f" % (key, float(result[key])), end="")
# print(tmp, end=" ")
####
# Name: model_eval
# Function: 在验证集和测试集上,评估模型
# return: 模型评估结果
####
def model_eval(model, data_loader, data_type='dev'):
result_sum = {}
nm_batch = 0
labels_pred = np.array([])
labels_true = np.array([])
for step, batch in enumerate(tqdm(data_loader)):
batch = tuple(t.to(config.device) for t in batch)
model.eval()
with torch.no_grad():
_, pred = model(batch)
pred = np.argmax(pred.detach().cpu().numpy(), axis=1)
labels_pred = np.append(labels_pred, pred)
true = model.get_labels_data().detach().cpu().numpy()
labels_true = np.append(labels_true, true)
result_temp = model.get_result()
result_sum['loss'] = result_sum.get('loss', 0) + result_temp['loss']
nm_batch += 1
result_sum["accuracy"] = accuracy_score(labels_true, labels_pred)
result_sum["f1"] = f1_score(labels_true, labels_pred, average='macro')
result_sum["loss"] = result_sum["loss"] / nm_batch
with open(os.path.join(config.output_dir, config.MODEL_NAME + '_' + data_type + '_result.txt'), 'a+',
encoding='utf-8') as writer:
print("***** Eval results in " + data_type + "*****")
for key in sorted(result_sum.keys()):
print("%s = %s" % (key, str(result_sum[key])))
writer.write("%s = %s\n" % (key, str(result_sum[key])))
writer.write('\n')
return result_sum
####
# Name: save_best_model
# Function: 在验证集或者训练集上, 保存loss最小或者准确度最高的模型参数。
####
def save_best_model(model, v, data_type='dev', use_accuracy=False):
# 保存模型
if not use_accuracy and data_type == 'dev':
if config.eval_best_loss > v:
config.eval_best_loss = v
state = {'net': model.state_dict()}
save_path = os.path.join(config.output_dir, config.MODEL_NAME + '_state_dict_' +
data_type + '_loss_' + str(v) + '.model')
print("Save.......")
torch.save(state, save_path)
config.train_best_loss_model = save_path
# 以精确度作为评估标准
if use_accuracy and data_type == 'dev':
if config.eval_best_accuracy < v:
config.eval_best_accuracy = v
state = {'net': model.state_dict()}
save_path = os.path.join(config.output_dir, config.MODEL_NAME + '_state_dict_'
+ data_type + '_ac_' + str(v) + '.model')
print("Save.......")
torch.save(state, save_path)
config.train_best_accuracy_model = save_path
####
# Name: train
# Function: 训练并评估函数
####
def train(model):
n_gpu = torch.cuda.device_count()
logger.info("device: {} n_gpu: {}".format(config.device, n_gpu))
if n_gpu > 0:
torch.cuda.manual_seed_all(config.seed)
if n_gpu > 1:
model = torch.nn.DataParallel(model)
model_it_self = model.module if hasattr(model, 'module') else model
global_step = 0
num_train_steps = data_generator.get_num_train_steps()
param_optimizer = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
optimizer_grouped_parameters = [
{'params': [p for n, p in param_optimizer if n not in no_decay], 'weight_decay_rate': 0.01},
{'params': [p for n, p in param_optimizer if n in no_decay], 'weight_decay_rate': 0.0}
]
optimizer = BERTAdam(optimizer_grouped_parameters,
lr=config.learning_rate,
warmup=config.warmup_proportion,
t_total=num_train_steps)
dev_loader = data_generator.get_dev_loader()
train_loader = data_generator.get_train_loader()
for epoch in trange(int(config.num_train_epochs), desc="Epoch"):
for step, batch in enumerate(tqdm(train_loader, desc="Iteration")):
batch = tuple(t.to(config.device) for t in batch)
loss, output = model(batch, global_step, -1)
if n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu.
if config.gradient_accumulation_steps > 1:
loss = loss / config.gradient_accumulation_steps
# opt.zero_grad()
loss.backward()
if (step + 1) % config.gradient_accumulation_steps == 0:
optimizer.step()
model.zero_grad()
global_step += 1
#if global_step % config.print_interval == 0:
# print_model_result(model_it_self.get_result())
if global_step % config.eval_interval == 0 or global_step == num_train_steps:
if config.do_eval:
print("\nepoch:{} global:{}\t".format(epoch, global_step))
eval_result = model_eval(model_it_self, dev_loader, data_type='dev')
# 保存模型,使用loss为评估标准
save_best_model(model_it_self, eval_result['loss'], data_type='dev')
if config.SAVE_USE_ACCURACY:
save_best_model(model_it_self, eval_result['accuracy'], data_type='dev',
use_accuracy=config.SAVE_USE_ACCURACY)
shutil.copy(config.train_best_accuracy_model, os.path.join(config.output_dir, 'best_ac_model.bin'))
shutil.copy(config.train_best_loss_model, os.path.join(config.output_dir, 'best_loss_model.bin'))
####
# Name: init
# Function: 初始化
####
def init(model):
if config.init_checkpoint is not None:
state_dict = torch.load(config.init_checkpoint, map_location='cpu')
new_keys = ["embeddings.word_embeddings.weight", "embeddings.position_embeddings.weight", "embeddings.token_type_embeddings.weight", "embeddings.LayerNorm.gamma", "embeddings.LayerNorm.beta", "encoder.layer.0.attention.self.query.weight", "encoder.layer.0.attention.self.query.bias", "encoder.layer.0.attention.self.key.weight", "encoder.layer.0.attention.self.key.bias", "encoder.layer.0.attention.self.value.weight", "encoder.layer.0.attention.self.value.bias", "encoder.layer.0.attention.output.dense.weight", "encoder.layer.0.attention.output.dense.bias", "encoder.layer.0.attention.output.LayerNorm.gamma", "encoder.layer.0.attention.output.LayerNorm.beta", "encoder.layer.0.intermediate.dense.weight", "encoder.layer.0.intermediate.dense.bias", "encoder.layer.0.output.dense.weight", "encoder.layer.0.output.dense.bias", "encoder.layer.0.output.LayerNorm.gamma", "encoder.layer.0.output.LayerNorm.beta", "encoder.layer.1.attention.self.query.weight", "encoder.layer.1.attention.self.query.bias", "encoder.layer.1.attention.self.key.weight", "encoder.layer.1.attention.self.key.bias", "encoder.layer.1.attention.self.value.weight", "encoder.layer.1.attention.self.value.bias", "encoder.layer.1.attention.output.dense.weight", "encoder.layer.1.attention.output.dense.bias", "encoder.layer.1.attention.output.LayerNorm.gamma", "encoder.layer.1.attention.output.LayerNorm.beta", "encoder.layer.1.intermediate.dense.weight", "encoder.layer.1.intermediate.dense.bias", "encoder.layer.1.output.dense.weight", "encoder.layer.1.output.dense.bias", "encoder.layer.1.output.LayerNorm.gamma", "encoder.layer.1.output.LayerNorm.beta", "encoder.layer.2.attention.self.query.weight", "encoder.layer.2.attention.self.query.bias", "encoder.layer.2.attention.self.key.weight", "encoder.layer.2.attention.self.key.bias", "encoder.layer.2.attention.self.value.weight", "encoder.layer.2.attention.self.value.bias", "encoder.layer.2.attention.output.dense.weight", "encoder.layer.2.attention.output.dense.bias", 
"encoder.layer.2.attention.output.LayerNorm.gamma", "encoder.layer.2.attention.output.LayerNorm.beta", "encoder.layer.2.intermediate.dense.weight", "encoder.layer.2.intermediate.dense.bias", "encoder.layer.2.output.dense.weight", "encoder.layer.2.output.dense.bias", "encoder.layer.2.output.LayerNorm.gamma", "encoder.layer.2.output.LayerNorm.beta", "encoder.layer.3.attention.self.query.weight", "encoder.layer.3.attention.self.query.bias", "encoder.layer.3.attention.self.key.weight", "encoder.layer.3.attention.self.key.bias", "encoder.layer.3.attention.self.value.weight", "encoder.layer.3.attention.self.value.bias", "encoder.layer.3.attention.output.dense.weight", "encoder.layer.3.attention.output.dense.bias", "encoder.layer.3.attention.output.LayerNorm.gamma", "encoder.layer.3.attention.output.LayerNorm.beta", "encoder.layer.3.intermediate.dense.weight", "encoder.layer.3.intermediate.dense.bias", "encoder.layer.3.output.dense.weight", "encoder.layer.3.output.dense.bias", "encoder.layer.3.output.LayerNorm.gamma", "encoder.layer.3.output.LayerNorm.beta", "encoder.layer.4.attention.self.query.weight", "encoder.layer.4.attention.self.query.bias", "encoder.layer.4.attention.self.key.weight", "encoder.layer.4.attention.self.key.bias", "encoder.layer.4.attention.self.value.weight", "encoder.layer.4.attention.self.value.bias", "encoder.layer.4.attention.output.dense.weight", "encoder.layer.4.attention.output.dense.bias", "encoder.layer.4.attention.output.LayerNorm.gamma", "encoder.layer.4.attention.output.LayerNorm.beta", "encoder.layer.4.intermediate.dense.weight", "encoder.layer.4.intermediate.dense.bias", "encoder.layer.4.output.dense.weight", "encoder.layer.4.output.dense.bias", "encoder.layer.4.output.LayerNorm.gamma", "encoder.layer.4.output.LayerNorm.beta", "encoder.layer.5.attention.self.query.weight", "encoder.layer.5.attention.self.query.bias", "encoder.layer.5.attention.self.key.weight", "encoder.layer.5.attention.self.key.bias", 
"encoder.layer.5.attention.self.value.weight", "encoder.layer.5.attention.self.value.bias", "encoder.layer.5.attention.output.dense.weight", "encoder.layer.5.attention.output.dense.bias", "encoder.layer.5.attention.output.LayerNorm.gamma", "encoder.layer.5.attention.output.LayerNorm.beta", "encoder.layer.5.intermediate.dense.weight", "encoder.layer.5.intermediate.dense.bias", "encoder.layer.5.output.dense.weight", "encoder.layer.5.output.dense.bias", "encoder.layer.5.output.LayerNorm.gamma", "encoder.layer.5.output.LayerNorm.beta", "encoder.layer.6.attention.self.query.weight", "encoder.layer.6.attention.self.query.bias", "encoder.layer.6.attention.self.key.weight", "encoder.layer.6.attention.self.key.bias", "encoder.layer.6.attention.self.value.weight", "encoder.layer.6.attention.self.value.bias", "encoder.layer.6.attention.output.dense.weight", "encoder.layer.6.attention.output.dense.bias", "encoder.layer.6.attention.output.LayerNorm.gamma", "encoder.layer.6.attention.output.LayerNorm.beta", "encoder.layer.6.intermediate.dense.weight", "encoder.layer.6.intermediate.dense.bias", "encoder.layer.6.output.dense.weight", "encoder.layer.6.output.dense.bias", "encoder.layer.6.output.LayerNorm.gamma", "encoder.layer.6.output.LayerNorm.beta", "encoder.layer.7.attention.self.query.weight", "encoder.layer.7.attention.self.query.bias", "encoder.layer.7.attention.self.key.weight", "encoder.layer.7.attention.self.key.bias", "encoder.layer.7.attention.self.value.weight", "encoder.layer.7.attention.self.value.bias", "encoder.layer.7.attention.output.dense.weight", "encoder.layer.7.attention.output.dense.bias", "encoder.layer.7.attention.output.LayerNorm.gamma", "encoder.layer.7.attention.output.LayerNorm.beta", "encoder.layer.7.intermediate.dense.weight", "encoder.layer.7.intermediate.dense.bias", "encoder.layer.7.output.dense.weight", "encoder.layer.7.output.dense.bias", "encoder.layer.7.output.LayerNorm.gamma", "encoder.layer.7.output.LayerNorm.beta", 
"encoder.layer.8.attention.self.query.weight", "encoder.layer.8.attention.self.query.bias", "encoder.layer.8.attention.self.key.weight", "encoder.layer.8.attention.self.key.bias", "encoder.layer.8.attention.self.value.weight", "encoder.layer.8.attention.self.value.bias", "encoder.layer.8.attention.output.dense.weight", "encoder.layer.8.attention.output.dense.bias", "encoder.layer.8.attention.output.LayerNorm.gamma", "encoder.layer.8.attention.output.LayerNorm.beta", "encoder.layer.8.intermediate.dense.weight", "encoder.layer.8.intermediate.dense.bias", "encoder.layer.8.output.dense.weight", "encoder.layer.8.output.dense.bias", "encoder.layer.8.output.LayerNorm.gamma", "encoder.layer.8.output.LayerNorm.beta", "encoder.layer.9.attention.self.query.weight", "encoder.layer.9.attention.self.query.bias", "encoder.layer.9.attention.self.key.weight", "encoder.layer.9.attention.self.key.bias", "encoder.layer.9.attention.self.value.weight", "encoder.layer.9.attention.self.value.bias", "encoder.layer.9.attention.output.dense.weight", "encoder.layer.9.attention.output.dense.bias", "encoder.layer.9.attention.output.LayerNorm.gamma", "encoder.layer.9.attention.output.LayerNorm.beta", "encoder.layer.9.intermediate.dense.weight", "encoder.layer.9.intermediate.dense.bias", "encoder.layer.9.output.dense.weight", "encoder.layer.9.output.dense.bias", "encoder.layer.9.output.LayerNorm.gamma", "encoder.layer.9.output.LayerNorm.beta", "encoder.layer.10.attention.self.query.weight", "encoder.layer.10.attention.self.query.bias", "encoder.layer.10.attention.self.key.weight", "encoder.layer.10.attention.self.key.bias", "encoder.layer.10.attention.self.value.weight", "encoder.layer.10.attention.self.value.bias", "encoder.layer.10.attention.output.dense.weight", "encoder.layer.10.attention.output.dense.bias", "encoder.layer.10.attention.output.LayerNorm.gamma", "encoder.layer.10.attention.output.LayerNorm.beta", "encoder.layer.10.intermediate.dense.weight", 
"encoder.layer.10.intermediate.dense.bias", "encoder.layer.10.output.dense.weight", "encoder.layer.10.output.dense.bias", "encoder.layer.10.output.LayerNorm.gamma", "encoder.layer.10.output.LayerNorm.beta", "encoder.layer.11.attention.self.query.weight", "encoder.layer.11.attention.self.query.bias", "encoder.layer.11.attention.self.key.weight", "encoder.layer.11.attention.self.key.bias", "encoder.layer.11.attention.self.value.weight", "encoder.layer.11.attention.self.value.bias", "encoder.layer.11.attention.output.dense.weight", "encoder.layer.11.attention.output.dense.bias", "encoder.layer.11.attention.output.LayerNorm.gamma", "encoder.layer.11.attention.output.LayerNorm.beta", "encoder.layer.11.intermediate.dense.weight", "encoder.layer.11.intermediate.dense.bias", "encoder.layer.11.output.dense.weight", "encoder.layer.11.output.dense.bias", "encoder.layer.11.output.LayerNorm.gamma", "encoder.layer.11.output.LayerNorm.beta", "pooler.dense.weight", "pooler.dense.bias"]
old_keys = ["bert.embeddings.word_embeddings.weight", "bert.embeddings.position_embeddings.weight", "bert.embeddings.token_type_embeddings.weight", "bert.embeddings.LayerNorm.weight", "bert.embeddings.LayerNorm.bias", "bert.encoder.layer.0.attention.self.query.weight", "bert.encoder.layer.0.attention.self.query.bias", "bert.encoder.layer.0.attention.self.key.weight", "bert.encoder.layer.0.attention.self.key.bias", "bert.encoder.layer.0.attention.self.value.weight", "bert.encoder.layer.0.attention.self.value.bias", "bert.encoder.layer.0.attention.output.dense.weight", "bert.encoder.layer.0.attention.output.dense.bias", "bert.encoder.layer.0.attention.output.LayerNorm.weight", "bert.encoder.layer.0.attention.output.LayerNorm.bias", "bert.encoder.layer.0.intermediate.dense.weight", "bert.encoder.layer.0.intermediate.dense.bias", "bert.encoder.layer.0.output.dense.weight", "bert.encoder.layer.0.output.dense.bias", "bert.encoder.layer.0.output.LayerNorm.weight", "bert.encoder.layer.0.output.LayerNorm.bias", "bert.encoder.layer.1.attention.self.query.weight", "bert.encoder.layer.1.attention.self.query.bias", "bert.encoder.layer.1.attention.self.key.weight", "bert.encoder.layer.1.attention.self.key.bias", "bert.encoder.layer.1.attention.self.value.weight", "bert.encoder.layer.1.attention.self.value.bias", "bert.encoder.layer.1.attention.output.dense.weight", "bert.encoder.layer.1.attention.output.dense.bias", "bert.encoder.layer.1.attention.output.LayerNorm.weight", "bert.encoder.layer.1.attention.output.LayerNorm.bias", "bert.encoder.layer.1.intermediate.dense.weight", "bert.encoder.layer.1.intermediate.dense.bias", "bert.encoder.layer.1.output.dense.weight", "bert.encoder.layer.1.output.dense.bias", "bert.encoder.layer.1.output.LayerNorm.weight", "bert.encoder.layer.1.output.LayerNorm.bias", "bert.encoder.layer.2.attention.self.query.weight", "bert.encoder.layer.2.attention.self.query.bias", "bert.encoder.layer.2.attention.self.key.weight", 
"bert.encoder.layer.2.attention.self.key.bias", "bert.encoder.layer.2.attention.self.value.weight", "bert.encoder.layer.2.attention.self.value.bias", "bert.encoder.layer.2.attention.output.dense.weight", "bert.encoder.layer.2.attention.output.dense.bias", "bert.encoder.layer.2.attention.output.LayerNorm.weight", "bert.encoder.layer.2.attention.output.LayerNorm.bias", "bert.encoder.layer.2.intermediate.dense.weight", "bert.encoder.layer.2.intermediate.dense.bias", "bert.encoder.layer.2.output.dense.weight", "bert.encoder.layer.2.output.dense.bias", "bert.encoder.layer.2.output.LayerNorm.weight", "bert.encoder.layer.2.output.LayerNorm.bias", "bert.encoder.layer.3.attention.self.query.weight", "bert.encoder.layer.3.attention.self.query.bias", "bert.encoder.layer.3.attention.self.key.weight", "bert.encoder.layer.3.attention.self.key.bias", "bert.encoder.layer.3.attention.self.value.weight", "bert.encoder.layer.3.attention.self.value.bias", "bert.encoder.layer.3.attention.output.dense.weight", "bert.encoder.layer.3.attention.output.dense.bias", "bert.encoder.layer.3.attention.output.LayerNorm.weight", "bert.encoder.layer.3.attention.output.LayerNorm.bias", "bert.encoder.layer.3.intermediate.dense.weight", "bert.encoder.layer.3.intermediate.dense.bias", "bert.encoder.layer.3.output.dense.weight", "bert.encoder.layer.3.output.dense.bias", "bert.encoder.layer.3.output.LayerNorm.weight", "bert.encoder.layer.3.output.LayerNorm.bias", "bert.encoder.layer.4.attention.self.query.weight", "bert.encoder.layer.4.attention.self.query.bias", "bert.encoder.layer.4.attention.self.key.weight", "bert.encoder.layer.4.attention.self.key.bias", "bert.encoder.layer.4.attention.self.value.weight", "bert.encoder.layer.4.attention.self.value.bias", "bert.encoder.layer.4.attention.output.dense.weight", "bert.encoder.layer.4.attention.output.dense.bias", "bert.encoder.layer.4.attention.output.LayerNorm.weight", "bert.encoder.layer.4.attention.output.LayerNorm.bias", 
"bert.encoder.layer.4.intermediate.dense.weight", "bert.encoder.layer.4.intermediate.dense.bias", "bert.encoder.layer.4.output.dense.weight", "bert.encoder.layer.4.output.dense.bias", "bert.encoder.layer.4.output.LayerNorm.weight", "bert.encoder.layer.4.output.LayerNorm.bias", "bert.encoder.layer.5.attention.self.query.weight", "bert.encoder.layer.5.attention.self.query.bias", "bert.encoder.layer.5.attention.self.key.weight", "bert.encoder.layer.5.attention.self.key.bias", "bert.encoder.layer.5.attention.self.value.weight", "bert.encoder.layer.5.attention.self.value.bias", "bert.encoder.layer.5.attention.output.dense.weight", "bert.encoder.layer.5.attention.output.dense.bias", "bert.encoder.layer.5.attention.output.LayerNorm.weight", "bert.encoder.layer.5.attention.output.LayerNorm.bias", "bert.encoder.layer.5.intermediate.dense.weight", "bert.encoder.layer.5.intermediate.dense.bias", "bert.encoder.layer.5.output.dense.weight", "bert.encoder.layer.5.output.dense.bias", "bert.encoder.layer.5.output.LayerNorm.weight", "bert.encoder.layer.5.output.LayerNorm.bias", "bert.encoder.layer.6.attention.self.query.weight", "bert.encoder.layer.6.attention.self.query.bias", "bert.encoder.layer.6.attention.self.key.weight", "bert.encoder.layer.6.attention.self.key.bias", "bert.encoder.layer.6.attention.self.value.weight", "bert.encoder.layer.6.attention.self.value.bias", "bert.encoder.layer.6.attention.output.dense.weight", "bert.encoder.layer.6.attention.output.dense.bias", "bert.encoder.layer.6.attention.output.LayerNorm.weight", "bert.encoder.layer.6.attention.output.LayerNorm.bias", "bert.encoder.layer.6.intermediate.dense.weight", "bert.encoder.layer.6.intermediate.dense.bias", "bert.encoder.layer.6.output.dense.weight", "bert.encoder.layer.6.output.dense.bias", "bert.encoder.layer.6.output.LayerNorm.weight", "bert.encoder.layer.6.output.LayerNorm.bias", "bert.encoder.layer.7.attention.self.query.weight", "bert.encoder.layer.7.attention.self.query.bias", 
"bert.encoder.layer.7.attention.self.key.weight", "bert.encoder.layer.7.attention.self.key.bias", "bert.encoder.layer.7.attention.self.value.weight", "bert.encoder.layer.7.attention.self.value.bias", "bert.encoder.layer.7.attention.output.dense.weight", "bert.encoder.layer.7.attention.output.dense.bias", "bert.encoder.layer.7.attention.output.LayerNorm.weight", "bert.encoder.layer.7.attention.output.LayerNorm.bias", "bert.encoder.layer.7.intermediate.dense.weight", "bert.encoder.layer.7.intermediate.dense.bias", "bert.encoder.layer.7.output.dense.weight", "bert.encoder.layer.7.output.dense.bias", "bert.encoder.layer.7.output.LayerNorm.weight", "bert.encoder.layer.7.output.LayerNorm.bias", "bert.encoder.layer.8.attention.self.query.weight", "bert.encoder.layer.8.attention.self.query.bias", "bert.encoder.layer.8.attention.self.key.weight", "bert.encoder.layer.8.attention.self.key.bias", "bert.encoder.layer.8.attention.self.value.weight", "bert.encoder.layer.8.attention.self.value.bias", "bert.encoder.layer.8.attention.output.dense.weight", "bert.encoder.layer.8.attention.output.dense.bias", "bert.encoder.layer.8.attention.output.LayerNorm.weight", "bert.encoder.layer.8.attention.output.LayerNorm.bias", "bert.encoder.layer.8.intermediate.dense.weight", "bert.encoder.layer.8.intermediate.dense.bias", "bert.encoder.layer.8.output.dense.weight", "bert.encoder.layer.8.output.dense.bias", "bert.encoder.layer.8.output.LayerNorm.weight", "bert.encoder.layer.8.output.LayerNorm.bias", "bert.encoder.layer.9.attention.self.query.weight", "bert.encoder.layer.9.attention.self.query.bias", "bert.encoder.layer.9.attention.self.key.weight", "bert.encoder.layer.9.attention.self.key.bias", "bert.encoder.layer.9.attention.self.value.weight", "bert.encoder.layer.9.attention.self.value.bias", "bert.encoder.layer.9.attention.output.dense.weight", "bert.encoder.layer.9.attention.output.dense.bias", "bert.encoder.layer.9.attention.output.LayerNorm.weight", 
"bert.encoder.layer.9.attention.output.LayerNorm.bias", "bert.encoder.layer.9.intermediate.dense.weight", "bert.encoder.layer.9.intermediate.dense.bias", "bert.encoder.layer.9.output.dense.weight", "bert.encoder.layer.9.output.dense.bias", "bert.encoder.layer.9.output.LayerNorm.weight", "bert.encoder.layer.9.output.LayerNorm.bias", "bert.encoder.layer.10.attention.self.query.weight", "bert.encoder.layer.10.attention.self.query.bias", "bert.encoder.layer.10.attention.self.key.weight", "bert.encoder.layer.10.attention.self.key.bias", "bert.encoder.layer.10.attention.self.value.weight", "bert.encoder.layer.10.attention.self.value.bias", "bert.encoder.layer.10.attention.output.dense.weight", "bert.encoder.layer.10.attention.output.dense.bias", "bert.encoder.layer.10.attention.output.LayerNorm.weight", "bert.encoder.layer.10.attention.output.LayerNorm.bias", "bert.encoder.layer.10.intermediate.dense.weight", "bert.encoder.layer.10.intermediate.dense.bias", "bert.encoder.layer.10.output.dense.weight", "bert.encoder.layer.10.output.dense.bias", "bert.encoder.layer.10.output.LayerNorm.weight", "bert.encoder.layer.10.output.LayerNorm.bias", "bert.encoder.layer.11.attention.self.query.weight", "bert.encoder.layer.11.attention.self.query.bias", "bert.encoder.layer.11.attention.self.key.weight", "bert.encoder.layer.11.attention.self.key.bias", "bert.encoder.layer.11.attention.self.value.weight", "bert.encoder.layer.11.attention.self.value.bias", "bert.encoder.layer.11.attention.output.dense.weight", "bert.encoder.layer.11.attention.output.dense.bias", "bert.encoder.layer.11.attention.output.LayerNorm.weight", "bert.encoder.layer.11.attention.output.LayerNorm.bias", "bert.encoder.layer.11.intermediate.dense.weight", "bert.encoder.layer.11.intermediate.dense.bias", "bert.encoder.layer.11.output.dense.weight", "bert.encoder.layer.11.output.dense.bias", "bert.encoder.layer.11.output.LayerNorm.weight", "bert.encoder.layer.11.output.LayerNorm.bias", "bert.pooler.dense.weight", 
"bert.pooler.dense.bias", "cls.predictions.bias", "cls.predictions.transform.dense.weight", "cls.predictions.transform.dense.bias", "cls.predictions.transform.LayerNorm.weight", "cls.predictions.transform.LayerNorm.bias", "cls.predictions.decoder.weight", "cls.seq_relationship.weight", "cls.seq_relationship.bias"]
for key in list(state_dict.keys()):
if 'cls.' in key:
state_dict.pop(key)
for old_key, new_key in zip(old_keys, new_keys):
state_dict[new_key] = state_dict.pop(old_key)
model.bert.load_state_dict(state_dict)
def eval_test(model):
best_model_path = [os.path.join(config.output_dir, config.eval_best_accuracy_model),
os.path.join(config.output_dir, config.eval_best_loss_model)]
for best_model in best_model_path:
checkpoint = torch.load(best_model)
model.load_state_dict(checkpoint['net'], strict=False)
model = model.to(config.device)
test_loader = data_generator.get_test_loader()
print("\n********" + best_model + "********")
model_eval(model, test_loader, data_type='test')
pass
def main():
# random.seed(config.seed)
# np.random.seed(config.seed)
# torch.manual_seed(config.seed)
model_set = {
"Discourage": Discourage,
"DiscourageMask": DiscourageMask
}
start_time = time.time()
os.makedirs(config.output_dir, exist_ok=True)
args = len(data_generator.get_labels()), data_generator.get_num_train_steps(), -1
model = model_set[config.MODEL_NAME](*args)
model = model.to(config.device)
init(model)
if config.do_train:
train(model)
if config.do_test:
eval_test(model)
end_time = time.time()
print("总计耗时:%d m" % int((end_time - start_time) / 60))
pass
if __name__ == '__main__':
main()
| [
"noreply@github.com"
] | noreply@github.com |
58ae9d0ac1d18829e52e80273b8d9f0ebf27832e | c5121e75b2529cbda684764d1cea15390f5cbb28 | /myproject/myproject/migrations/0005_auto_20170117_1936.py | 5ece6b41f263ff6f1f39e2a66b3962c05ab5495b | [] | no_license | MerNat/docker-django-celery-rabbitmq | b0c5681ef2124bc69e0e914d55c1dffcc190f34d | eb62c70ca0d49a3f44221790cec2506f8710f58a | refs/heads/master | 2020-04-19T16:33:35.746755 | 2017-01-20T22:26:59 | 2017-01-20T22:34:04 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 706 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.9.8 on 2017-01-17 19:36
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('myproject', '0004_auto_20170117_1922'),
]
operations = [
migrations.AddField(
model_name='job',
name='contact_info',
field=models.CharField(default=b'contact', max_length=20, verbose_name=b'ContactInfo'),
),
migrations.AlterField(
model_name='job',
name='type',
field=models.CharField(choices=[(b'power', b'power'), (b'github', b'github')], max_length=20),
),
]
| [
"nayanchandni@gmail.com"
] | nayanchandni@gmail.com |
f48a8828ada2420276a4bd2f3b52fd0351ec1ef0 | 97f10f5b009ec8a2f96733154e4f64e975c334d4 | /data/__init__.py | 10cb991f088b4d926eb8f7930576439eb21ac938 | [
"MIT"
] | permissive | RohitSaha/MichiGAN | 421eb3103b299ec18c20fd4edd52c153972afe85 | a4231bcc5e38cdfa58a0e1373aefc047dd385b12 | refs/heads/master | 2022-12-07T01:32:55.102860 | 2020-08-11T16:16:16 | 2020-08-11T16:16:16 | 282,299,185 | 1 | 0 | null | 2020-07-24T19:21:18 | 2020-07-24T19:21:17 | null | UTF-8 | Python | false | false | 2,263 | py | """
Copyright (C) University of Science and Technology of China.
Licensed under the MIT License.
"""
import importlib
import torch.utils.data
from data.base_dataset import BaseDataset
def find_dataset_using_name(dataset_name):
# Given the option --dataset [datasetname],
# the file "datasets/datasetname_dataset.py"
# will be imported.
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
# In the file, the class called DatasetNameDataset() will
# be instantiated. It has to be a subclass of BaseDataset,
# and it is case-insensitive.
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower() \
and issubclass(cls, BaseDataset):
dataset = cls
if dataset is None:
raise ValueError("In %s.py, there should be a subclass of BaseDataset "
"with class name that matches %s in lowercase." %
(dataset_filename, target_dataset_name))
return dataset
def get_option_setter(dataset_name):
dataset_class = find_dataset_using_name(dataset_name)
return dataset_class.modify_commandline_options
def create_dataloader(opt, step=1):
dataset = find_dataset_using_name(opt.dataset_mode)
instance = dataset()
if 'custom' in opt.dataset_mode:
instance.initialize(opt, step)
else:
instance.initialize(opt)
print("dataset [%s] of size %d was created" %
(type(instance).__name__, len(instance)))
dataloader = torch.utils.data.DataLoader(
instance,
batch_size=opt.batchSize,
shuffle=not opt.serial_batches,
num_workers=int(opt.nThreads),
drop_last=opt.isTrain
)
return dataloader
def create_dataset_ms(opt, step=1):
dataset = find_dataset_using_name(opt.dataset_mode)
instance = dataset()
if 'custom' in opt.dataset_mode:
instance.initialize(opt, step)
else:
instance.initialize(opt)
print("dataset [%s] of size %d was created" %
(type(instance).__name__, len(instance)))
return instance
| [
"pleaseconnectwifi@gmail.com"
] | pleaseconnectwifi@gmail.com |
7394196141ce1c2e71451faef43e3d4e10449f9e | 1282d997743da549f64b70b7e7dbc77cf4240331 | /02/xor.py | fbfaea3a2b45af86a80bca94aa58cfb97552f63b | [] | no_license | maxxibull/crypto | 85c77fceba81dde231dbb648a23047a5eead1237 | 28affa63a27ef399695b4d2f27fc8ad8d38cda79 | refs/heads/master | 2022-07-19T08:28:46.806941 | 2020-05-22T12:49:44 | 2020-05-22T12:49:44 | 247,513,361 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 577 | py | import argparse
def main(args):
output = "".join([str(ord(a) ^ ord(b)) for a,b in zip(args.iv_previous,args.iv_new)])
with open(args.output_path, mode="w") as output_file:
output_file.write(output)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--iv-previous", required=True, help="Previous nitial vector")
parser.add_argument("--iv-new", required=True, help="New initial vector")
parser.add_argument("--output-path", required=True, help="Path to output file")
args = parser.parse_args()
main(args)
| [
"maciej.kabala@icloud.com"
] | maciej.kabala@icloud.com |
0f6fd30a83ee03904a7b5b16929c706f780840e8 | f82f9f40a2618d73c0cab6b1da178ca5a1bb5991 | /django/thumbnailgen/ppm_parser.py | c4f333d5da0f6f5558e956ad5db31e1f7150a65f | [] | no_license | deejoe/QuickBBS | 3c1b971d53199e217a7cbbc7ddcf3afeb0d5efb1 | 468066c6289839703b5e1b34fa67d1e501e6a046 | refs/heads/master | 2020-12-07T15:42:46.249232 | 2016-07-27T18:18:43 | 2016-07-27T18:18:43 | 64,331,610 | 0 | 0 | null | 2016-07-27T18:16:43 | 2016-07-27T18:16:43 | null | UTF-8 | Python | false | false | 5,108 | py | """
Thumbnail services for the gallery.
This is the universal code for creating and manipulating the thumbnails
used by the gallery.
"""
import core_plugin
import os
import os.path
from PIL import Image
import cStringIO
class PluginOne(core_plugin.CorePlugin):
"""
Subclassed core plugin.
* ACCEPTABLE_FILE_EXTENSIONS is a list, that contains the (UPPERCASE),
File Extensions (DOTTED format, e.g. .GIF, not GIF) that this
plugin will manage.
* IMG_TAG - BOOLEAN - (e.g. .PNG, .GIF, .JPG)
* True - This plugin can make an IMAGE based thumbnail, for this
file type
* False - This plugin will not make an image thumbnail
* FRAME_TAG - BOOLEAN - (e.g. .TEXT, .MARKDOWN, etc)
* True - This plugin will return an TEXT based stream. That should
be displayed in the browser window.
* False - This plugin will not make an image thumbnail
* DEFAULT_ICON - String - The Default thumbnail image to use, if
IMG_TAG is False
* DEFAULT_BACKGROUND - String - The background of the table cell, for
this file format.
"""
ACCEPTABLE_FILE_EXTENSIONS = ['.pbm', '.pgm', 'ppm']
IMG_TAG = True
FRAME_TAG = False
#DEFAULT_ICON = r"/images/1431973815_text.png"
DEFAULT_BACKGROUND = "FAEBF4"
def create_thumbnail_from_file(cls, src_filename,
t_filename,
t_size=None):
"""
Create a thumbnail from a source file.
inputs -
* src_filename - String - This is the fully qualified filepathname
of the file to be thumbnailed.
* t_filename - String - This is the fully qualified filepathname
of the thumbnail file to be created.
* t_size - integer - This is the maximum size of the thumbnail.
The thumbnail will be t_size x t_size (e.g. 300 x 300)
output -
* The thumbnail file that is created at the t_filename location.
"""
if src_filename == t_filename:
raise RuntimeError("The source is the same as the target.")
if t_filename == None:
raise RuntimeError("The Target is not specified")
if os.path.exists(t_filename):
return None
if t_size == None:
raise RuntimeError("No Target size is defined")
try:
image_file = Image.open(src_filename)
image_file.thumbnail((t_size, t_size), Image.ANTIALIAS)
image_file.save(t_filename, "PNG", optimize=True)
return True
except IOError:
print "File thumbnail ", src_filename
print "save thumbnail ", t_filename
print "The File [%s] (ioerror) is damaged." % (src_filename)
except IndexError as detail:
print "File thumbnail ", src_filename
print "save thumbnail ", t_filename
print "The File [%s] (IndexError) is damaged." % (src_filename)
print detail
except TypeError:
print "File thumbnail ", src_filename
print "save thumbnail ", t_filename
print "The File [%s] (TypeError) is damaged." % (src_filename)
##########################################################################
def create_thumbnail_from_memory(self, memory_image=None,
t_filename=None,
t_size=None):
"""
Create a thumbnail from a memory image of the file.
inputs -
* memory_image - blob - This is the blob of image data, typically
a blob that has been read from a file, or a zip, etc.
* t_filename - String - This is the fully qualified filepathname
of the thumbnail file to be created.
* t_size - integer - This is the maximum size of the thumbnail.
The thumbnail will be t_size x t_size (e.g. 300 x 300)
output -
* The thumbnail file that is created at the t_filename location.
"""
if memory_image == None:
raise RuntimeError("No Memory Image is provided.")
if t_filename == None:
raise RuntimeError("The Target is not specified")
if os.path.exists(t_filename):
return None
if t_size == None:
raise RuntimeError("No Target size is defined")
try:
#
# Convert this to bytes io?
#
image_file = Image.open(cStringIO.StringIO(memory_image))
image_file.thumbnail((t_size, t_size), Image.ANTIALIAS)
image_file.save(t_filename, "PNG", optimize=True)
return True
except IOError:
print "save thumbnail ", t_filename
except IndexError as detail:
print "save thumbnail ", t_filename
print detail
except TypeError:
print "save thumbnail ", t_filename
| [
"Benjamin@schollnick.net"
] | Benjamin@schollnick.net |
efa042ef139c999206deb20d41dcf7665503cac3 | 131200f61f8fe4c59d2c96757ccca4e210f76455 | /django-whatsappbot-server/mysite/whatsappbot/functionalities/moodle/utility.py | d9bfdcc309f88d22a12a3c01ac49a4c5cc1ae290 | [] | no_license | RKKUNDU/info-mine | 31b80a4880958ccaad802638582e50cbe1766a87 | c86ae3fc4bf19221728006e245899686c519ba1c | refs/heads/main | 2023-01-31T19:57:42.482371 | 2020-12-18T13:05:35 | 2020-12-18T13:05:35 | 310,376,216 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 710 | py | import getpass
import sys
sys.path.append("../../")
class utils:
def get_credential():
'''
fetch moodle credential from config file or USER INPUT
'''
pass
# moodle = moodle_credential()
# if moodle.get_stored_token() is None or moodle.get_stored_userid is None:
# id, password = utils.take_input()
# moodle.write_credential(id, password)
# return moodle.get_stored_token(), moodle.get_stored_userid()
def take_input():
'''
Take moodle credential from user
'''
# id = input("Enter your Moodle User ID: ")
# password = getpass.getpass()
# return id, password
pass
| [
"kundu.rohit06@gmail.com"
] | kundu.rohit06@gmail.com |
987ab0b6c6d56227783ce5d319a505c1c5526fbf | aa64c62a3d246b87f3f1e5810a8f75b1d166aaf6 | /paradrop/daemon/paradrop/core/config/haproxy.py | fc29f54f582c5117987eb8e786324404287cdf69 | [
"Apache-2.0"
] | permissive | ParadropLabs/Paradrop | ca40b3373c0732c781f9c10d38da9b6e9fbd3453 | c910fd5ac1d1b5e234f40f9f5592cc981e9bb5db | refs/heads/master | 2023-02-26T17:51:53.058300 | 2022-03-01T17:46:10 | 2022-03-01T17:46:10 | 37,789,450 | 88 | 31 | Apache-2.0 | 2023-02-16T05:24:46 | 2015-06-20T23:18:38 | Python | UTF-8 | Python | false | false | 5,061 | py | """
This module is responsible for configuration haproxy.
"""
import os
import subprocess
from paradrop.base import settings
from paradrop.core.chute.chute_storage import ChuteStorage
from paradrop.core.container.chutecontainer import ChuteContainer
def generateConfigSections():
    """Build the haproxy configuration as an ordered list of sections.

    Each section is a dict with a "header" string and a list of "lines"
    that will be written indented beneath it (see writeConfigFile).
    Static sections (global, defaults, the portal backend, and the HTTP
    frontend) are always present; one set of frontend rules plus one
    backend section is appended for every running chute that exposes a
    web service.
    """
    sections = []
    # Static process-wide haproxy settings.
    sections.append({
        "header": "global",
        "lines": [
            "daemon",
            "maxconn 256",
        ]
    })
    sections.append({
        "header": "defaults",
        "lines": [
            "mode http",
            "timeout connect 5000ms",
            "timeout client 50000ms",
            "timeout server 50000ms"
        ]
    })
    # Default backend: the local admin portal listening on port 8080.
    sections.append({
        "header": "backend portal",
        "lines": [
            "server pd_portal 127.0.0.1:8080 maxconn 256"
        ]
    })
    # Custom variables:
    # - req.querymarker: will be set to the literal "?" if the original request
    # contains a query string. We will use this to construct a redirect with a
    # query string only if needed.
    # - req.subpath: will be set to the remainder of the path, if anything,
    # after removing /chutes/<chutename>, e.g. "/chutes/hello-world/index.html"
    # becomes "/index.html". This does not include the query string.
    frontend = {
        "header": "frontend http-in",
        "lines": [
            "bind *:80",
            "default_backend portal",
            "http-request set-var(req.querymarker) str(?) if { query -m found }",
            "http-request set-var(req.subpath) path,regsub(^/chutes/[^/]+,)"
        ]
    }
    sections.append(frontend)
    chuteStore = ChuteStorage()
    chutes = chuteStore.getChuteList()
    for chute in chutes:
        # Skip chutes with no web service, or whose container is not running.
        port, service = chute.get_web_port_and_service()
        if port is None or service is None:
            continue
        container = ChuteContainer(service.get_container_name())
        if not container.isRunning():
            continue
        # Generate a rule that matches HTTP host header to chute name.
        frontend['lines'].append("acl host_{} hdr(host) -i {}.chute.paradrop.org".format(
            chute.name, chute.name))
        frontend['lines'].append("use_backend {} if host_{}".format(
            chute.name, chute.name))
        # Generate rules that matches the beginning of the URL.
        # We need to be careful and either have an exact match
        # or make sure there is a slash or question mark after the chute name
        # to avoid mix-ups, e.g. "sticky-board" and "sticky-board-new".
        frontend['lines'].append("acl path_{} url /chutes/{}".format(
            chute.name, chute.name))
        frontend['lines'].append("acl path_{} url_beg /chutes/{}/".format(
            chute.name, chute.name))
        frontend['lines'].append("acl path_{} url_beg /chutes/{}?".format(
            chute.name, chute.name))
        # Try to find a host binding for the web port to redirect:
        # http://<host addr>/chutes/<chute>/<path> ->
        # http://<host addr>:<chute port>/<path>
        #
        # We need to do a lookup because the host port might be dynamically
        # assigned by Docker.
        #
        # Use HTTP code 302 for the redirect, which will not be cached by the
        # web browser. The port portion of the URL can change whenever the
        # chute restarts, so we don't want web browsers to cache it. Browsers
        # will cache a 301 (Moved Permanently) response.
        portconf = container.getPortConfiguration(port, "tcp")
        if len(portconf) > 0:
            # TODO: Are there other elements in the list?
            binding = portconf[0]
            frontend['lines'].append("http-request replace-value Host (.*):(.*) \\1")
            frontend['lines'].append("http-request redirect location http://%[req.hdr(host)]:{}%[var(req.subpath)]%[var(req.querymarker)]%[query] code 302 if path_{}".format(
                binding['HostPort'], chute.name))
        # Add a server at the chute's IP address.
        sections.append({
            "header": "backend {}".format(chute.name),
            "lines": [
                "server {} {}:{} maxconn 256".format(chute.name,
                    container.getIP(), port)
            ]
        })
    return sections
def writeConfigFile(output):
    """Render the generated haproxy sections to *output* as text.

    Each section header is written flush left, followed by its lines
    indented four spaces, then a blank separator line.
    """
    for section in generateConfigSections():
        output.write(section['header'] + "\n")
        for entry in section['lines']:
            output.write("    " + entry + "\n")
        output.write("\n")
def reconfigureProxy(update):
    """
    Reconfigure haproxy with forwarding and redirect rules.

    Writes a fresh config file, then (re)starts haproxy; if a pid file
    from a previous instance exists, '-sf' is passed so the old process
    finishes its connections before exiting.  The *update* argument is
    unused but kept for the update-pipeline call signature.
    """
    conf_path = os.path.join(settings.RUNTIME_HOME_DIR, "haproxy.conf")
    pid_path = os.path.join(settings.TMP_DIR, "haproxy.pid")

    with open(conf_path, "w") as output:
        writeConfigFile(output)

    command = ["haproxy", "-f", conf_path, "-p", pid_path]
    if os.path.exists(pid_path):
        with open(pid_path, "r") as source:
            old_pid = source.read().strip()
        command.extend(["-sf", old_pid])
    subprocess.call(command)
| [
"hartung@cs.wisc.edu"
] | hartung@cs.wisc.edu |
8ca76de59728c0a16b13a798b28e98865b5bdefc | a74e67be7b65bf303f3e3cf5430f372eacc0bdd2 | /utils/pascal_voc.py | ed105bc78bbc9f8c809cc0803ef98a3d3d54f011 | [] | no_license | Huanyongji/Yolo-tensorflow | 95c1af7d529640d806f0d2aa56d96f3603c2dbad | 7d18555f0f1884e6b8d7522601e60da5e64a1184 | refs/heads/master | 2021-08-24T00:32:00.313777 | 2017-12-07T08:28:18 | 2017-12-07T08:28:18 | 113,019,093 | 4 | 0 | null | 2017-12-04T11:02:54 | 2017-12-04T08:58:18 | Python | UTF-8 | Python | false | false | 5,820 | py | import os
import xml.etree.ElementTree as ET
import numpy as np
import cv2
import cPickle
import copy
import yolo.config as cfg
class pascal_voc(object):
    """Loader for the PASCAL VOC 2007 dataset in YOLO training format.

    Images are resized to (image_size, image_size) and ground truth is
    encoded on a cell_size x cell_size grid with 25 channels per cell:
    channel 0 is the object indicator, channels 1:5 hold the box as
    [center_x, center_y, width, height] in resized-image pixels, and
    channels from 5 onward one-hot encode the class (the 25-channel
    layout implies 20 classes).

    NOTE: Python 2 code (``xrange``, ``cPickle``).
    """
    def __init__(self, phase, rebuild=False):
        # phase: 'train' selects trainval.txt, anything else selects test.txt.
        # rebuild: when True, ignore the cached label pickle and reparse XML.
        self.devkil_path = os.path.join(cfg.PASCAL_PATH, 'VOCdevkit')
        self.data_path = os.path.join(self.devkil_path, 'VOC2007')
        self.cache_path = cfg.CACHE_PATH
        self.batch_size = cfg.BATCH_SIZE
        self.image_size = cfg.IMAGE_SIZE
        self.cell_size = cfg.CELL_SIZE
        self.classes = cfg.CLASSES
        # Map class name -> integer class index.
        self.class_to_ind = dict(zip(self.classes, xrange(len(self.classes))))
        self.flipped = cfg.FLIPPED
        self.phase = phase
        self.rebuild = rebuild
        # cursor/epoch track the position within the shuffled label list.
        self.cursor = 0
        self.epoch = 1
        self.gt_labels = None
        self.prepare()
    def get(self):
        """Return the next (images, labels) mini-batch, reshuffling at epoch end."""
        images = np.zeros((self.batch_size, self.image_size, self.image_size, 3))
        labels = np.zeros((self.batch_size, self.cell_size, self.cell_size, 25))
        count = 0
        while count < self.batch_size:
            imname = self.gt_labels[self.cursor]['imname']
            flipped = self.gt_labels[self.cursor]['flipped']
            images[count, :, :, :] = self.image_read(imname, flipped)
            labels[count, :, :, :] = self.gt_labels[self.cursor]['label']
            count += 1
            self.cursor += 1
            if self.cursor >= len(self.gt_labels):
                # Wrapped around: reshuffle and start a new epoch.
                np.random.shuffle(self.gt_labels)
                self.cursor = 0
                self.epoch += 1
        return images, labels
    def image_read(self, imname, flipped=False):
        """Load an image, resize it, convert BGR->RGB, scale pixels to [-1, 1],
        and optionally mirror it horizontally."""
        image = cv2.imread(imname)
        image = cv2.resize(image, (self.image_size, self.image_size))
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)
        image = (image / 255.0) * 2.0 - 1.0
        if flipped:
            # Mirror along the width axis.
            image = image[:, ::-1, :]
        return image
    def prepare(self):
        """Load the labels, optionally append horizontally flipped copies,
        shuffle, and store the result on the instance."""
        gt_labels = self.load_labels()
        if self.flipped:
            print('Appending horizontally-flipped training examples ...')
            gt_labels_cp = copy.deepcopy(gt_labels)
            for idx in range(len(gt_labels_cp)):
                gt_labels_cp[idx]['flipped'] = True
                # Mirror the label grid along the cell x-axis.
                gt_labels_cp[idx]['label'] = gt_labels_cp[idx]['label'][:, ::-1, :]
                for i in xrange(self.cell_size):
                    for j in xrange(self.cell_size):
                        if gt_labels_cp[idx]['label'][i, j, 0] == 1:
                            # Mirror the box center x-coordinate in pixel space.
                            gt_labels_cp[idx]['label'][i, j, 1] = self.image_size - 1 - gt_labels_cp[idx]['label'][i, j, 1]
            gt_labels += gt_labels_cp
        np.random.shuffle(gt_labels)
        self.gt_labels = gt_labels
        return gt_labels
    def load_labels(self):
        """Return the per-image label dicts, using the pickle cache unless
        ``self.rebuild`` forces a reparse of the annotation XML."""
        cache_file = os.path.join(self.cache_path, 'pascal_' + self.phase + '_gt_labels.pkl')
        if os.path.isfile(cache_file) and not self.rebuild:
            print('Loading gt_labels from: ' + cache_file)
            with open(cache_file, 'rb') as f:
                gt_labels = cPickle.load(f)
            return gt_labels
        print('Processing gt_labels from: ' + self.data_path)
        if not os.path.exists(self.cache_path):
            os.makedirs(self.cache_path)
        if self.phase == 'train':
            txtname = os.path.join(self.data_path, 'ImageSets', 'Main',
                                   'trainval.txt')
        else:
            txtname = os.path.join(self.data_path, 'ImageSets', 'Main',
                                   'test.txt')
        with open(txtname, 'r') as f:
            self.image_index = [x.strip() for x in f.readlines()]
        gt_labels = []
        for index in self.image_index:
            label, num = self.load_pascal_annotation(index)
            if num == 0:
                # Image has no annotated objects; skip it entirely.
                continue
            imname = os.path.join(self.data_path, 'JPEGImages', index + '.jpg')
            gt_labels.append({'imname': imname, 'label': label, 'flipped': False})
        print('Saving gt_labels to: ' + cache_file)
        with open(cache_file, 'wb') as f:
            cPickle.dump(gt_labels, f)
        return gt_labels
    def load_pascal_annotation(self, index):
        """
        Load image and bounding boxes info from XML file in the PASCAL VOC
        format.

        Returns (label, num_objs): label is the (cell, cell, 25) grid
        described on the class; num_objs counts all <object> entries in
        the XML, including any skipped because their cell was taken.
        """
        imname = os.path.join(self.data_path, 'JPEGImages', index + '.jpg')
        im = cv2.imread(imname)
        # Ratios mapping original-image pixels into the resized image.
        h_ratio = 1.0 * self.image_size / im.shape[0]
        w_ratio = 1.0 * self.image_size / im.shape[1]
        # im = cv2.resize(im, [self.image_size, self.image_size])
        label = np.zeros((self.cell_size, self.cell_size, 25))
        filename = os.path.join(self.data_path, 'Annotations', index + '.xml')
        tree = ET.parse(filename)
        objs = tree.findall('object')
        for obj in objs:
            bbox = obj.find('bndbox')
            # Make pixel indexes 0-based, scale them, and clamp into the image.
            x1 = max(min((float(bbox.find('xmin').text) - 1) * w_ratio, self.image_size - 1), 0)
            y1 = max(min((float(bbox.find('ymin').text) - 1) * h_ratio, self.image_size - 1), 0)
            x2 = max(min((float(bbox.find('xmax').text) - 1) * w_ratio, self.image_size - 1), 0)
            y2 = max(min((float(bbox.find('ymax').text) - 1) * h_ratio, self.image_size - 1), 0)
            cls_ind = self.class_to_ind[obj.find('name').text.lower().strip()]
            # Box as [center_x, center_y, width, height] in resized pixels.
            boxes = [(x2 + x1) / 2.0, (y2 + y1) / 2.0, x2 - x1, y2 - y1]
            x_ind = int(boxes[0] * self.cell_size / self.image_size)
            y_ind = int(boxes[1] * self.cell_size / self.image_size)
            if label[y_ind, x_ind, 0] == 1:
                # Cell already owns an object; keep only the first one.
                continue
            label[y_ind, x_ind, 0] = 1
            label[y_ind, x_ind, 1:5] = boxes
            label[y_ind, x_ind, 5 + cls_ind] = 1
        return label, len(objs)
| [
"noreply@github.com"
] | noreply@github.com |
46e70c92c945efb121e546f20f6ad5f36000bc5d | 3830a28419367f66491bf05c39a53e09943b8d38 | /HW5-4_WeijiaCheng.py | d4117bbc1efbcc25683cf0fbd024d8af878715b6 | [] | no_license | weijiacheng123/MIS3301_PythonHW | 553248d2e603baaf867cd73f4b1c87e67f9adbae | 6987b493505dffa5ccaf911ac3c560957e73ad05 | refs/heads/master | 2023-07-29T21:30:27.544904 | 2021-10-02T21:04:49 | 2021-10-02T21:04:49 | 412,910,120 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,299 | py | # The following is used as a global constant the contribution rate.
# Global constant: the fraction (5%) of pay the company contributes
# toward an employee's retirement plan.
CONTRIBUTION_RATE = 0.05
def main():
    """Prompt for the pay figures and report the retirement contributions."""
    pay_amount = float(input('Enter the gross pay: '))
    bonus_amount = float(input('Enter the amount of bonuses: '))
    pay_contrib = show_pay_contrib(pay_amount)
    bonus_contrib = show_bonus_contrib(bonus_amount)
    show_total_contrib(pay_contrib, bonus_contrib)
def show_pay_contrib(gross):
    """Display and return the retirement contribution for *gross* pay."""
    amount = gross * CONTRIBUTION_RATE
    print('Contribution for gross pay: $' + format(amount, ',.2f'))
    return amount
def show_bonus_contrib(bonus):
    """Display and return the retirement contribution for the *bonus* amount."""
    amount = bonus * CONTRIBUTION_RATE
    print('Contribution for bonuses: $' + format(amount, ',.2f'))
    return amount
def show_total_contrib(A, B):
    """Display the company's combined contribution (pay part + bonus part)."""
    combined = A + B
    print('Total contribution: $' + format(combined, ',.2f'))
# Call the main function.
main()
| [
"weijia_cheng1@baylor.edu"
] | weijia_cheng1@baylor.edu |
7b0b292babcbbb4b60a982708335b52f4a952e77 | dddde4af357478086ebda82d4ae2fc4e0ef7648a | /angularblog/urls.py | 6df6e8bf707ae8973436aff9001dd59ba03f17c5 | [] | no_license | zishanjawed/Team-Wave | cbcb33e9f514c02b86c329974e277b9a1118ffa2 | 35f3c1c50dcdfabb7d0d3bb9164b21ad6349cff6 | refs/heads/master | 2023-03-20T15:25:41.369941 | 2021-03-08T10:09:35 | 2021-03-08T10:09:35 | 345,611,177 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 810 | py | """angularblog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
import blog.urls
urlpatterns = [
path('admin/', admin.site.urls),
path('',include(blog.urls))
]
| [
"zishanexplore@gmail.com"
] | zishanexplore@gmail.com |
d0332e02065bc60741c313d243c08d4191eda31b | 101c7b6ebf1b43b0b469100b2d8cac5cdb0dc1da | /chainer/functions/normalization/l2_normalization.py | a8885c6a1ddaa918ea34d18e30c8f672c3d5e6f6 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | yanii/chainer | 62929d69241dc25d9df87b5845b276a7e1e0e2a0 | c6f65db5689fd3ce99b101b89b5dbef42b0477d1 | refs/heads/master | 2021-01-12T21:17:37.535711 | 2016-07-14T11:23:18 | 2016-07-14T11:23:18 | 63,348,758 | 1 | 1 | null | 2016-07-14T15:36:51 | 2016-07-14T15:36:51 | null | UTF-8 | Python | false | false | 3,119 | py | import numpy
from chainer import cuda
from chainer import function
from chainer.utils import array
from chainer.utils import type_check
class NormalizeL2(function.Function):
"""L2 normalization"""
def __init__(self, eps=1e-5):
self.eps = eps
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 1)
x_type, = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
)
def forward_cpu(self, inputs):
x = array.as_mat(inputs[0])
norm = numpy.linalg.norm(x, axis=1) + self.eps
return x / norm[:, numpy.newaxis],
def forward_gpu(self, inputs):
x = array.as_mat(inputs[0])
l2norm_kernel = cuda.cupy.ReductionKernel(
'T x, float32 eps',
'T y',
'x * x',
'a + b',
'y = sqrt(a) + eps',
'0',
'l2norm'
)
norm = cuda.cupy.broadcast_to(
l2norm_kernel(x, self.eps, axis=1).reshape(-1, 1),
x.shape
)
return x / norm,
def backward_cpu(self, inputs, gy):
x = inputs[0]
gy = gy[0]
norm = numpy.linalg.norm(x, axis=1) + self.eps
norm = norm[:, numpy.newaxis]
gx = gy * norm - (x * gy).sum(axis=1)[:, numpy.newaxis] * x / norm
gx = gx / norm**2
return gx,
def backward_gpu(self, inputs, gy):
x = inputs[0]
gy = gy[0]
l2norm_kernel = cuda.cupy.ReductionKernel(
'T x, float32 eps',
'T y',
'x * x',
'a + b',
'y = sqrt(a) + eps',
'0',
'l2norm'
)
norm = cuda.cupy.broadcast_to(
l2norm_kernel(x, self.eps, axis=1).reshape(-1, 1),
x.shape
)
x_gy = cuda.cupy.broadcast_to(
(x * gy).sum(axis=1, keepdims=True),
x.shape
)
gx = cuda.elementwise(
'T gy, T x, T x_gy, T norm',
'T gx',
'gx = (gy * norm - x_gy * x / norm) / (norm * norm)',
'l2_bwd')(gy, x, x_gy, norm)
return gx,
def normalize(x, eps=1e-5):
"""L2 norm squared (a.k.a. Euclidean norm).
This function implements L2 normalization on a 1D vector. No reduction
is done along batch axis. Let :math:`x` be an input vector of dimension
:math:`(N, K)`, where :math:`N` and :math:`K` denote mini-batchsize and the
dimension of the input variable. Then, this function computes an output
vector :math:`y` by the following equation:
.. math::
y_i = {x_i \\over \\| x_i \\|_2}
:math:`eps` is used to avoid division by zero when :math:`x_i=0`
Args:
x (~chainer.Variable): Two dimensional output variable. The first
dimension is assumed to be the mini-batch dimension.
eps (float): Epsilon value for numerical stability.
Returns:
~chainer.Variable: Two dimensional output variable, the same shape
as :math:`x`.
"""
return NormalizeL2(eps)(x)
| [
"amitibo@campus.technion.ac.il"
] | amitibo@campus.technion.ac.il |
3ac40ce48b69bac9a44b9ef69787b2fe2b9078b5 | 250787a6ef3507dbcec77d4d8021f2fe7f069287 | /Code/pythonCrawler/pyutils/common/DateUtils.py | 909123095cc8fca4e1379ebd40b56197afbabec8 | [] | no_license | mayl1/pythonCrawler | ba9ab4aad00fefda3a5872d3d895fc1dfe146acc | cd48a427887dac0e342d079a186ad6321d58a62f | refs/heads/master | 2021-04-28T01:18:49.758492 | 2018-02-21T01:10:45 | 2018-02-21T01:10:45 | 122,274,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,150 | py | # -*- coding: utf-8 -*-
"""
Date Library - DateUtils
~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2017 by Dongzhizhong
:license: WTFPL (Do What the Fuck You Want to Public License).
"""
"""
Python 日期和时间
Python 程序能用很多方式处理日期和时间,转换日期格式是一个常见的功能。
Python 提供了一个 time 和 calendar 模块可以用于格式化日期和时间。
时间间隔是以秒为单位的浮点小数。
每个时间戳都以自从1970年1月1日午夜(历元)经过了多长时间来表示。
Python 的 time 模块下有很多函数可以转换常见日期格式。如函数time.time()用于获取当前时间戳, 如下实例:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import time; # 引入time模块
ticks = time.time()
print "当前时间戳为:", ticks
以上实例输出结果:
当前时间戳为: 1459994552.51
时间戳单位最适于做日期运算。但是1970年之前的日期就无法以此表示了。太遥远的日期也不行,UNIX和Windows只支持到2038年。
什么是时间元组?
很多Python函数用一个元组装起来的9组数字处理时间:
序号 字段 值
0 4位数年 2008
1 月 1 到 12
2 日 1到31
3 小时 0到23
4 分钟 0到59
5 秒 0到61 (60或61 是闰秒)
6 一周的第几日 0到6 (0是周一)
7 一年的第几日 1到366 (儒略历)
8 夏令时 -1, 0, 1, -1是决定是否为夏令时的旗帜
上述也就是struct_time元组。这种结构具有如下属性:
序号 属性 值
0 tm_year 2008
1 tm_mon 1 到 12
2 tm_mday 1 到 31
3 tm_hour 0 到 23
4 tm_min 0 到 59
5 tm_sec 0 到 61 (60或61 是闰秒)
6 tm_wday 0到6 (0是周一)
7 tm_yday 1 到 366(儒略历)
8 tm_isdst -1, 0, 1, -1是决定是否为夏令时的旗帜
获取当前时间
从返回浮点数的时间辍方式向时间元组转换,只要将浮点数传递给如localtime之类的函数。
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import time
localtime = time.localtime(time.time())
print "本地时间为 :", localtime
以上实例输出结果:
本地时间为 : time.struct_time(tm_year=2016, tm_mon=4, tm_mday=7, tm_hour=10, tm_min=3, tm_sec=27, tm_wday=3, tm_yday=98, tm_isdst=0)
获取格式化的时间
你可以根据需求选取各种格式,但是最简单的获取可读的时间模式的函数是asctime():
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import time
localtime = time.asctime( time.localtime(time.time()) )
print "本地时间为 :", localtime
以上实例输出结果:
本地时间为 : Thu Apr 7 10:05:21 2016
格式化日期
我们可以使用 time 模块的 strftime 方法来格式化日期,:
time.strftime(format[, t])
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import time
# 格式化成2016-03-20 11:45:39形式
print time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
# 格式化成Sat Mar 28 22:24:24 2016形式
print time.strftime("%a %b %d %H:%M:%S %Y", time.localtime())
# 将格式字符串转换为时间戳
a = "Sat Mar 28 22:24:24 2016"
print time.mktime(time.strptime(a,"%a %b %d %H:%M:%S %Y"))
以上实例输出结果:
2016-04-07 10:25:09
Thu Apr 07 10:25:09 2016
1459175064.0
python中时间日期格式化符号:
%y 两位数的年份表示(00-99)
%Y 四位数的年份表示(000-9999)
%m 月份(01-12)
%d 月内中的一天(0-31)
%H 24小时制小时数(0-23)
%I 12小时制小时数(01-12)
%M 分钟数(00=59)
%S 秒(00-59)
%a 本地简化星期名称
%A 本地完整星期名称
%b 本地简化的月份名称
%B 本地完整的月份名称
%c 本地相应的日期表示和时间表示
%j 年内的一天(001-366)
%p 本地A.M.或P.M.的等价符
%U 一年中的星期数(00-53)星期天为星期的开始
%w 星期(0-6),星期天为星期的开始
%W 一年中的星期数(00-53)星期一为星期的开始
%x 本地相应的日期表示
%X 本地相应的时间表示
%Z 当前时区的名称
%% %号本身
获取某月日历
Calendar模块有很广泛的方法用来处理年历和月历,例如打印某月的月历:
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import calendar
cal = calendar.month(2016, 1)
print "以下输出2016年1月份的日历:"
print cal;
以上实例输出结果:
以下输出2016年1月份的日历:
January 2016
Mo Tu We Th Fr Sa Su
1 2 3
4 5 6 7 8 9 10
11 12 13 14 15 16 17
18 19 20 21 22 23 24
25 26 27 28 29 30 31
Time 模块
Time 模块包含了以下内置函数,既有时间处理相的,也有转换时间格式的:
序号 函数及描述
1 time.altzone
返回格林威治西部的夏令时地区的偏移秒数。如果该地区在格林威治东部会返回负值(如西欧,包括英国)。对夏令时启用地区才能使用。
2 time.asctime([tupletime])
接受时间元组并返回一个可读的形式为"Tue Dec 11 18:07:14 2008"(2008年12月11日 周二18时07分14秒)的24个字符的字符串。
3 time.clock( )
用以浮点数计算的秒数返回当前的CPU时间。用来衡量不同程序的耗时,比time.time()更有用。
4 time.ctime([secs])
作用相当于asctime(localtime(secs)),未给参数相当于asctime()
5 time.gmtime([secs])
接收时间辍(1970纪元后经过的浮点秒数)并返回格林威治天文时间下的时间元组t。注:t.tm_isdst始终为0
6 time.localtime([secs])
接收时间辍(1970纪元后经过的浮点秒数)并返回当地时间下的时间元组t(t.tm_isdst可取0或1,取决于当地当时是不是夏令时)。
7 time.mktime(tupletime)
接受时间元组并返回时间辍(1970纪元后经过的浮点秒数)。
8 time.sleep(secs)
推迟调用线程的运行,secs指秒数。
9 time.strftime(fmt[,tupletime])
接收以时间元组,并返回以可读字符串表示的当地时间,格式由fmt决定。
10 time.strptime(str,fmt='%a %b %d %H:%M:%S %Y')
根据fmt的格式把一个时间字符串解析为时间元组。
11 time.time( )
返回当前时间的时间戳(1970纪元后经过的浮点秒数)。
12 time.tzset()
根据环境变量TZ重新初始化时间相关设置。
Time模块包含了以下2个非常重要的属性:
序号 属性及描述
1 time.timezone
属性time.timezone是当地时区(未启动夏令时)距离格林威治的偏移秒数(>0,美洲;<=0大部分欧洲,亚洲,非洲)。
2 time.tzname
属性time.tzname包含一对根据情况的不同而不同的字符串,分别是带夏令时的本地时区名称,和不带的。
日历(Calendar)模块
此模块的函数都是日历相关的,例如打印某月的字符月历。
星期一是默认的每周第一天,星期天是默认的最后一天。更改设置需调用calendar.setfirstweekday()函数。模块包含了以下内置函数:
序号 函数及描述
1 calendar.calendar(year,w=2,l=1,c=6)
返回一个多行字符串格式的year年年历,3个月一行,间隔距离为c。 每日宽度间隔为w字符。每行长度为21* W+18+2* C。l是每星期行数。
2 calendar.firstweekday( )
返回当前每周起始日期的设置。默认情况下,首次载入caendar模块时返回0,即星期一。
3 calendar.isleap(year)
是闰年返回True,否则为false。
4 calendar.leapdays(y1,y2)
返回在Y1,Y2两年之间的闰年总数。
5 calendar.month(year,month,w=2,l=1)
返回一个多行字符串格式的year年month月日历,两行标题,一周一行。每日宽度间隔为w字符。每行的长度为7* w+6。l是每星期的行数。
6 calendar.monthcalendar(year,month)
返回一个整数的单层嵌套列表。每个子列表装载代表一个星期的整数。Year年month月外的日期都设为0;范围内的日子都由该月第几日表示,从1开始。
7 calendar.monthrange(year,month)
返回两个整数。第一个是该月的星期几的日期码,第二个是该月的日期码。日从0(星期一)到6(星期日);月从1到12。
8 calendar.prcal(year,w=2,l=1,c=6)
相当于 print calendar.calendar(year,w,l,c).
9 calendar.prmonth(year,month,w=2,l=1)
相当于 print calendar.calendar(year,w,l,c)。
10 calendar.setfirstweekday(weekday)
设置每周的起始日期码。0(星期一)到6(星期日)。
11 calendar.timegm(tupletime)
和time.gmtime相反:接受一个时间元组形式,返回该时刻的时间辍(1970纪元后经过的浮点秒数)。
12 calendar.weekday(year,month,day)
返回给定日期的日期码。0(星期一)到6(星期日)。月份为 1(一月) 到 12(12月)。
"""
import time
class DateUtils(object):
    """Static helpers for Unix timestamps and strftime/strptime formatting.

    All "current time" helpers use the local timezone.
    """
    @staticmethod
    def getSysTimeSecond():
        """Current Unix timestamp in whole seconds."""
        return int(time.time())
    @staticmethod
    def getSysMillisecond():
        """Current Unix timestamp in whole milliseconds."""
        return int(time.time() * 1000)
    @staticmethod
    def getSysTimeFormat(format):
        """Current local time rendered with strftime *format*,
        e.g. DateUtils.getSysTimeFormat("%Y-%m-%d %H:%M:%S")."""
        return time.strftime(format, time.localtime())
    @staticmethod
    def getTimeSecondFormat(timeText, format):
        """Parse *timeText* with strptime *format* and return its Unix
        timestamp in seconds (local time)."""
        return int(time.mktime(time.strptime(timeText, format)))
    @staticmethod
    def getTimeFormatBySecond(secondValue, format):
        """Render a Unix timestamp (seconds) as local time using *format*."""
        return time.strftime(format, time.localtime(secondValue))
if __name__ == '__main__':
    # Manual smoke test: exercise each helper and print the results.
    print("getSysTime:",DateUtils.getSysTimeSecond())
    print("getSysMillisecond:", DateUtils.getSysMillisecond())
    print("getSysTimeFormat:", DateUtils.getSysTimeFormat("%Y-%m-%d %H:%M:%S"))
    print("getTimeSecondFormat:", DateUtils.getTimeSecondFormat("2018-01-03 11:44:41", "%Y-%m-%d %H:%M:%S"))
print("getTimeFormatBySecond:", DateUtils.getTimeFormatBySecond(1514951081, "%Y-%m-%d %H:%M:%S")) | [
"13718706737@163.com"
] | 13718706737@163.com |
b5967a50b86a656975b1aac99eda67cd701a9789 | 5b4790ff194c47ecc2da2c583549b566c7e6d2b2 | /config.py | d9371ba25bf0611e340ef1b90fc65c91a6b27fa5 | [] | no_license | RyanAquino/slack-bot-ryanaq | 15bee8a346a7e362b5d416174274540d426e8a78 | 3a2fc3a83e641002a8e77f78e0751d831a0cf5d6 | refs/heads/master | 2022-12-10T14:29:56.612421 | 2020-06-30T10:11:54 | 2020-06-30T10:11:54 | 143,090,581 | 1 | 0 | null | 2022-12-08T11:00:09 | 2018-08-01T01:56:33 | Python | UTF-8 | Python | false | false | 157 | py | # Twitter Developer access keys
API_KEY = ''
API_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_SECRET = ''
# Slack API incoming webhook url
HOOK_URL = ''
OAUTH = ''
| [
"ryan.aquino7110@gmail.com"
] | ryan.aquino7110@gmail.com |
822ea425411bdb6781d094c73c356248dc0ce97c | d3380c20a1b0b5a90e7b67bf86d90074940cdb7e | /getCookie.py | 07d4205bc6125fa1af1a895db395deb4957f3d79 | [] | no_license | feiteng/LCSubmit | f0ccd95e48cf67eab89af169682117756b9a7a7d | baeeadff377c1ee6834e4e2518c5ab9370374c5a | refs/heads/master | 2023-04-27T20:31:28.040308 | 2021-05-14T03:26:35 | 2021-05-14T03:26:35 | 366,865,400 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 570 | py | import os, configparser
# import setCookie
def getFromFile(str):
    """Return the value of option *str* from the [cookies] section of cookies.ini.

    Returns the sentinel '#' when the file is missing/unparsable or the
    section/option is absent, matching what the callers expect.
    NOTE: the parameter name shadows the builtin ``str``; it is kept
    unchanged for backward compatibility with existing callers.
    """
    config = configparser.ConfigParser()
    try:
        # ConfigParser.read() silently ignores a missing file instead of
        # raising, so the failure that actually needs catching is the
        # KeyError from the section/option lookup below.
        config.read('cookies.ini')
        return config['cookies'][str]
    except (configparser.Error, KeyError) as err:
        print(err)
        print('Error finding cookies config file')
        return '#'
def getCSRFToken():
    """Return the stored CSRF token, or '#' if it cannot be read."""
    try:
        return getFromFile('CSRFTOKEN')
    except Exception:
        # Narrowed from a bare except: still degrades to the '#' sentinel
        # on any read failure, but no longer swallows KeyboardInterrupt
        # and SystemExit.
        return '#'
def getLeetcodeSession():
    """Return the stored LeetCode session cookie, or '#' if it cannot be read."""
    try:
        return getFromFile('LEETCODE_SESSION')
    except Exception:
        # Narrowed from a bare except: still degrades to the '#' sentinel
        # on any read failure, but no longer swallows KeyboardInterrupt
        # and SystemExit.
        return '#'
| [
"li.feiteng@gmail.com"
] | li.feiteng@gmail.com |
1c4bdff37adc167430e03f699792f53a4f2a4030 | c5e2022993f33e9b59c8d815d664ca86db706cea | /products/migrations/0005_auto_20200607_1858.py | 943448e6f39a02bcfc712a3430f6a16569ee906d | [] | no_license | Code-Institute-Submissions/ColourPerfect-e-commerce | c38b4b119cbbd73bb8ff2e568a3fb19e5049001e | fc5dd74c087fee5b990a1216148b7764c6b289ce | refs/heads/master | 2022-11-11T18:54:18.997957 | 2020-07-10T09:18:01 | 2020-07-10T09:18:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 409 | py | # Generated by Django 3.0.6 on 2020-06-07 17:58
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0004_auto_20200607_1849'),
]
operations = [
migrations.AlterField(
model_name='colour',
name='name',
field=models.CharField(blank=True, max_length=254, null=True),
),
]
| [
"amalinowska.p@gmail.com"
] | amalinowska.p@gmail.com |
099c826ca381aac22fe38edc219e52b6862f4c2c | 92f466877c55b154d20c2281e33de05ff03fd9ab | /Multivariate-Linear-Regression/hypothesis.py | 509ce0c4446e0ddc58a6443945b265ff77dff904 | [] | no_license | linonymous/Machine-Learning-Algorithms | 3c33be3ffddd9899e81fb9c32e5c7129ef74e6e9 | 9cf2403ffcca32621eb784d7cc9dbf76edbc3e3c | refs/heads/master | 2021-01-18T05:00:25.109801 | 2017-11-23T18:30:33 | 2017-11-23T18:30:33 | 84,276,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | import numpy as np
def hypothesis(input_x, theta):
    """Linear-regression hypothesis: the dot product of the inputs and parameters."""
    prediction = np.dot(input_x, theta)
    return prediction
"iamlinonymous@gmail.com"
] | iamlinonymous@gmail.com |
5aad9c32a7809332a91ba9a85801d876ebcd853b | 23b3242a18da2281e82ac7688d43939b217a1316 | /coordinator_start.py | 1b7cf0f5ce8013ac0f77c4e936345ba433a397b3 | [
"MIT"
] | permissive | danielczech/meerkat-backend-interface | 517e18bfa32ef78a938ea790f7040ff3831138e1 | e21f05eb174ec1f8005067f67aa15fa795b7b5a1 | refs/heads/master | 2023-06-10T23:49:30.963047 | 2021-06-29T20:51:21 | 2021-06-29T20:51:21 | 176,833,160 | 1 | 1 | MIT | 2019-04-17T20:59:15 | 2019-03-20T23:35:09 | Python | UTF-8 | Python | false | false | 1,757 | py | #!/usr/bin/env python
from optparse import OptionParser
import signal
import sys
import logging
from meerkat_backend_interface.coordinator import Coordinator
from meerkat_backend_interface.logger import log, set_logger
def cli(prog = sys.argv[0]):
    """Command line interface.

    Parses the Redis port, config filename and initial trigger mode from
    the command line and hands them to main().  The *prog* parameter is
    accepted for convention but not otherwise used.
    """
    usage = "usage: %prog [options]"
    parser = OptionParser(usage=usage)
    parser.add_option('-p', '--port', dest='port', type=int,
                      help='Redis port to connect to', default=6379)
    parser.add_option('-c', '--config', dest='cfg_file', type=str,
                      help='Config filename (yaml)', default = 'config.yml')
    parser.add_option('-t', '--triggermode', dest='triggermode', type=str,
                      help="""Trigger mode:
                      \'idle\': PKTSTART will not be sent.
                      \'auto\': PKTSTART will be sent each
                      time a target is tracked.
                      \'armed\': PKTSTART will only be sent for
                      the next target. Thereafter, the state
                      will transition to idle.'
                      """,
                      default = 'idle')
    (opts, args) = parser.parse_args()
    main(port=opts.port, cfg_file=opts.cfg_file, triggermode=opts.triggermode)
def on_shutdown():
    """Log the shutdown and terminate the process (installed as the SIGINT handler)."""
    log.info("Coordinator shutting down.")
    sys.exit()
def main(port, cfg_file, triggermode):
    """Configure logging, install the SIGINT handler and run the Coordinator.

    Arguments mirror the CLI options: Redis port, YAML config filename,
    and the initial trigger mode ('idle', 'auto' or 'armed').
    """
    # NOTE: this local `log` shadows the module-level `log` import.
    log = set_logger(log_level = logging.DEBUG)
    log.info("Starting Coordinator")
    coord = Coordinator(port, cfg_file, triggermode)
    signal.signal(signal.SIGINT, lambda sig, frame: on_shutdown())
    coord.start()
# Run the CLI when executed as a script.
if(__name__ == '__main__'):
    cli()
| [
"daniel.czech@protonmail.com"
] | daniel.czech@protonmail.com |
7c97eebca3c268aac0fac67c275cb89a004e90c4 | 7d6f530215177172c1aff4f7d52ec1b96db51612 | /backend/src/config.py | aa133308bd661935eea90e88b09dca49f795038f | [] | no_license | vdoan98/rss-feed | 6d3de331e3939e37b332abfe44b208e0772a6a0b | 5775a70a9c04efb97780bdf3685579dd9724efc9 | refs/heads/master | 2022-12-11T11:33:00.370738 | 2020-09-15T22:00:36 | 2020-09-15T22:00:36 | 294,489,395 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 316 | py |
# Flask application settings.
# NOTE(review): real-looking credentials are committed here; they should be
# loaded from environment variables rather than kept in source control.
SECRET_KEY = 'emmanualkant'
DEBUG = True
# Auth0 application credentials.
CLIENT_ID = '9N4Gb3f6empZr5irxRVOFaS4BIPZUIqG'
CLIENT_SECRET = 'J-KSi3l9jVoRNp_SWe3dVzx4rctnyA5oV11OfUYKbUlvnG1yr6yc5I9mHA1ML1Ta'
# NOTE(review): the trailing comma makes API_URL a one-element tuple, not a
# string -- confirm whether that is intentional before changing it.
API_URL = 'https://vdoan98.us.auth0.com',
ACCESS_TOKEN='https://vdoan98.us.auth0.com/token'
AUTHORIZE_URL = 'https://vdoan98.us.auth0.com/token' | [
"vdoan98@gmail.com"
] | vdoan98@gmail.com |
9b1e4ab95d98b1510916fa7f45ad4267cf1424e8 | 47dad1897990394883f2c16b0904e4cdae3e054c | /datadriven/datasets/friedman/friedman.py | 0fed585ae10c6dfe20af20e9a798220d5ee7a5ce | [
"LicenseRef-scancode-generic-exception",
"BSD-3-Clause"
] | permissive | QianWanghhu/SGpp | 430207e3f533eb96d57540b00475d303b0d955e5 | c36a95127d0ec833d4f45b8ed44ad3ffe482ae64 | refs/heads/master | 2020-09-20T03:37:26.170177 | 2019-11-26T10:43:02 | 2019-11-26T10:43:02 | 224,367,076 | 2 | 0 | NOASSERTION | 2019-11-27T07:08:00 | 2019-11-27T07:07:59 | null | UTF-8 | Python | false | false | 3,837 | py | # Copyright (C) 2008-today The SG++ project
# This file is part of the SG++ project. For conditions of distribution and
# use, please see the copyright notice provided with SG++ or at
# sgpp.sparsegrids.org
#!/usr/bin/python
import sys, os, re, optparse, random, math
sys.path.append("../../bin/")
import tools
from pysgpp import *
# Parse command-line arguments.
# NOTE: this is a Python 2 script (print statements and xrange below).
parser = optparse.OptionParser()
parser.set_usage('''%prog options
Creates data for friedman1-3
''')
parser.add_option("-f", "--friedman", dest="friedman", action="store", type="int",
                  default=None,
                  help="Which friedman dataset to create (1-3)")
parser.add_option("--seed", dest="seed", action="store", type="int",
                  default=None,
                  help="Seed for random generator (optional)")
parser.add_option("-o", "--outfile", dest="outfile", action="store",
                  default=None,
                  help="Filename of the outfile. Otherwise output to stdout.")
parser.add_option("-N", dest="N", action="store", type="int",
                  default=None,
                  help="Number of data points to create")
(options,args)=parser.parse_args()
# Check arguments: a missing/invalid option prints a message, then
# parse_args(['-h']) shows the usage text and exits the program.
if not options.N:
    print "-N missing"
    parser.parse_args(['-h'])
if not options.friedman or options.friedman < 1 or options.friedman > 3:
    print "-f missing or wrong"
    parser.parse_args(['-h'])
# Seed the RNG only when requested, for reproducible datasets.
if options.seed:
    random.seed(options.seed)
# friedman1: ten uniform [0,1) inputs; the target depends on the first
# five of them plus N(0, 1) noise.  Output matrix has 11 columns.
if options.friedman == 1:
    namestring = 'Friedman1, %d data points' % (options.N)
    X = DataMatrix(options.N, 11)
    p = DataVector(11)
    dds = range(10)
    for i in xrange(options.N):
        for d in dds:
            p[d] = random.random()
        eps = random.normalvariate(0.0,1.0)
        # $ 10\sin(\pi x_0 x_1) + 20(x_2-0.5)^2 + 10x_3 + 5x_4 + \epsilon $
        p[10] = 10.0*math.sin(math.pi*p[0]*p[1]) + 20.0*(p[2]-0.5)**2 + 10*p[3] + 5*p[4] + eps
        X.setRow(i,p)
# friedman2: four inputs on different ranges, target plus N(0, 125) noise.
elif options.friedman == 2:
    namestring = 'Friedman2, %d data points' % (options.N)
    X = DataMatrix(options.N, 5)
    p = DataVector(5)
    for i in xrange(options.N):
        p[0] = random.uniform(0, 100)
        p[1] = random.uniform(40*math.pi, 560*math.pi)
        p[2] = random.uniform(0, 1)
        p[3] = random.uniform(1, 11)
        eps = random.normalvariate(0.0,125.0)
        # $ \left( x_0^2 + \left( x_1 x_2 - (x_1 x_3)^{-1} \right)^2 \right)^0.5 + \epsilon $
        p[4] = ( p[0]**2 + (p[1]*p[2] - 1.0/(p[1]*p[3]))**2 )**0.5 + eps
        X.setRow(i,p)
# friedman3: same input ranges as friedman2, arctan target, N(0, 0.1) noise.
elif options.friedman == 3:
    namestring = 'Friedman3, %d data points' % (options.N)
    X = DataMatrix(options.N, 5)
    p = DataVector(5)
    for i in xrange(options.N):
        p[0] = random.uniform(0, 100)
        p[1] = random.uniform(40*math.pi, 560*math.pi)
        p[2] = random.uniform(0, 1)
        p[3] = random.uniform(1, 11)
        eps = random.normalvariate(0.0,0.1)
        # $ \atan \left( \left( x_1 x_2 - (x_1 x_3)^{-1} \right) / x_0 \right) + \epsilon $
        p[4] = math.atan( (p[1]*p[2] - 1.0/(p[1]*p[3])) / p[0] ) + eps
        X.setRow(i,p)
else:
    # Unreachable after the argument check above; defensive exit.
    sys.exit(1)
# Output: a ".csv" outfile gets plain CSV via numpy; any other outfile (or
# stdout) gets ARFF with one numeric attribute per input column.
if options.outfile and ".csv" in options.outfile:
    from numpy import savetxt
    #header = ','.join(['x%d'%i for i in xrange(X.getNcols()-1)] + ['classes'])
    savetxt(options.outfile, X.array(), fmt='%.12f', delimiter = ',')
    # NOTE(review): exits with status 1 even though the CSV was written
    # successfully -- confirm whether a zero exit code was intended.
    sys.exit(1)
elif options.outfile:
    fd = tools.gzOpen(options.outfile, 'w')
else:
    fd = sys.stdout
fd.write("""@RELATION "%s"\n\n""" % (namestring))
for d in range(X.getNcols()-1):
    fd.write("""@ATTRIBUTE x%d NUMERIC\n""" % (d))
fd.write("""@ATTRIBUTE class NUMERIC\n\n@Data\n""")
for i in xrange(X.getNrows()):
    X.getRow(i, p)
    fd.write(','.join([str(p[d]) for d in range(X.getNcols())])+"\n")
if options.outfile:
    fd.close()
| [
"roehner@in.tum.de"
] | roehner@in.tum.de |
536cec1f4e6c3da84f27eaf0da067e7082bd07f9 | 5fb17857491c661c4b070417a04b4a428b0dbdb6 | /Software/Image Process & Robot Tracking/robot_tracker.py | 5824d0314103685b1a63a7695be8a6f31e1060e8 | [
"MIT"
] | permissive | EWA-Mechatronics/ME462-Project | 7a690ecc85cdd1ccb18e1f6e20b4858eac075edc | 23ba7331ffe845c1256209ca465cecec5092c8fd | refs/heads/master | 2021-02-08T04:41:03.907211 | 2020-08-05T03:51:42 | 2020-08-05T03:51:42 | 244,110,376 | 5 | 5 | MIT | 2020-03-12T12:37:47 | 2020-03-01T08:04:33 | null | UTF-8 | Python | false | false | 4,150 | py | '''
Detect ArUco robot markers in camera frames and extract each robot's
location and orientation relative to the origin marker.
'''
import cv2
from cv2 import aruco
import numpy as np
import math
camera = cv2.VideoCapture(0 + cv2.CAP_DSHOW) # Enables camera
def isRotationMatrix(R):
    """Return True when R is (numerically) a valid rotation matrix.

    A rotation matrix is orthogonal, so R^T R must equal the identity;
    R is accepted when the deviation from the identity is tiny.
    """
    deviation = np.identity(3, dtype=R.dtype) - np.dot(np.transpose(R), R)
    return np.linalg.norm(deviation) < 1e-6
def rotationMatrixToEulerAngles(R):
    """Convert a 3x3 rotation matrix to Euler angles (radians).

    Returns np.array([x, y, z]) — rotations about the x, y and z axes.
    Raises AssertionError if R is not a valid rotation matrix.
    """
    assert(isRotationMatrix(R))
    # sy ~ cos(y); when it vanishes the matrix is in gimbal lock ("singular").
    sy = math.sqrt(R[0,0] * R[0,0] + R[1,0] * R[1,0])
    singular = sy < 1e-6
    if not singular :
        x = math.atan2(R[2,1] , R[2,2])
        y = math.atan2(-R[2,0], sy)
        z = math.atan2(R[1,0], R[0,0])
    else :
        # Degenerate case: z is unrecoverable, conventionally set to 0.
        x = math.atan2(-R[1,2], R[1,1])
        y = math.atan2(-R[2,0], sy)
        z = 0
    return np.array([x, y, z])
def orientation(rvec):
    """Return the z-axis Euler angle (radians) of an ArUco rotation vector.

    The Rodrigues vector from pose estimation is expanded to a rotation
    matrix, converted to Euler angles, and only the yaw component is kept.
    """
    rod,_ = cv2.Rodrigues(rvec)
    ori = rotationMatrixToEulerAngles(rod)
    return ori[2]
def coordinate(o_x, o_y, x, y):
    """Express a marker position relative to the origin marker.

    Offsets are scaled by 1000 (presumably metres -> millimetres —
    confirm against the pose-estimation units).
    """
    return 1000 * (x - o_x), 1000 * (y - o_y)
#Camera matrix found by calibration.
mtx = np.array([[736.84998156, 0. , 348.02940053],
[ 0. , 736.84998156, 269.56120894],
[ 0. , 0. , 1. ]])
#Distortion coefficents of camera found by calibration.
dist = np.array([[-5.87090932e+01],
[ 8.58010534e+02],
[ 5.12005939e-05],
[ 2.27767059e-03],
[ 7.60525477e+02],
[-5.82126026e+01],
[ 8.28492084e+02],
[ 1.20930421e+03],
[ 0.00000000e+00],
[ 0.00000000e+00],
[ 0.00000000e+00],
[ 0.00000000e+00],
[ 0.00000000e+00],
[ 0.00000000e+00]])
NoneType=type(None)
while True:
#Takes image from camera and converts it to gray image.
return_value, img = camera.read()
img = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#Choosen aruco marker library.
dictionary = aruco.Dictionary_get(aruco.DICT_ARUCO_ORIGINAL)
parameters = aruco.DetectorParameters_create()
#Detection of markers.
marker_corners, marker_IDs, rejected_canditates = aruco.detectMarkers(img, dictionary, parameters=parameters)
#Getting rotation and translation vectors from detected markers.
rvec, tvec, _ = aruco.estimatePoseSingleMarkers(marker_corners, 0.04, mtx, dist)
datas = []
if type(marker_IDs) != NoneType:
if [76] in marker_IDs:
#Marker with no 76 choosen as origin and its aspects found to create reference.
pos_of_origin = marker_IDs.tolist().index([76])
origin_x = tvec[pos_of_origin][0][0]
origin_y = tvec[pos_of_origin][0][1]
origin_orient = orientation(rvec[pos_of_origin])
for i in range(len(marker_IDs)):
data = []
if marker_IDs[i] == [76]:
continue
#Calculations of positon and orientation of detected robot markers w.r.t. origin.
orient = orientation(rvec[i]) - origin_orient
x_rev = tvec[i][0][0]
y_rev = tvec[i][0][1]
x, y = coordinate(origin_x, origin_y, x_rev, y_rev)
data.extend((marker_IDs[i][0], x, y, orient))
datas.append(data)
if datas != []:
#print(datas)
f = open("t_r_of_robots.txt", mode='w')
f.write("{}".format(datas))
f.close()
'''
#Monitoring camera feed to debug.
final = aruco.drawDetectedMarkers(img,marker_corners,marker_IDs)
cv2.imshow("image", final)
k = cv2.waitKey(1)
if k%256 == 27:# ESC pressed
break
'''
| [
"noreply@github.com"
] | noreply@github.com |
2bf5c85b7461a762b5d02854d7f70fd155e8958c | 5eaedd3842270186750e07725fd582e6c79c0720 | /hasilTest/plot.py | da91f391e93092f6508ebb873767d8507d31ea41 | [] | no_license | lefalya/papers-docs | efe51daaaa913442922b6144e25460965f61ed02 | 5b8486140752ab7a89c1dab07be56a8f67af7751 | refs/heads/master | 2020-05-20T15:23:52.393984 | 2019-05-09T15:05:33 | 2019-05-09T15:05:33 | 185,643,584 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 193 | py | import matplotlib.pyplot as plt
dot = []
with open('./doppler_compensator_test.txt', 'r') as f :
data = f.readlines()
print(data)
plt.plot(data, range(len(data)))
plt.show()
| [
"falya.bernino@gmail.com"
] | falya.bernino@gmail.com |
4a868a3af85b9f9f76134d6518281c3d68618b13 | 8d517cc03cbbfc1b80d7403f62360ee1074067a0 | /DLS_websocket/GUARD/server.py | 928fb54bc4d977cf014af655a9a5246e1b20abfc | [] | no_license | SmallYi/Websocket_Demo | c803ec05ee983c28b9241d910830542fb818d463 | 35db86200c3ef546df553c1a685f2d6453d7e13f | refs/heads/master | 2020-07-03T12:03:44.153144 | 2019-08-12T09:30:38 | 2019-08-12T09:30:38 | 201,894,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,382 | py | import SocketServer
import time
from kafkaClass import Kafka_producer
class Server(SocketServer.BaseRequestHandler):
    """TCP handler that reframes length-prefixed records and forwards them to Kafka.

    Wire format: each record is a decimal byte count, a tab character, then
    that many payload bytes.  Records may be split across or packed within
    TCP segments, so an incomplete tail is buffered in ``data_r`` until the
    next ``recv`` completes it.
    """
    def handle(self):
        conn=self.request
        print(conn)
        # One producer per connection, publishing to topic 'HelloKafka'.
        kafkaproducer=Kafka_producer('master:9092,slave1:9092,slave2:9092','HelloKafka')
        error_flag=0   # set when a length field fails to parse; aborts the connection
        data_r=''      # unconsumed tail carried over between recv() calls
        while True:
            data=data_r+conn.recv(1500)
            if not data:
                # Peer closed the connection and nothing is buffered.
                print("no data!")
                break
            while True:
                # Split off the length field of the next record.
                list1=data.split('\t',1)
                if not len(list1)==2:
                    # No tab yet: the length field itself is incomplete.
                    data_r=data
                    break
                try:
                    row_l=int(list1[0])
                except:
                    # NOTE(review): bare except also traps KeyboardInterrupt;
                    # a malformed length makes the rest of the stream unreadable.
                    print("list1",list1)
                    print("list1[0]",list1[0])
                    error_flag=1
                    print("row_length transform error!")
                    break
                if len(list1[1])<row_l:
                    # Payload not fully received yet; re-join and wait for more.
                    data_r='\t'.join(list1)
                    break
                print(list1[1][0:row_l])
                kafkaproducer.senddata(list1[1][0:row_l])
                # Keep parsing any further records already in the buffer.
                data=list1[1][row_l:]
            if error_flag:
                break
        conn.close()
if __name__=='__main__':
ip,port='192.168.138.201',8888
server=SocketServer.ThreadingTCPServer((ip,port),Server)
server.serve_forever()
| [
"734966463@qq.com"
] | 734966463@qq.com |
60bca6fdd3d12d7f7f9c171b16b437469a0d92af | 35a1be2135023d8fac9e0359ad7ac56b25abc0a9 | /main.py | 3387cc6372cdb122b57a9dfdb0b4834c08c98d7b | [] | no_license | mechbear14/mysterious-forest | 8243784ccef9cf655e326c6be70f6494ac3e5f28 | 8b1575e95136e02e4dde08cde542059a7b72b999 | refs/heads/master | 2021-04-19T13:53:36.345190 | 2020-03-26T04:05:55 | 2020-03-26T04:05:55 | 249,610,039 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | from world import Forest
forest = Forest(6, 5)
if __name__ == "__main__":
forest.go_to(0, 1)
forest.go_to(0, 2)
forest.go_to(0, 3)
forest.go_to(1, 1)
forest.go_to(0, 0)
forest.go_to(1, 0)
forest.go_to(1, 1)
forest.go_to(0, 2)
forest.go_to(0, 3)
forest.go_to(0, 4)
forest.go_to(0, 5)
forest.go_to(1, 0)
forest.go_to(2, 0)
forest.go_to(2, 1)
forest.go_to(2, 0)
forest.go_to(3, 0)
forest.go_to(4, 0)
forest.go_to(5, 0)
forest.go_to(6, 0)
| [
"no6xichengavenue@gmail.com"
] | no6xichengavenue@gmail.com |
76d77788dd84fd7c82660fbbd887258c0b4258f9 | 290247a61047bcb9af63a1bbb2aad90e3a62aa6b | /tests/test_basics.py | 51909a749e3f2a600be8ca8c8f57f6aab0b86aea | [] | no_license | johngngn/learnflasky | 19aa31b0ef8d63bdaef339a8adb8855c0304c22f | 301949e77022a612705fd49d16fddc615297a613 | refs/heads/master | 2021-01-10T09:58:41.797806 | 2016-01-16T10:16:08 | 2016-01-16T10:16:08 | 48,409,253 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 562 | py | import unittest
from flask import current_app
from app import create_app, db
class BasicsTestCase(unittest.TestCase):
    """Smoke tests for the Flask application factory."""
    def setUp(self):
        # Build an app with the 'testing' config and push its context so
        # ``current_app`` and the database are usable inside each test.
        self.app = create_app('testing')
        self.app_context = self.app.app_context()
        self.app_context.push()
        db.create_all()
    def tearDown(self):
        # Drop all tables and pop the context pushed in setUp.
        db.session.remove()
        db.drop_all()
        self.app_context.pop()
    def test_app_exisit(self):
        # (sic: "exisit") — the factory must have produced an application.
        self.assertFalse(current_app is None)
    def test_app_is_testing(self):
        # The 'testing' config must set Flask's TESTING flag.
        self.assertTrue(current_app.config['TESTING'])
"a290873309@gmail.com"
] | a290873309@gmail.com |
f6cbe10ec60a3e67c3d01909eb6787f81d784725 | 52b5fa23f79d76883728d8de0bfd202c741e9c43 | /kubernetes/test/test_v1_horizontal_pod_autoscaler.py | 315840a3d5385881407d0c8128db6c771a02de99 | [] | no_license | kippandrew/client-python-tornado | 5d00810f57035825a84e37ff8fc89a7e79aed8da | d479dfeb348c5dd2e929327d800fe033b5b3b010 | refs/heads/master | 2021-09-04T13:01:28.275677 | 2018-01-18T23:27:34 | 2018-01-18T23:27:34 | 114,912,995 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen) # noqa: E501
OpenAPI spec version: v1.8.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import kubernetes.client
from kubernetes.client.models.v1_horizontal_pod_autoscaler import V1HorizontalPodAutoscaler # noqa: E501
from kubernetes.client.rest import ApiException
class TestV1HorizontalPodAutoscaler(unittest.TestCase):
    """V1HorizontalPodAutoscaler unit test stubs"""
    def setUp(self):
        # Generated stub: no fixtures required yet.
        pass
    def tearDown(self):
        # Generated stub: nothing to clean up.
        pass
    def testV1HorizontalPodAutoscaler(self):
        """Test V1HorizontalPodAutoscaler"""
        # FIXME: construct object with mandatory attributes with example values
        # model = kubernetes.client.models.v1_horizontal_pod_autoscaler.V1HorizontalPodAutoscaler()  # noqa: E501
        pass
pass
if __name__ == '__main__':
unittest.main()
| [
"andy@rstudio.com"
] | andy@rstudio.com |
89e06687f93fce54b05689e3792cc5692934b929 | da497ddf926b8791f3812c79543120215822216b | /icsbep/pu-sol-therm-012/openmc/case-14/generate_materials.py | 8eab1d8d6513479d03829680991c284d7261e01d | [] | no_license | mit-crpg/benchmarks | 55f38e569699554d07df254103e2f828dc5b4ff8 | 58e15679ec684b9e2f552df58099e3648b5708cc | refs/heads/master | 2022-05-17T12:27:45.590757 | 2022-05-09T15:07:00 | 2022-05-09T15:07:00 | 2,704,358 | 23 | 30 | null | 2019-11-11T16:35:27 | 2011-11-03T19:04:29 | Python | UTF-8 | Python | false | false | 1,822 | py | import openmc
mats = openmc.Materials()
mat = openmc.Material(1)
mat.name = "Plutonium nitrate solution (52.7 g/L)"
mat.set_density('sum')
mat.add_nuclide('Pu239', 9.86655e-05)
mat.add_nuclide('Pu240', 2.50004e-05)
mat.add_nuclide('Pu241', 7.41089e-06)
mat.add_nuclide('Pu242', 1.49702e-06)
mat.add_nuclide('Am241', 8.03099e-07)
mat.add_element('N', 1.78497e-03)
mat.add_element('O', 3.59564e-02)
mat.add_nuclide('H1', 6.24015e-02)
mat.add_element('Fe', 1.21850e-05)
mat.add_element('Cr', 3.91841e-06)
mat.add_element('Ni', 2.77719e-06)
mat.add_s_alpha_beta('c_H_in_H2O')
mats.append(mat)
mat = openmc.Material(2)
mat.name = "Air"
mat.set_density('sum')
mat.add_nuclide('O16', 1.0784e-05)
mat.add_nuclide('N14', 4.3090e-05)
mats.append(mat)
mat = openmc.Material(3)
mat.name = "Stainless steel"
mat.set_density('sum')
mat.add_element('Fe', 6.1344e-02)
mat.add_element('Cr', 1.6472e-02)
mat.add_element('Ni', 8.1050e-03)
mats.append(mat)
mat = openmc.Material(4)
mat.name = "Lucoflex"
mat.set_density('sum')
mat.add_element('C', 2.7365e-02)
mat.add_nuclide('H1', 4.1047e-02)
mat.add_element('Cl', 1.3682e-02)
mats.append(mat)
mat = openmc.Material(5)
mat.name = "Water"
mat.set_density('sum')
mat.add_nuclide('H1', 6.6688e-02)
mat.add_element('O', 3.3344e-02)
mat.add_s_alpha_beta('c_H_in_H2O')
mats.append(mat)
mat = openmc.Material(6)
mat.name = "Steel (pool wall)"
mat.set_density('sum')
mat.add_element('Fe', 8.5068e-02)
mat.add_element('C', 5.5545e-04)
mats.append(mat)
mat = openmc.Material(7)
mat.name = "Concrete"
mat.set_density('sum')
mat.add_nuclide('H1', 1.035e-02)
mat.add_nuclide('B10', 1.602e-06)
mat.add_element('O', 4.347e-02)
mat.add_element('Al', 1.563e-03)
mat.add_element('Si', 1.417e-02)
mat.add_element('Ca', 6.424e-03)
mat.add_element('Fe', 7.621e-04)
mats.append(mat)
mats.export_to_xml()
| [
"paul.k.romano@gmail.com"
] | paul.k.romano@gmail.com |
d62b463aa6a343af04deaa82256d72010e4e2d68 | 00d266aef2b9a23d6c00aed1049ea461eb19e2d1 | /karger_minimum_cut/minCut.py | f74e6b3c6f6564cd4b93e979d5a6900cecacf332 | [] | no_license | ancabilloni/algorithms_practice | 3f3992d982969b01ccfb16d453433e5e5e435d56 | 3bcc8d0a004eae11654ab4b977b3637dfc5e0ed6 | refs/heads/master | 2021-04-03T10:19:19.529173 | 2018-03-15T02:50:36 | 2018-03-15T02:50:36 | 123,497,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,702 | py | """
Given the file contains the adjacency list representation of an undirected graph. There are
200 vertices labeled 1 to 200. For each row, the first column is the vertex label, and rest of the entries are the adjacent vertices.
Task: Code and run the randomized contraction algorithm to compute the min cut for the graph.
"""
class loadData(object):
    """Read a whitespace-separated integer adjacency list from *filename*.

    Each line becomes one list of ints in ``org_data``; ``data`` is kept
    (always empty) for attribute compatibility.
    """
    def __init__(self, filename):
        self.data = []
        self.org_data = []
        with open(filename, 'r') as fh:
            self.org_data = [[int(tok) for tok in line.split()] for line in fh]
import random
class minCut(loadData):
    """Karger's randomized contraction over the adjacency lists in
    ``org_data`` (vertex labels are 1-based; list index is label - 1)."""
    def partion(self, array, l, r):
        # Lomuto-style partition around array[l]; returns (array, pivot index).
        # (Not used by the min-cut routines below; kept for compatibility.)
        p = array[l]
        i = l+1
        for j in range(l+1, r):
            if array[j] < p:
                swap = array[j]
                array[j] = array[i]
                array[i] = swap
                i += 1
        array[l] = array[i-1]
        array[i-1] = p
        return array, i-1
    def findElementIndex(self, array, element):
        # Linear scan; index of the first occurrence of ``element`` or None.
        for j in range(len(array)):
            if array[j] == element:
                return j
        return None
    def updateArray(self, array, element):
        # Copy of ``array`` with the first occurrence of ``element`` removed,
        # or None when ``element`` is absent (used to detect parallel edges).
        index = self.findElementIndex(array, element)
        if index is not None:
            array = array[:index] + array[index+1:]
            return array
        return None
    def min_cut(self):
        """Run one randomized contraction pass; return the resulting cut size."""
        data = self.org_data[:]
        n = len(data)
        min_cut = 0
        # Surviving 0-based vertex indices; rows themselves hold 1-based labels.
        vertices = [x for x in range(n)]
        while n > 2:
            # pick a random surviving vertex index
            i = vertices[random.randint(0, len(vertices)-1)]
            # pick a random incident edge to contract (entry 0 is the row label)
            i_contracted = random.randint(1,len(data[i]) - 1)
            _contractedNode = data[i][i_contracted]
            # Remove every parallel edge between the two endpoints:
            # after contraction those would be self-loops.
            self_loop = True
            while self_loop:
                temp_array_i = self.updateArray(data[i], _contractedNode)
                temp_array_contracted = self.updateArray(data[_contractedNode-1], i+1)
                if temp_array_i is not None:
                    data[i] = temp_array_i
                    data[_contractedNode-1] = temp_array_contracted
                else:
                    self_loop = False
            # Merge the contracted vertex's remaining edges into vertex i,
            # and repoint each of its neighbours at i.
            data[i] = data[i] + data[_contractedNode-1][1:]
            for vertex in data[_contractedNode-1][1:]:
                data[vertex-1] = self.updateArray(data[vertex-1], _contractedNode)
                data[vertex-1].append(i+1)
            data[_contractedNode-1] = []
            remove_vertex_ind = self.findElementIndex(vertices, _contractedNode-1)
            vertices = vertices[:remove_vertex_ind] + vertices[remove_vertex_ind+1:]
            n = n - 1
        # Two super-vertices remain; the edges between them form the cut.
        min_cut = len(data[vertices[0]]) - 1
        return min_cut
    def minCutLoop(self, num_of_loop):
        """Repeat ``min_cut`` and keep the best (smallest) value.

        Returns (best_cut, running_minimum_after_each_trial).
        """
        minCut = 10000
        list_min =[]
        for i in range(num_of_loop):
            trial_cut = self.min_cut()
            if minCut > trial_cut:
                minCut = trial_cut
            list_min.append(minCut)
        return minCut, list_min
if __name__ == "__main__":
filename = "./kargerMinCut.txt"
# filename = './input_random_10_25.txt'
data = minCut(filename)
list_min_cut =[]
min_cut, list_min_cut = data.minCutLoop(100)
# list_min_cut.append(min_cut)
print (list_min_cut)
print (); print (min_cut)
| [
"anguyen3@rockets.utoledo.edu"
] | anguyen3@rockets.utoledo.edu |
09db562a43a4de24b3c2c642181d463e0a4b80ae | 6d9795fa1aafc0fa5316020aaa0eaa4f68b76229 | /sellproperty/models.py | 63aa8f893cb9dcbfbd6429438758eeac799571cd | [] | no_license | icerahi/immolists | 02d379a22c193e793b26e35828b5eebff33bf888 | 813333c3923385861f111bb7aa715aeb04108c3a | refs/heads/master | 2022-12-15T15:00:39.844142 | 2022-01-06T10:06:44 | 2022-01-06T10:06:44 | 196,600,572 | 0 | 0 | null | 2022-11-22T04:14:43 | 2019-07-12T15:12:33 | JavaScript | UTF-8 | Python | false | false | 6,052 | py | import os
import random
from django.contrib.auth.models import User
from django.db import models
# Create your models here.
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
from django.conf import settings
from django.db import models
from django.db.models.signals import pre_save
from django.dispatch import receiver
from django.urls import reverse
from django.utils.text import slugify
from djmoney.models.fields import MoneyField
from phonenumber_field.modelfields import PhoneNumberField
from embed_video.fields import EmbedVideoField
from places.fields import PlacesField
class Category(models.Model):
    """A uniquely named property category."""
    name=models.CharField(max_length=200,unique=True,)
    def __str__(self):
        return f"{self.name}"
class Type(models.Model):
    """A uniquely named property type belonging to one Category."""
    name=models.CharField(max_length=200,unique=True)
    # NOTE(review): related_name='category' makes the reverse accessor
    # ``Category.category`` — looks like it was meant to be 'types'; confirm
    # against templates/views before changing (it would alter the API).
    category=models.ForeignKey(Category,related_name='category',on_delete=models.CASCADE)
    def __str__(self):
        return f"{self.name}"
class PublishedManager(models.Manager):
    """Manager restricted to rows whose ``status`` is 'published'."""
    def get_queryset(self):
        return super(PublishedManager, self).get_queryset().filter(status='published')
class AllObjectManager(models.Manager):
    """Manager returning every row regardless of publication status."""
    def get_queryset(self):
        return super(AllObjectManager, self).get_queryset()
def get_filename_extention(filepath):
    """Split *filepath*'s basename into (stem, extension).

    The extension includes the leading dot, e.g. ('photo', '.jpg').
    """
    base_name = os.path.basename(filepath)
    name, ext = os.path.splitext(base_name)
    return name, ext


def upload_image_path(instance, filename):
    """Build an upload path of the form ``sellproperty/<random>/<filename>``.

    ``instance`` is unused (Django's upload_to callback signature requires it).
    Bug fix: the final filename previously interpolated the whole original
    *filename* and then appended the extension again, producing names such as
    'photo.jpg.jpg'; it now joins the split stem with its extension.
    """
    new_filename = random.randint(1, 1234567876543211)
    name, ext = get_filename_extention(filename)
    final_filename = '{name}{ext}'.format(name=name, ext=ext)
    return 'sellproperty/{new_filename}/{final_filename}'.format(
        new_filename=new_filename,
        final_filename=final_filename
    )
class SellProperty(models.Model):
STATUS_CHOICES=(
('draf','Draft'),
('published','Published')
)
ACTION_FOR=(('sale','Sale',),
('rent','Rent')
)
RENT_PER=(("nothing","One Time Price (For sale)"),
('month','PER MONTH'),
('year','PER YEAR'))
realator = models.ForeignKey(settings.AUTH_USER_MODEL,on_delete=models.CASCADE)
category =models.ForeignKey(Category,on_delete=models.CASCADE)
type =models.ForeignKey(Type,on_delete=models.CASCADE)
title =models.CharField(max_length=200)
full_description =RichTextUploadingField()
key_features =RichTextField()
min_price = MoneyField(max_digits=14, decimal_places=2, default_currency='USD')
max_price = MoneyField(max_digits=14, decimal_places=2, default_currency='USD')
created =models.DateTimeField(auto_now_add=True)
updated =models.DateTimeField(auto_now=True)
slug = models.SlugField()
status =models.CharField(max_length=12,choices=STATUS_CHOICES,default='published')
published =PublishedManager() #Costom model manager
objects =AllObjectManager() # Costom model manager
main_image =models.ImageField(upload_to=upload_image_path,default='default.jpg')
image_2 =models.ImageField(upload_to=upload_image_path,null=True,blank=True)
image_3 =models.ImageField(upload_to=upload_image_path,null=True,blank=True)
views = models.PositiveIntegerField(default=0, blank=True)
favourite =models.ManyToManyField(settings.AUTH_USER_MODEL,blank=True,related_name='favourite')
video = EmbedVideoField(null=True,blank=True)
action =models.CharField(max_length=6,choices=ACTION_FOR)
rent_per =models.CharField(max_length=30,choices=RENT_PER,null=True,blank=True)
location = PlacesField(blank=True)
def __unicode__(self):
return self.location.place
def __str__(self):
return f"{self.title}"
class Meta:
ordering=['-created']
def get_update_url(self,*args,**kwargs):
return reverse('dashboard:sell_update',kwargs={'pk':self.pk,'slug':self.slug})
def get_delete_url(self,*args,**kwargs):
return reverse('dashboard:sell_delete',kwargs={'pk':self.pk,'slug':self.slug})
def get_absolute_url(self,*args,**kwargs):
return reverse('site:detail',kwargs={'pk':self.pk,'slug':self.slug})
@receiver(pre_save,sender=SellProperty)
def pre_save_slug(sender,**kwargs):
    # Regenerate the slug from the title on every save of a SellProperty
    # (pre_save signal), keeping slug and title in sync.
    slug=slugify(kwargs['instance'].title)
    kwargs['instance'].slug=slug
class EnquiryManager(models.Manager):
def get_come(self,user):
return super(EnquiryManager, self).get_queryset().filter(property__realator=user)
def get_send(self,user):
return super(EnquiryManager, self).get_queryset().filter(email=user.email)
class Enquiry(models.Model):
property=models.ForeignKey(SellProperty,on_delete=models.CASCADE,related_name='enquiry')
name =models.CharField(max_length=100,blank=False,null=False)
email=models.EmailField(blank=False,null=False)
phone=PhoneNumberField(blank=True,null=True)
message=models.TextField(blank=False,null=False)
time =models.DateTimeField(auto_now_add=True)
objects=EnquiryManager()
def __str__(self):
return f'{self.name}'
class Meta:
ordering=['-time']
def get_come_delete_url(self,*args,**kwargs):
return reverse('dashboard:enquirycome_delete',kwargs={'pk':self.pk})
def get_send_delete_url(self,*args,**kwargs):
return reverse('dashboard:enquirysend_delete',kwargs={'pk':self.pk})
class MakeOffer(models.Model):
property=models.ForeignKey(SellProperty,on_delete=models.CASCADE,related_name='make_offer')
discount=models.DecimalField(max_digits=3,decimal_places=0)
time=models.DateTimeField(auto_now_add=True)
objects=AllObjectManager()
def get_delete_url(self,*args,**kwargs):
return reverse('dashboard:offer_remove',kwargs={
'pk':self.pk,
})
def __str__(self):
return f'{self.discount}'
class Meta:
ordering=['-time']
| [
"zanjarwhite@gmail.com"
] | zanjarwhite@gmail.com |
84089dc53634f26772a28b3d1295ad8597608f1f | 2cc7b406ad37ae5f4858a33e535970a229ed14ac | /pi/python_games/tetrominoforidiots.py | da62660a4f3352fb56d650a5330fd34d1f0a4ad8 | [] | permissive | mfitzgerald2/ScanINSoftware | aa9111146358057c2849cd89937df0a3d3032cb7 | 13af43805a5ecf04347cfba638926bcda8bed623 | refs/heads/master | 2020-03-06T17:22:27.730362 | 2018-04-24T12:26:28 | 2018-04-24T12:26:28 | 126,988,710 | 0 | 2 | MIT | 2018-03-27T14:18:55 | 2018-03-27T13:14:27 | PHP | UTF-8 | Python | false | false | 15,759 | py | # Tetromino for Idiots
# By Al Sweigart al@inventwithpython.com
# http://inventwithpython.com/pygame
# Released under a "Simplified BSD" license
# KRT 17/06/2012 rewrite event detection to deal with mouse use
import random, time, pygame, sys
from pygame.locals import *
FPS = 25
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
BOXSIZE = 20
BOARDWIDTH = 10
BOARDHEIGHT = 20
BLANK = '.'
MOVESIDEWAYSFREQ = 0.15
MOVEDOWNFREQ = 0.1
XMARGIN = int((WINDOWWIDTH - BOARDWIDTH * BOXSIZE) / 2)
TOPMARGIN = WINDOWHEIGHT - (BOARDHEIGHT * BOXSIZE) - 5
# R G B
WHITE = (255, 255, 255)
GRAY = (185, 185, 185)
BLACK = ( 0, 0, 0)
RED = (155, 0, 0)
LIGHTRED = (175, 20, 20)
GREEN = ( 0, 155, 0)
LIGHTGREEN = ( 20, 175, 20)
BLUE = ( 0, 0, 155)
LIGHTBLUE = ( 20, 20, 175)
YELLOW = (155, 155, 0)
LIGHTYELLOW = (175, 175, 20)
BORDERCOLOR = BLUE
BGCOLOR = BLACK
TEXTCOLOR = WHITE
TEXTSHADOWCOLOR = GRAY
COLORS = ( BLUE, GREEN, RED, YELLOW)
LIGHTCOLORS = (LIGHTBLUE, LIGHTGREEN, LIGHTRED, LIGHTYELLOW)
assert len(COLORS) == len(LIGHTCOLORS) # each color must have light color
TEMPLATEWIDTH = 5
TEMPLATEHEIGHT = 5
SHAPE_TEMPLATE = [['.....',
'.....',
'..O..',
'.....',
'.....']]
PIECES = {'A': SHAPE_TEMPLATE}
def main():
global FPSCLOCK, DISPLAYSURF, BASICFONT, BIGFONT
pygame.init()
FPSCLOCK = pygame.time.Clock()
DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
BASICFONT = pygame.font.Font('freesansbold.ttf', 18)
BIGFONT = pygame.font.Font('freesansbold.ttf', 60)
pygame.display.set_caption('Tetromino for Idiots')
showTextScreen('Tetromino for Idiots')
while True: # game loop
if random.randint(0, 1) == 0:
pygame.mixer.music.load('tetrisb.mid')
else:
pygame.mixer.music.load('tetrisc.mid')
pygame.mixer.music.play(-1, 0.0)
runGame()
pygame.mixer.music.stop()
showTextScreen('Game Over')
def runGame():
# setup variables for the start of the game
board = getBlankBoard()
lastMoveDownTime = time.time()
lastMoveSidewaysTime = time.time()
lastFallTime = time.time()
movingDown = False # note: there is no movingUp variable
movingLeft = False
movingRight = False
score = 0
level, fallFreq = calculateLevelAndFallFreq(score)
fallingPiece = getNewPiece()
nextPiece = getNewPiece()
while True: # game loop
if fallingPiece == None:
# No falling piece in play, so start a new piece at the top
fallingPiece = nextPiece
nextPiece = getNewPiece()
lastFallTime = time.time() # reset lastFallTime
if not isValidPosition(board, fallingPiece):
return # can't fit a new piece on the board, so game over
checkForQuit()
for event in pygame.event.get(): # event handling loop
if event.type == KEYUP:
if (event.key == K_p):
# Pausing the game
DISPLAYSURF.fill(BGCOLOR)
pygame.mixer.music.stop()
showTextScreen('Paused') # pause until a key press
pygame.mixer.music.play(-1, 0.0)
lastFallTime = time.time()
lastMoveDownTime = time.time()
lastMoveSidewaysTime = time.time()
elif (event.key == K_LEFT or event.key == K_a):
movingLeft = False
elif (event.key == K_RIGHT or event.key == K_d):
movingRight = False
elif (event.key == K_DOWN or event.key == K_s):
movingDown = False
elif event.type == KEYDOWN:
# moving the piece sideways
if (event.key == K_LEFT or event.key == K_a) and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
movingLeft = True
movingRight = False
lastMoveSidewaysTime = time.time()
elif (event.key == K_RIGHT or event.key == K_d) and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
movingRight = True
movingLeft = False
lastMoveSidewaysTime = time.time()
# rotating the piece (if there is room to rotate)
elif (event.key == K_UP or event.key == K_w):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
elif (event.key == K_q): # rotate the other direction
fallingPiece['rotation'] = (fallingPiece['rotation'] - 1) % len(PIECES[fallingPiece['shape']])
if not isValidPosition(board, fallingPiece):
fallingPiece['rotation'] = (fallingPiece['rotation'] + 1) % len(PIECES[fallingPiece['shape']])
# making the piece fall faster with the down key
elif (event.key == K_DOWN or event.key == K_s):
movingDown = True
if isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
# move the current piece all the way down
elif event.key == K_SPACE:
movingDown = False
movingLeft = False
movingRight = False
for i in range(1, BOARDHEIGHT):
if not isValidPosition(board, fallingPiece, adjY=i):
break
fallingPiece['y'] += i - 1
# handle moving the piece because of user input
if (movingLeft or movingRight) and time.time() - lastMoveSidewaysTime > MOVESIDEWAYSFREQ:
if movingLeft and isValidPosition(board, fallingPiece, adjX=-1):
fallingPiece['x'] -= 1
elif movingRight and isValidPosition(board, fallingPiece, adjX=1):
fallingPiece['x'] += 1
lastMoveSidewaysTime = time.time()
if movingDown and time.time() - lastMoveDownTime > MOVEDOWNFREQ and isValidPosition(board, fallingPiece, adjY=1):
fallingPiece['y'] += 1
lastMoveDownTime = time.time()
# let the piece fall if it is time to fall
if time.time() - lastFallTime > fallFreq:
# see if the piece has landed
if not isValidPosition(board, fallingPiece, adjY=1):
# falling piece has landed, set it on the board
addToBoard(board, fallingPiece)
score += removeCompleteLines(board)
level, fallFreq = calculateLevelAndFallFreq(score)
fallingPiece = None
else:
# piece did not land, just move the piece down
fallingPiece['y'] += 1
lastFallTime = time.time()
# drawing everything on the screen
DISPLAYSURF.fill(BGCOLOR)
drawBoard(board)
drawStatus(score, level)
drawNextPiece(nextPiece)
if fallingPiece != None:
drawPiece(fallingPiece)
pygame.display.update()
FPSCLOCK.tick(FPS)
def makeTextObjs(text, font, color):
surf = font.render(text, True, color)
return surf, surf.get_rect()
def terminate():
pygame.quit()
sys.exit()
# KRT 17/06/2012 rewrite event detection to deal with mouse use
def checkForKeyPress():
for event in pygame.event.get():
if event.type == QUIT: #event is quit
terminate()
elif event.type == KEYDOWN:
if event.key == K_ESCAPE: #event is escape key
terminate()
else:
return event.key #key found return with it
# no quit or key events in queue so return None
return None
##def checkForKeyPress():
## # Go through event queue looking for a KEYUP event.
## # Grab KEYDOWN events to remove them from the event queue.
## checkForQuit()
##
## for event in pygame.event.get([KEYDOWN, KEYUP]):
## if event.type == KEYDOWN:
## continue
## return event.key
## return None
def showTextScreen(text):
# This function displays large text in the
# center of the screen until a key is pressed.
# Draw the text drop shadow
titleSurf, titleRect = makeTextObjs(text, BIGFONT, TEXTSHADOWCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2))
DISPLAYSURF.blit(titleSurf, titleRect)
# Draw the text
titleSurf, titleRect = makeTextObjs(text, BIGFONT, TEXTCOLOR)
titleRect.center = (int(WINDOWWIDTH / 2) - 2, int(WINDOWHEIGHT / 2) - 2)
DISPLAYSURF.blit(titleSurf, titleRect)
# Draw the additional "Press a key to play." text.
pressKeySurf, pressKeyRect = makeTextObjs('Press a key to play.', BASICFONT, TEXTCOLOR)
pressKeyRect.center = (int(WINDOWWIDTH / 2), int(WINDOWHEIGHT / 2) + 100)
DISPLAYSURF.blit(pressKeySurf, pressKeyRect)
while checkForKeyPress() == None:
pygame.display.update()
FPSCLOCK.tick()
def checkForQuit():
for event in pygame.event.get(QUIT): # get all the QUIT events
terminate() # terminate if any QUIT events are present
for event in pygame.event.get(KEYUP): # get all the KEYUP events
if event.key == K_ESCAPE:
terminate() # terminate if the KEYUP event was for the Esc key
pygame.event.post(event) # put the other KEYUP event objects back
def calculateLevelAndFallFreq(score):
    """Return (level, fall interval in seconds) for the given score.

    Every 10 points advances one level, and each level shaves 0.02 s off
    the time a falling piece waits before dropping one row.
    """
    level = int(score / 10) + 1
    return level, 0.27 - 0.02 * level
def getNewPiece():
# return a random new piece in a random rotation and color
shape = random.choice(list(PIECES.keys()))
newPiece = {'shape': shape,
'rotation': random.randint(0, len(PIECES[shape]) - 1),
'x': int(BOARDWIDTH / 2) - int(TEMPLATEWIDTH / 2),
'y': -2, # start it above the board (i.e. less than 0)
'color': random.randint(0, len(COLORS)-1)}
return newPiece
def addToBoard(board, piece):
# fill in the board based on piece's location, shape, and rotation
for x in range(TEMPLATEWIDTH):
for y in range(TEMPLATEHEIGHT):
if PIECES[piece['shape']][piece['rotation']][y][x] != BLANK:
board[x + piece['x']][y + piece['y']] = piece['color']
def getBlankBoard():
    """Create a fresh board: BOARDWIDTH columns of BOARDHEIGHT blank cells."""
    return [[BLANK] * BOARDHEIGHT for _ in range(BOARDWIDTH)]
def isOnBoard(x, y):
    # True when (x, y) is within the side walls and above the floor.
    # y may be negative: pieces spawn above the visible board.
    return x >= 0 and x < BOARDWIDTH and y < BOARDHEIGHT
def isValidPosition(board, piece, adjX=0, adjY=0):
    # Return True if the piece (optionally shifted by adjX/adjY) lies within
    # the board and does not overlap any previously landed box.
    for x in range(TEMPLATEWIDTH):
        for y in range(TEMPLATEHEIGHT):
            isAboveBoard = y + piece['y'] + adjY < 0
            if isAboveBoard or PIECES[piece['shape']][piece['rotation']][y][x] == BLANK:
                # Blank template cells, and cells above the board, cannot collide.
                continue
            if not isOnBoard(x + piece['x'] + adjX, y + piece['y'] + adjY):
                return False
            if board[x + piece['x'] + adjX][y + piece['y'] + adjY] != BLANK:
                return False
    return True
def isCompleteLine(board, y):
    """Return True when row *y* is completely filled (no blank cells)."""
    return all(board[x][y] != BLANK for x in range(BOARDWIDTH))
def removeCompleteLines(board):
    # Remove any completed lines on the board, shift everything above them
    # down, and return the number of complete lines removed.
    numLinesRemoved = 0
    y = BOARDHEIGHT - 1 # start y at the bottom of the board
    while y >= 0:
        if isCompleteLine(board, y):
            # Remove the line by pulling every row above it down by one.
            for pullDownY in range(y, 0, -1):
                for x in range(BOARDWIDTH):
                    board[x][pullDownY] = board[x][pullDownY-1]
            # Set very top line to blank.
            for x in range(BOARDWIDTH):
                board[x][0] = BLANK
            numLinesRemoved += 1
            # Note: y is deliberately NOT decremented here, so the row that
            # was just pulled down is re-checked (it may also be complete).
        else:
            y -= 1 # move on to check next row up
    return numLinesRemoved
def convertToPixelCoords(boxx, boxy):
# Convert the given xy coordinates of the board to xy
# coordinates of the location on the screen.
return (XMARGIN + (boxx * BOXSIZE)), (TOPMARGIN + (boxy * BOXSIZE))
def drawBox(boxx, boxy, color, pixelx=None, pixely=None):
# draw a single box (each tetromino piece has four boxes)
# at xy coordinates on the board. Or, if pixelx & pixely
# are specified, draw to the pixel coordinates stored in
# pixelx & pixely (this is used for the "Next" piece).
if color == BLANK:
return
if pixelx == None and pixely == None:
pixelx, pixely = convertToPixelCoords(boxx, boxy)
pygame.draw.rect(DISPLAYSURF, COLORS[color], (pixelx + 1, pixely + 1, BOXSIZE - 1, BOXSIZE - 1))
pygame.draw.rect(DISPLAYSURF, LIGHTCOLORS[color], (pixelx + 1, pixely + 1, BOXSIZE - 4, BOXSIZE - 4))
def drawBoard(board):
    """Render the play field: border, background, then every cell."""
    # Border around the board.
    pygame.draw.rect(DISPLAYSURF, BORDERCOLOR, (XMARGIN - 3, TOPMARGIN - 7, (BOARDWIDTH * BOXSIZE) + 8, (BOARDHEIGHT * BOXSIZE) + 8), 5)
    # Board background.
    pygame.draw.rect(DISPLAYSURF, BGCOLOR, (XMARGIN, TOPMARGIN, BOXSIZE * BOARDWIDTH, BOXSIZE * BOARDHEIGHT))
    # Individual cells (board is column-major: board[x][y]).
    for x, column in enumerate(board):
        for y, cell in enumerate(column):
            drawBox(x, y, cell)
def drawStatus(score, level):
    """Render the score and level readouts near the top-right corner."""
    for text, top in (('Score: %s' % score, 20), ('Level: %s' % level, 50)):
        surf = BASICFONT.render(text, True, TEXTCOLOR)
        rect = surf.get_rect()
        rect.topleft = (WINDOWWIDTH - 150, top)
        DISPLAYSURF.blit(surf, rect)
def drawPiece(piece, pixelx=None, pixely=None):
    """Draw a tetromino at its board position, or at explicit pixel coords.

    When pixelx/pixely are omitted, the piece's own stored (x, y) board
    location is converted to screen coordinates.
    """
    shapeToDraw = PIECES[piece['shape']][piece['rotation']]
    # PEP 8: compare against the None singleton with `is`, not `==`.
    if pixelx is None and pixely is None:
        pixelx, pixely = convertToPixelCoords(piece['x'], piece['y'])
    # Draw each of the boxes that make up the piece template.
    for x in range(TEMPLATEWIDTH):
        for y in range(TEMPLATEHEIGHT):
            if shapeToDraw[y][x] != BLANK:
                drawBox(None, None, piece['color'], pixelx + (x * BOXSIZE), pixely + (y * BOXSIZE))
def drawNextPiece(piece):
    """Render the 'Next:' label and a preview of the upcoming piece."""
    labelSurf = BASICFONT.render('Next:', True, TEXTCOLOR)
    labelRect = labelSurf.get_rect()
    labelRect.topleft = (WINDOWWIDTH - 120, 80)
    DISPLAYSURF.blit(labelSurf, labelRect)
    drawPiece(piece, pixelx=WINDOWWIDTH - 120, pixely=100)
# Run the game only when executed as a script (main() is defined earlier
# in this file, outside this excerpt).
if __name__ == '__main__':
    main()
| [
"fitzge26@purdue.edu"
] | fitzge26@purdue.edu |
df7b811732f0bfb7df69e40f65934210a0fc7000 | b49253e9a27c1626c37df71806e39bad23587dbc | /webapps/settings.py | 04f86c08ffe0e499b6fd57b557428ac1fe07ca20 | [] | no_license | lzy0411/newtry | 9dd59c474e9cf19df3756255c47b82401093478d | 58189e2617daa3b2d9639f975dd5488ad8bba4b9 | refs/heads/main | 2023-01-24T06:50:44.684718 | 2020-12-06T16:50:35 | 2020-12-06T16:50:35 | 318,941,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,112 | py | """
Django settings for webapps project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'g=yi6p82)@h#g7)xvzr3x#c=s3xihbzesp9it6g2%sas*uzteh'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ['.herokuapp.com', '127.0.0.1:8000']
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'channels',
'mahjong',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
# Used by the authentication system for the private-todo-list application.
# URL to use if the authentication system requires a user to log in.
LOGIN_URL = '/mahjong/login'
ROOT_URLCONF = 'webapps.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'webapps.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
ASGI_APPLICATION = 'webapps.asgi.application'
# Channels websocket layer backed by Redis; REDIS_URL (e.g. on Heroku)
# overrides the local default.
CHANNEL_LAYERS = {
    'default': {
        'BACKEND': 'channels_redis.core.RedisChannelLayer',
        'CONFIG': {
            "hosts": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
        },
    },
}
# Cache backend pointed at the same Redis instance.
CACHES = {
    'default': {
        'BACKEND': 'channels_redis.cache.RedisCache',
        "LOCATION": [os.environ.get('REDIS_URL', 'redis://localhost:6379')],
        'OPTIONS': {
            # NOTE(review): the django-redis client class is normally
            # "django_redis.client.DefaultClient"; the dotted path here
            # ("django_redis_client.DefaultClient") looks misspelled --
            # confirm against the installed package before relying on it.
            "CLIENT_CLASS": "django_redis_client.DefaultClient",
        },
    },
}
STATIC_URL = '/static/'
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
STATIC_ROOT = os.path.join(BASE_DIR, "staticfiles", "static-root")
MEDIA_URL = "/media/"
MEDIA_ROOT = os.path.join(BASE_DIR, "staticfiles", "media-root") | [
"zeyuanli@andrew.cmu.edu"
] | zeyuanli@andrew.cmu.edu |
654879c2638886e4b9d482aa8c35bc8e992e3a80 | 1b493677f623ac54027b8d47e9994bbbc51a8c79 | /rango/models.py | d81f4b797441143ca3ba815b3c07f83ce4b0a802 | [] | no_license | Junjie98/tango_with_django_project | a63d764b9ecc2fcf9b05bddc079b025a4fb68280 | 451019252b2b75172d3f7a273ee16d901b33a09f | refs/heads/master | 2023-06-30T09:22:40.299582 | 2021-07-30T13:42:51 | 2021-07-30T13:42:51 | 389,663,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,796 | py | from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
class Category(models.Model):
    """A named grouping of pages, addressed in URLs via its slug."""
    NAME_MAX_LENGTH = 128
    name = models.CharField(max_length=NAME_MAX_LENGTH, unique=True)
    views = models.IntegerField(default=0)
    likes = models.IntegerField(default=0)
    # unique means the field's value must be unique throughout the
    # underlying database table.  (Use models.SlugField(blank=True)
    # instead to allow an empty slug.)
    slug = models.SlugField(unique=True)
    def save(self, *args, **kwargs):
        # Re-derive the slug from the name on every save, then delegate to
        # Model.save() (defined in django.db.models.Model) to persist the
        # changes to the correct database table.
        self.slug = slugify(self.name)
        super(Category, self).save(*args, **kwargs)
    class Meta:
        # Avoid the auto-generated plural "Categorys" in the admin.
        verbose_name_plural = 'Categories'
    def __str__(self):
        return self.name
class Page(models.Model):
    """A web page belonging to a Category; deleted with it (CASCADE)."""
    TITLE_MAX_LENGTH = 128
    # NOTE(review): URL_MAX_LENGTH is declared but never passed to the
    # URLField below (which defaults to max_length=200 anyway) -- confirm
    # whether it was meant to be used.
    URL_MAX_LENGTH = 200
    category = models.ForeignKey(Category, on_delete=models.CASCADE)
    title = models.CharField(max_length=TITLE_MAX_LENGTH)
    url = models.URLField()
    views = models.IntegerField(default=0)
    def __str__(self):
        return self.title
class UserProfile(models.Model):
    """Extra per-user data layered on Django's built-in User model."""
    # This line is required. It links the user profile to a User instance.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    # Attributes added on top of what the User model provides by default.
    website = models.URLField(blank=True)
    # upload_to is joined with the project's MEDIA_ROOT setting.
    picture = models.ImageField(upload_to='profile_images', blank=True)
    def __str__(self):
        return self.user.username
| [
"nelsonlow_88@hotmail.com"
] | nelsonlow_88@hotmail.com |
6f0a932bb496bd530acf0346ee4ffc508b7279cc | d51401cbd0b98c37f0b35efd617bd18717983828 | /seccion8_ejrs/ejr3.py | 2e067e6643e6492faf751e8b294099b348b73bc2 | [] | no_license | Marlon97/Curso_python_u | b21f7aaa33d208b99495ee4f16cfbd7cd00058a6 | da1f179e6779e9ae1863a7f6ca8d04a3d9987aa1 | refs/heads/master | 2022-11-28T08:50:54.608500 | 2020-07-29T07:16:32 | 2020-07-29T07:16:32 | 283,403,586 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 195 | py | contador = 1
while(contador<61):
print(f"el cuadrado de {contador} es {contador**2}")
contador+=1
for contador in range (1,61):
print(f"el cuadrado de {contador} es {contador ** 2}") | [
"A01379404@itesm.mx"
] | A01379404@itesm.mx |
5b5cae4b38cb318516d558efd98056635d0fbbfd | fe79e7ccebb7b209324487de3f0f429ddfe89c8d | /src/collab_filtering.py | ff52a0dcd862915870abd22e320829f7b5746ef9 | [] | no_license | ktitan123/Py-Reco | 6c0ee7e6825eacbd0b4e5d0f3ec18ed1f9b3da84 | 6b7b2b145e997de0f13581c2f53a3131487e678d | refs/heads/master | 2016-09-05T20:45:23.042248 | 2015-04-15T09:06:18 | 2015-04-15T09:06:18 | 32,513,279 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,352 | py |
import math
def average(user, d):
    """Return the mean of a user's known ratings.

    `d` maps user -> list of ratings, where the string 'x' marks an
    unrated item.  Raises ZeroDivisionError when the user has no
    ratings at all (same as the original loop-based code).
    """
    # Comprehension replaces the manual accumulate-and-count loop.
    ratings = [r for r in d[user] if r != 'x']
    return sum(ratings) / float(len(ratings))
def itemSimilarity(item1, item2, d):
    """Adjusted-cosine-style similarity between two items.

    Only users who rated both items contribute, and each rating is
    centred on that user's own mean rating ('x' marks a missing rating).
    Uses the module-level `movies` title->index table.
    """
    users = []
    index1 = movies[item1]
    index2 = movies[item2]
    # Collect the users who rated both items.
    for x in d:
        if d[x][index1] != 'x' and d[x][index2] != 'x':
            users.append(x)
    sum1 = 0.0  # cross term
    sum2 = 0.0  # squared deviations for item1
    sum3 = 0.0  # squared deviations for item2
    for user in users:
        av = average(user, d)
        sum1 = sum1 + (d[user][index1] - av) * (d[user][index2] - av)
        sum2 = sum2 + (d[user][index1] - av) ** 2
        sum3 = sum3 + (d[user][index2] - av) ** 2
    # NOTE(review): a textbook adjusted-cosine denominator would be
    # sqrt(sum2) * sqrt(sum3) (compare pearson() below, which does take
    # square roots); this uses the raw product.  The +0.0001 only guards
    # against division by zero.  Confirm whether this is intentional.
    return (sum1 / (sum2 * sum3 + 0.0001))
def pearson(user1, user2, d):
    """Smoothed Pearson correlation between two users' rating vectors.

    Only positions rated by both users count ('x' marks a missing
    rating).  The 0.001 terms smooth the ratio against empty overlap.
    """
    mean1 = average(user1, d)
    mean2 = average(user2, d)
    num = 0.0
    dev1 = 0.0
    dev2 = 0.0
    for i in range(len(d[user1])):
        r1 = d[user1][i]
        if r1 == 'x':
            continue
        r2 = d[user2][i]
        if r2 == 'x':
            continue
        num += (mean1 - r1) * (mean2 - r2)
        dev1 += (mean1 - r1) * (mean1 - r1)
        dev2 += (mean2 - r2) * (mean2 - r2)
    return (0.001 + num) / (0.001 + math.sqrt(dev1) * math.sqrt(dev2))
def calc(user1, user2, d, index):
    """user2's rating of the item at `index`, weighted by squared similarity."""
    weight = pearson(user1, user2, d) ** 2
    return weight * d[user2][index]
def recommend(user, d):
    """Score the user's unrated items via item-item similarity.

    Returns the *string* "None" when the user has rated everything,
    otherwise an ascending-sorted list of (score, movie) tuples, where
    each score is the similarity-weighted average of the user's known
    ratings plus a 0.001 smoothing term.
    """
    # The user's unrated items ('x') are the recommendation candidates.
    candidates = []
    i = 0
    while i < len(d[user]):
        if d[user][i] == 'x':
            candidates.append(moviesrev[i])
        i = i + 1
    if len(candidates) == 0:
        # NOTE(review): returns the string "None", not the None singleton
        # -- callers must compare against "None".
        return "None"
    reco = []
    for c in candidates:
        # Accumulators are reset per candidate (contrast with predict()).
        i = 0
        num = 0.0
        den = 0.0
        while i < len(d[user]):
            if d[user][i] != 'x':
                # itemSimilarity is computed twice per rated item here.
                num = num + itemSimilarity(moviesrev[i], c, d) * d[user][i]
                den = den + itemSimilarity(moviesrev[i], c, d)
            i = i + 1
        reco.append((((num / den) + 0.001), c))
    reco.sort()
    return reco
def predict(user, d):
    """Predict a rating for each of the user's unrated items ('x').

    For every unrated item the prediction is a weighted average of the
    other users' ratings of that item, weighted by squared Pearson
    similarity to `user`.  Returns a dict mapping movie title -> rating
    rounded to 2 decimals (titles come from the module-level moviesrev).
    """
    reco = {}
    i = 0
    while i < len(d[user]):
        if d[user][i] == 'x':
            # BUGFIX: reset the accumulators for every candidate item.
            # Previously num/den were initialised once before the loop,
            # so the second and later predictions silently included the
            # sums from earlier items (compare recommend(), which resets
            # per candidate).
            num = 0.0
            den = 0.0
            for person in d:
                if person != user:
                    num = num + calc(user, person, d, i)
                    den = den + (pearson(user, person, d) ** 2)
            reco[moviesrev[i]] = round((num / den), 2)
        i = i + 1
    return reco
# Index <-> title lookup tables for the 5-movie toy data set.
moviesrev={0:'Inception',1:'Hangover',2:'Excorcism',3:'Frozen',4:'Mission Impossible'}
movies={'Inception':0,'Hangover':1,'Excorcism':2,'Frozen':3,'Mission Impossible':4}
# Per-user rating vectors, index-aligned with the tables above; 'x' = unrated.
d={}
d['john']=[1.5,'x',4.0,'x',3.0]
d['jack']=[0.0,0.5,4.5,1.0,2.0]
d['alice']=[2.0,2.0,1.5,2.5,2.0]
d['peter']=[2.5,2.5,1.0,5.0,3.0]
# Python 2 print statements -- this module targets Python 2.
print d
print recommend('john',d)
#print itemSimilarity('Inception','Hangover',d)
| [
"kevin.sebastian@flipkart.com"
] | kevin.sebastian@flipkart.com |
96a2619f71a797d5d72ae26e3c5af54bd97c4c61 | e53361472f91a7798e52a0112769a285413e01a3 | /agent-client/glms/simple_thread.py | 21a93b36dd9c596f00d67d3c6f98b95d1171a755 | [] | no_license | allenjin/GLITS_Manager | ef0b5df159290563eaa8f416989b878ae2b293ba | 8eefedbb4693b31d5f61d8453defcd904ba737af | refs/heads/master | 2021-01-10T14:30:34.010477 | 2016-02-17T11:08:34 | 2016-02-17T11:08:34 | 48,750,617 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,077 | py | import logging
import threading
import time
LOG = logging.getLogger(__name__)
class SimpleThread(threading.Thread):
    """Daemon thread that repeatedly invokes a callable until told to stop.

    The callable is invoked in a loop; exceptions are logged and
    swallowed so one failure does not kill the thread.  An optional
    timeout (set via set_timeout) paces the loop so each iteration takes
    at least that many seconds.
    """

    def __init__(self, name, fn, *args, **kwargs):
        threading.Thread.__init__(self, name=name)
        self.setDaemon(True)
        self._fn = fn
        self._args = args
        self._kwargs = kwargs
        self._should_stop = False
        self._timeout = None

    def run(self):
        while not self._should_stop:
            start = time.time()
            wait_time = self._timeout
            try:
                self._fn(*self._args, **self._kwargs)
            except Exception:
                # Cross-compatible form (the old `except Exception, _`
                # is Python 2-only syntax); the bound name was unused.
                LOG.exception("Exception in thread '%s'", self.getName())
            diff = time.time() - start
            # BUGFIX: guard against an unset timeout.  On Python 3,
            # `diff < None` raises TypeError; only pace the loop when a
            # timeout has actually been configured.
            if wait_time is not None and diff < wait_time:
                time.sleep(max(0, wait_time - diff))
        LOG.debug("Stopping %s", self.getName())

    def should_stop(self, latch=None):
        """Request loop termination; `latch` is accepted but unused."""
        self._should_stop = True

    def set_name(self, val):
        self.setName(val)

    def set_timeout(self, timeout):
        """Set the minimum seconds per loop iteration (None = no pacing)."""
        self._timeout = timeout
| [
"allen3jin@163.com"
] | allen3jin@163.com |
4de36e3b4b7819d39bea1a5fc28371a28f807256 | 5a67a73ff4b8388a3109ca5ff1f89ea0b8ac780f | /manualImage.py | 3e6a66bc1a71f4bd75f973bf16c805a5519df028 | [] | no_license | paperclip/cat-camera | 4919db4f181fb1d14ef8bed42461f425db6b2010 | 5e246b057d7600df5d2e92e9723dadf12a44118b | refs/heads/master | 2022-09-29T12:51:22.464915 | 2022-08-29T13:35:28 | 2022-08-29T13:35:28 | 150,312,844 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,199 | py | #!/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import re
import json
try:
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import QApplication
except ImportError:
try:
import PyQt4.QtCore as QtCore
from PyQt4.QtGui import QApplication
except ImportError:
raise ImportError("ImageViewerQt: Requires PyQt5 or PyQt4.")
import viewImage
import tensorflow1.generate_roc_data
import tensorflow1.camera_dir
tensorflow1.camera_dir.cd_camera_dir()
category=[]
import database.db
# subprocess.call([RSYNC,"-va","douglas@pi:webdata/camera/","camera"])
#~ cats = set(os.listdir("images/cat"))
#~ notcats = set(os.listdir("images/not_cat"))
#~ camera = set(os.listdir("camera"))
#~ print(len(cats))
#~ print(len(notcats))
#~ print(len(camera))
#~ new = camera - cats
#~ new -= notcats
#~ marker = "timelapse-2018-08-12-13-15-24.jpeg"
def total():
    """Count the image files remaining across all new_cat/NN buckets."""
    buckets = (os.path.join("new_cat", "%02d" % b) for b in range(100, -1, -1))
    return sum(len(os.listdir(d)) for d in buckets if os.path.isdir(d))
REMAINING = None
def skipImage(basename):
    """Return True when `basename` should be skipped (currently disabled).

    The `if False and ...` guard keeps an old date-range filter around as
    a toggle; with the leading False the body is dead code, so every
    image is processed and REMAINING is never decremented here.
    """
    if False and basename > "timelapse-2019-05-04-00" and basename < "timelapse-2019-05-09-00":
        global REMAINING
        REMAINING -= 1
        print("Skipping {}: {}".format(basename, REMAINING))
        return True
    return False
def newCat(imageProcessor):
    """Yield paths of unclassified images, highest prediction bucket first.

    Walks new_cat/99 down to new_cat/00 (only directories that exist) and
    yields each contained file path, honouring skipImage().  When the
    generator is exhausted it closes the supplied image processor.
    """
    for base in range(100, -1, -1):
        directory = os.path.join("new_cat", "%02d" % base)
        if not os.path.isdir(directory):
            continue
        pics = os.listdir(directory)
        for p in pics:
            if not skipImage(p):
                yield os.path.join(directory, p)
    imageProcessor.close()
def getPrediction(imagePath):
    """Return the prediction bucket encoded in the path.

    Walks the path from leaf to root and returns the first component
    that parses as an integer; returns 0 if no component does.
    """
    remaining = imagePath
    while True:
        remaining, component = os.path.split(remaining)
        try:
            return int(component)
        except ValueError:
            pass
        if component == "":
            break
    return 0
GL_DB = database.db.Database()
class ImageDetails(object):
    """Labelling state for one image: DB record, human label, prediction."""

    def __init__(self, src):
        self.__m_src = src
        # Fetch (or create) the database row keyed by the image file name.
        self.__m_record = GL_DB.getRecord(os.path.basename(src))
        self.__m_is_cat = False
        # Prediction bucket parsed from the path's directory name (0-100).
        self.__m_prediction = getPrediction(src)
        if self.__m_prediction == 0:
            print("0:", src)

    def is_cat(self, actuallyCat=True):
        """Record the human-assigned label and persist it to the database."""
        self.__m_is_cat = actuallyCat
        self.__m_record['cat'] = 1 if actuallyCat else 0
        GL_DB.updateRecord(self.__m_record)

    def get_is_cat(self):
        """Return the human label (False until is_cat() has been called)."""
        return self.__m_is_cat

    def get_prediction(self):
        """Return the model's prediction score parsed from the path."""
        return self.__m_prediction
def save_results(results):
    """Write ROC input and output JSON files from the labelled results.

    `results` maps image basename -> ImageDetails.  Predictions are
    partitioned by the human label; nothing is written unless both
    classes contain at least one sample.
    """
    actuallyCatResults = []
    actuallyNotCatResults = []
    for r in results.values():
        prediction = r.get_prediction()
        if r.get_is_cat():
            actuallyCatResults.append(prediction)
        else:
            actuallyNotCatResults.append(prediction)
    if len(actuallyCatResults) == 0 or len(actuallyNotCatResults) == 0:
        return
    actuallyCatResults.sort()
    actuallyNotCatResults.sort()
    open("actuallyCatResults.json", "w").write(json.dumps(actuallyCatResults))
    open("actuallyNotCatResults.json", "w").write(json.dumps(actuallyNotCatResults))
    # Rebinds the local name `results` to the ROC curve data before dumping.
    results = tensorflow1.generate_roc_data.generate_roc_data(actuallyCatResults, actuallyNotCatResults)
    open("roc.json", "w").write(json.dumps(results))
class ManualImageProcessor(object):
    """Qt-driven manual labelling loop over the prediction-sorted images.

    Shows each image from new_cat/NN in turn; Up labels it a cat, Down
    labels it not a cat, Left/Right navigate, Q quits.  Keeps running
    confusion-matrix counts and writes the ROC data on close.
    """

    def __init__(self):
        self.m_closed = False
        # basename -> ImageDetails for every image shown this session.
        self.m_results = {}
        # Running confusion-matrix cells (the 'Postive' typo is kept
        # consistently throughout the class).
        self.m_truePositive = 0
        self.m_trueNegative = 0
        self.m_falsePostive = 0
        self.m_falseNegative = 0
        # Create image viewer and load an image file to display.
        self.m_viewer = viewImage.QtImageViewer()
        self.m_viewer.keyPressed.connect(self.on_key)
        # Navigation history stacks plus the lazy source of new images.
        self.m_previousImages = []
        self.m_nextImages = []
        self.m_moreImages = newCat(self)
        self.m_imageName = None
        global REMAINING
        REMAINING = total()
        self.nextImage()
        self.m_viewer.show()

    def close(self):
        """Flush results and tear down the viewer (idempotent)."""
        if not self.m_closed:
            print("Killing")
            save_results(self.m_results)
            self.outputCounts()
            self.m_viewer.deleteLater()
            self.m_closed = True

    def on_key(self, event):
        """Keyboard dispatch: Q quits, arrow keys navigate/classify."""
        key = event.key()
        if key == QtCore.Qt.Key_Q:
            self.close()
        elif key == QtCore.Qt.Key_Left:
            return self.previous()
        elif key == QtCore.Qt.Key_Up:
            return self.cat()
        elif key == QtCore.Qt.Key_Right:
            return self.nextImage()
        elif key == QtCore.Qt.Key_Down:
            return self.notcat()

    def previous(self):
        """Step back to the previously shown image, if any."""
        if len(self.m_previousImages) > 0:
            self.m_nextImages.append(self.m_imageName)
            self.setImage(self.m_previousImages.pop())
        else:
            print("No previous image")

    def nextImage(self):
        """Advance to the next image (history stack first, then generator)."""
        if self.m_imageName is not None:
            self.m_previousImages.append(self.m_imageName)
        if len(self.m_nextImages) > 0:
            self.setImage(self.m_nextImages.pop())
        else:
            n = next(self.m_moreImages)
            self.setImage(n)

    def cat(self):
        """Label the current image as a cat, then advance."""
        global REMAINING
        print('Cat %d remaining '%REMAINING)
        self.moveImage("images","cat")
        return self.nextImage()

    def notcat(self):
        """Label the current image as not a cat, then advance."""
        global REMAINING
        print('Not Cat %d'%REMAINING)
        self.moveImage("images","not_cat")
        return self.nextImage()

    def src(self):
        """Locate the current image on disk.

        It may already have been moved into images/cat or
        images/not_cat by a previous decision; fall back to the stored
        name when neither exists.
        """
        if os.path.isfile(self.m_imageName):
            return self.m_imageName
        base = os.path.basename(self.m_imageName)
        p = os.path.join("images","not_cat",base)
        if os.path.isfile(p):
            return p
        p = os.path.join("images","cat",base)
        if os.path.isfile(p):
            return p
        return self.m_imageName

    def updateCounts(self, src, dest):
        """Update the confusion matrix for a move from src to dest.

        A prediction of >= 50 counts as 'predicted cat'.  When an image
        is re-labelled (moved out of images/ rather than new_cat/), the
        previously counted cell is subtracted before the new one is
        added, and REMAINING is bumped back up.
        """
        global REMAINING
        base = os.path.basename(src)
        prediction = getPrediction(self.m_imageName)
        predictedCat = prediction >= 50
        actualCat = "not_cat" not in dest
        print("predicted = %r (%d), actual = %r"%(predictedCat, prediction, actualCat))
        self.m_results[base].is_cat(actualCat)
        if "new_cat" not in src:
            REMAINING += 1
            ## subtract out error
            if "not_cat" in src:
                ## moving from not_cat to cat
                assert actualCat
                if predictedCat:
                    ## we predicted cat, and it actually was a cat
                    self.m_falsePostive -= 1
                else:
                    self.m_trueNegative -= 1
            else:
                ## moving from cat to not_cat
                assert not actualCat
                if predictedCat:
                    self.m_truePositive -= 1
                else:
                    self.m_falseNegative -= 1
        if predictedCat:
            if actualCat:
                self.m_truePositive += 1
            else:
                self.m_falsePostive += 1
        else:
            if actualCat:
                self.m_falseNegative += 1
            else:
                self.m_trueNegative += 1

    def moveImage(self, *dest):
        """Move the current image into the directory given by *dest parts."""
        base = os.path.basename(self.m_imageName)
        src = self.src()
        dest = os.path.join(*dest, base)
        statbuf = os.stat(src)
        size = statbuf.st_size
        if src == dest:
            print("%s already moved"%src)
            return
        ## Update counts
        self.updateCounts(src, dest)
        global REMAINING
        if os.path.isfile(dest):
            # Destination already has this file: just drop the duplicate.
            print("%s already exists"%dest)
            os.unlink(src)
            REMAINING -= 1
        else:
            print("Rename %s to %s (%d bytes) (%f KiB)"%(src,dest, size, size / 1024.0))
            os.rename(src,dest)
            REMAINING -= 1

    def setImage(self, imageName):
        """Display an image, registering ImageDetails if still unlabelled."""
        self.m_imageName = imageName
        src = self.src()
        if "new_cat" in src:
            self.m_results[os.path.basename(src)] = ImageDetails(src)
        self.m_viewer.loadImageFromFile(src)

    def outputCounts(self):
        """Print the confusion matrix plus recall and precision."""
        print("True Positive = %d"%self.m_truePositive)
        print("True Negative = %d"%self.m_trueNegative)
        print("False Positive = %d"%self.m_falsePostive)
        print("False Negative = %d"%self.m_falseNegative)
        print()
        if self.m_truePositive + self.m_falseNegative > 0:
            print("Recall = %f"% (1.0*self.m_truePositive / (self.m_truePositive + self.m_falseNegative)))
        if self.m_truePositive + self.m_falsePostive > 0:
            print("Precision = %f"%(1.0*self.m_truePositive / (self.m_truePositive + self.m_falsePostive)))
        print()
print()
def main(argv):
    """Run the Qt labelling application; returns the Qt exit code."""
    # Create the application.
    app = QApplication(argv)
    processor = ManualImageProcessor()
    # Show viewer and run application.
    try:
        return app.exec_()
    finally:
        # Always flush results and close the database, even on error.
        processor.close()
        GL_DB.close()
# Script entry point: propagate the Qt exit code to the shell.
if __name__ == '__main__':
    sys.exit(main(sys.argv))
| [
"douglas.leeder@gmail.com"
] | douglas.leeder@gmail.com |
2496fe20939060e4e360a7862a99df132f61170b | d8cbe9ce0469f72b8929af01538b6ceddff10a38 | /homeassistant/components/sensibo/sensor.py | 8048eece3389bcd056ef788ba4ca8de6ce7edddc | [
"Apache-2.0"
] | permissive | piitaya/home-assistant | 9c1ba162dac9604e4d43e035e74bad7bba327f0b | 48893738192431f96966998c4ff7a3723a2f8f4a | refs/heads/dev | 2023-03-07T16:13:32.117970 | 2023-01-10T17:47:48 | 2023-01-10T17:47:48 | 172,578,293 | 3 | 1 | Apache-2.0 | 2023-02-22T06:15:56 | 2019-02-25T20:19:40 | Python | UTF-8 | Python | false | false | 12,334 | py | """Sensor platform for Sensibo integration."""
from __future__ import annotations
from collections.abc import Callable, Mapping
from dataclasses import dataclass
from datetime import datetime
from typing import TYPE_CHECKING, Any
from pysensibo.model import MotionSensor, SensiboDevice
from homeassistant.components.sensor import (
SensorDeviceClass,
SensorEntity,
SensorEntityDescription,
SensorStateClass,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
UnitOfElectricPotential,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import EntityCategory
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from .const import DOMAIN
from .coordinator import SensiboDataUpdateCoordinator
from .entity import SensiboDeviceBaseEntity, SensiboMotionBaseEntity
PARALLEL_UPDATES = 0
@dataclass
class MotionBaseEntityDescriptionMixin:
    """Mixin for required Sensibo base description keys."""

    # Extracts this sensor's native value from a MotionSensor snapshot.
    value_fn: Callable[[MotionSensor], StateType]


@dataclass
class DeviceBaseEntityDescriptionMixin:
    """Mixin for required Sensibo base description keys."""

    # Extracts this sensor's native value from a SensiboDevice snapshot.
    value_fn: Callable[[SensiboDevice], StateType | datetime]
    # Builds extra state attributes; None when the sensor exposes none.
    extra_fn: Callable[[SensiboDevice], dict[str, str | bool | None] | None] | None


@dataclass
class SensiboMotionSensorEntityDescription(
    SensorEntityDescription, MotionBaseEntityDescriptionMixin
):
    """Describes Sensibo Motion sensor entity."""


@dataclass
class SensiboDeviceSensorEntityDescription(
    SensorEntityDescription, DeviceBaseEntityDescriptionMixin
):
    """Describes Sensibo Device sensor entity."""
FILTER_LAST_RESET_DESCRIPTION = SensiboDeviceSensorEntityDescription(
key="filter_last_reset",
device_class=SensorDeviceClass.TIMESTAMP,
name="Filter last reset",
icon="mdi:timer",
value_fn=lambda data: data.filter_last_reset,
extra_fn=None,
)
MOTION_SENSOR_TYPES: tuple[SensiboMotionSensorEntityDescription, ...] = (
SensiboMotionSensorEntityDescription(
key="rssi",
device_class=SensorDeviceClass.SIGNAL_STRENGTH,
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
state_class=SensorStateClass.MEASUREMENT,
name="rssi",
icon="mdi:wifi",
value_fn=lambda data: data.rssi,
entity_registry_enabled_default=False,
),
SensiboMotionSensorEntityDescription(
key="battery_voltage",
device_class=SensorDeviceClass.VOLTAGE,
entity_category=EntityCategory.DIAGNOSTIC,
native_unit_of_measurement=UnitOfElectricPotential.VOLT,
state_class=SensorStateClass.MEASUREMENT,
name="Battery voltage",
icon="mdi:battery",
value_fn=lambda data: data.battery_voltage,
),
SensiboMotionSensorEntityDescription(
key="humidity",
device_class=SensorDeviceClass.HUMIDITY,
native_unit_of_measurement=PERCENTAGE,
state_class=SensorStateClass.MEASUREMENT,
name="Humidity",
icon="mdi:water",
value_fn=lambda data: data.humidity,
),
SensiboMotionSensorEntityDescription(
key="temperature",
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
name="Temperature",
icon="mdi:thermometer",
value_fn=lambda data: data.temperature,
),
)
PURE_SENSOR_TYPES: tuple[SensiboDeviceSensorEntityDescription, ...] = (
SensiboDeviceSensorEntityDescription(
key="pm25",
device_class=SensorDeviceClass.PM25,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=SensorStateClass.MEASUREMENT,
name="PM2.5",
icon="mdi:air-filter",
value_fn=lambda data: data.pm25,
extra_fn=None,
),
SensiboDeviceSensorEntityDescription(
key="pure_sensitivity",
name="Pure sensitivity",
icon="mdi:air-filter",
value_fn=lambda data: data.pure_sensitivity,
extra_fn=None,
translation_key="sensitivity",
),
FILTER_LAST_RESET_DESCRIPTION,
)
DEVICE_SENSOR_TYPES: tuple[SensiboDeviceSensorEntityDescription, ...] = (
SensiboDeviceSensorEntityDescription(
key="timer_time",
device_class=SensorDeviceClass.TIMESTAMP,
name="Timer end time",
icon="mdi:timer",
value_fn=lambda data: data.timer_time,
extra_fn=lambda data: {"id": data.timer_id, "turn_on": data.timer_state_on},
),
SensiboDeviceSensorEntityDescription(
key="feels_like",
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
name="Temperature feels like",
value_fn=lambda data: data.feelslike,
extra_fn=None,
entity_registry_enabled_default=False,
),
SensiboDeviceSensorEntityDescription(
key="climate_react_low",
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
name="Climate React low temperature threshold",
value_fn=lambda data: data.smart_low_temp_threshold,
extra_fn=lambda data: data.smart_low_state,
entity_registry_enabled_default=False,
),
SensiboDeviceSensorEntityDescription(
key="climate_react_high",
device_class=SensorDeviceClass.TEMPERATURE,
state_class=SensorStateClass.MEASUREMENT,
name="Climate React high temperature threshold",
value_fn=lambda data: data.smart_high_temp_threshold,
extra_fn=lambda data: data.smart_high_state,
entity_registry_enabled_default=False,
),
SensiboDeviceSensorEntityDescription(
key="climate_react_type",
translation_key="smart_type",
name="Climate React type",
value_fn=lambda data: data.smart_type,
extra_fn=None,
entity_registry_enabled_default=False,
),
FILTER_LAST_RESET_DESCRIPTION,
)
AIRQ_SENSOR_TYPES: tuple[SensiboDeviceSensorEntityDescription, ...] = (
SensiboDeviceSensorEntityDescription(
key="airq_tvoc",
native_unit_of_measurement=CONCENTRATION_PARTS_PER_BILLION,
state_class=SensorStateClass.MEASUREMENT,
icon="mdi:air-filter",
name="AirQ TVOC",
value_fn=lambda data: data.tvoc,
extra_fn=None,
),
SensiboDeviceSensorEntityDescription(
key="airq_co2",
device_class=SensorDeviceClass.CO2,
native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
state_class=SensorStateClass.MEASUREMENT,
name="AirQ CO2",
value_fn=lambda data: data.co2,
extra_fn=None,
),
)
ELEMENT_SENSOR_TYPES: tuple[SensiboDeviceSensorEntityDescription, ...] = (
SensiboDeviceSensorEntityDescription(
key="pm25",
device_class=SensorDeviceClass.PM25,
native_unit_of_measurement=CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
state_class=SensorStateClass.MEASUREMENT,
name="PM 2.5",
value_fn=lambda data: data.pm25,
extra_fn=None,
),
SensiboDeviceSensorEntityDescription(
key="tvoc",
native_unit_of_measurement=CONCENTRATION_PARTS_PER_BILLION,
state_class=SensorStateClass.MEASUREMENT,
name="TVOC",
value_fn=lambda data: data.tvoc,
extra_fn=None,
),
SensiboDeviceSensorEntityDescription(
key="co2",
device_class=SensorDeviceClass.CO2,
native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
state_class=SensorStateClass.MEASUREMENT,
name="CO2",
value_fn=lambda data: data.co2,
extra_fn=None,
),
SensiboDeviceSensorEntityDescription(
key="ethanol",
native_unit_of_measurement=CONCENTRATION_PARTS_PER_MILLION,
state_class=SensorStateClass.MEASUREMENT,
name="Ethanol",
value_fn=lambda data: data.etoh,
extra_fn=None,
),
SensiboDeviceSensorEntityDescription(
key="iaq",
device_class=SensorDeviceClass.AQI,
state_class=SensorStateClass.MEASUREMENT,
name="Air quality",
value_fn=lambda data: data.iaq,
extra_fn=None,
),
)
DESCRIPTION_BY_MODELS = {
"pure": PURE_SENSOR_TYPES,
"airq": AIRQ_SENSOR_TYPES,
"elements": ELEMENT_SENSOR_TYPES,
}
async def async_setup_entry(
    hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
    """Set up Sensibo sensor platform."""
    coordinator: SensiboDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]

    entities: list[SensiboMotionSensor | SensiboDeviceSensor] = []

    # One entity per (motion sensor, description) pair, only for devices
    # that actually report motion sensors.
    for device_id, device_data in coordinator.data.parsed.items():
        if device_data.motion_sensors:
            entities.extend(
                SensiboMotionSensor(
                    coordinator, device_id, sensor_id, sensor_data, description
                )
                for sensor_id, sensor_data in device_data.motion_sensors.items()
                for description in MOTION_SENSOR_TYPES
            )
    # Device-level sensors: the description set depends on the model
    # (pure/airq/elements), defaulting to the generic device set.
    entities.extend(
        SensiboDeviceSensor(coordinator, device_id, description)
        for device_id, device_data in coordinator.data.parsed.items()
        for description in DESCRIPTION_BY_MODELS.get(
            device_data.model, DEVICE_SENSOR_TYPES
        )
    )
    async_add_entities(entities)
class SensiboMotionSensor(SensiboMotionBaseEntity, SensorEntity):
    """Representation of a Sensibo Motion Sensor."""

    entity_description: SensiboMotionSensorEntityDescription

    def __init__(
        self,
        coordinator: SensiboDataUpdateCoordinator,
        device_id: str,
        sensor_id: str,
        sensor_data: MotionSensor,
        entity_description: SensiboMotionSensorEntityDescription,
    ) -> None:
        """Initiate Sensibo Motion Sensor."""
        super().__init__(
            coordinator,
            device_id,
            sensor_id,
            sensor_data,
        )
        self.entity_description = entity_description
        # Unique per sensor+measurement so each reading is its own entity.
        self._attr_unique_id = f"{sensor_id}-{entity_description.key}"

    @property
    def native_unit_of_measurement(self) -> str | None:
        """Add native unit of measurement."""
        # Temperature readings are reported in Celsius; every other unit
        # comes from the static description.
        if self.entity_description.device_class == SensorDeviceClass.TEMPERATURE:
            return UnitOfTemperature.CELSIUS
        return self.entity_description.native_unit_of_measurement

    @property
    def native_value(self) -> StateType:
        """Return value of sensor."""
        if TYPE_CHECKING:
            assert self.sensor_data
        # The description's value_fn knows which field of the motion
        # sensor snapshot this entity exposes.
        return self.entity_description.value_fn(self.sensor_data)
class SensiboDeviceSensor(SensiboDeviceBaseEntity, SensorEntity):
    """Representation of a Sensibo Device Sensor."""

    entity_description: SensiboDeviceSensorEntityDescription

    def __init__(
        self,
        coordinator: SensiboDataUpdateCoordinator,
        device_id: str,
        entity_description: SensiboDeviceSensorEntityDescription,
    ) -> None:
        """Initiate Sensibo Device Sensor."""
        super().__init__(
            coordinator,
            device_id,
        )
        self.entity_description = entity_description
        # Unique per device+measurement so each reading is its own entity.
        self._attr_unique_id = f"{device_id}-{entity_description.key}"

    @property
    def native_unit_of_measurement(self) -> str | None:
        """Add native unit of measurement."""
        # Temperature readings are reported in Celsius; every other unit
        # comes from the static description.
        if self.entity_description.device_class == SensorDeviceClass.TEMPERATURE:
            return UnitOfTemperature.CELSIUS
        return self.entity_description.native_unit_of_measurement

    @property
    def native_value(self) -> StateType | datetime:
        """Return value of sensor."""
        state = self.entity_description.value_fn(self.device_data)
        return state

    @property
    def extra_state_attributes(self) -> Mapping[str, Any] | None:
        """Return additional attributes."""
        # Only descriptions that define extra_fn expose extra attributes.
        if self.entity_description.extra_fn is not None:
            return self.entity_description.extra_fn(self.device_data)
        return None
| [
"noreply@github.com"
] | noreply@github.com |
88547a812253911bf4aa61845c37ea6cdb4c44a0 | 7681be3d385e4d05eeda61d711bae142efdd2f1f | /www/app.py | ed23b783400c4fb53a2399773b82527e04f96eb4 | [
"Apache-2.0"
] | permissive | zzy0471/spyblog | 93dc93f7a477af64eba0ee8e12cd8407f3fa0c59 | 5dbb192fbd7cb3e14f72b38e0e2dbef244461440 | refs/heads/master | 2021-01-19T05:33:29.165857 | 2016-07-08T00:37:22 | 2016-07-08T00:37:22 | 61,761,096 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 598 | py | #!/usr/bin/python3
import logging
import asyncio, os, json, time
from datetime import datetime
from aiohttp import web
def index(request):
    # GET / handler: fixed HTML body.
    # NOTE(review): no content_type is set; modern aiohttp would use
    # web.Response(text='...', content_type='text/html').
    return web.Response(body = b'<h1>Hello</h1>')

# NOTE(review): @asyncio.coroutine / `yield from` is the legacy pre-3.5
# coroutine style; it was deprecated and removed in Python 3.11, and the
# `loop=` argument to web.Application is gone in current aiohttp. This file
# only runs on old interpreters/library versions.
@asyncio.coroutine
def init(loop):
    logging.basicConfig(level=logging.INFO)
    app = web.Application(loop = loop)
    app.router.add_route('GET', '/', index)
    # Host and port are hard-coded to localhost:8888.
    srv = yield from loop.create_server(app.make_handler(), '127.0.0.1', 8888)
    logging.info('server started at http://127.0.0.1:8888...')
    return srv

# Start the server and block forever.
loop = asyncio.get_event_loop()
loop.run_until_complete(init(loop))
loop.run_forever()
"zzy_0471@163.com"
] | zzy_0471@163.com |
602534b2b5640835f91753fe88773c67f8116f05 | 7da6ecf172b3e9354d93ddfe06f87b930fad90b3 | /pickup/generator_profile/folder.py | 8b6f79f8d7e9a8b25b0989cacbb483ad3f55c10e | [] | no_license | exhuma/pickup | 05f8d271de95d76b337a6994dcd21799fe0e4b34 | 688b05d0ae1276dcc386b45c8ddb1cea71b15cb1 | refs/heads/master | 2016-09-06T01:21:08.343607 | 2011-07-15T15:09:10 | 2011-07-15T15:09:10 | 1,059,260 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,683 | py | """
The folder plugin creates a bzipped tar file for a specific folder. It is also
possible to specify a parent folder and create individual tarballs for each
subfolder, plus one for the plain files beneath that folder.
Configuration
~~~~~~~~~~~~~
The following fields are used by this plugin:
**path** (string)
The folder
**split** (boolean) *optional*
If set to "True", this module will create individual tarballs (Default =
False).
Configuration Example
~~~~~~~~~~~~~~~~~~~~~
.. code-block:: python
dict(
name = 'My home folder',
profile = 'folder',
config = dict(
path = '/home/me',
split = True,
)
),
"""
import logging
import tarfile
import re
from os.path import exists, join, abspath, isdir
import os
# Module-level logger for this plugin.
LOG = logging.getLogger(__name__)

# Plugin API version expected by the pickup host application.
API_VERSION = (2,0)

# Populated by init(): the source's 'config' mapping and the full source dict.
CONFIG = {}
SOURCE = {}
def init(source):
    """
    Remember the backup-source definition for later runs.

    Copies ``source['config']`` into the module-level ``CONFIG`` mapping and
    the whole ``source`` dict into ``SOURCE``.  When ``split`` is set in the
    config, the strategy later creates one tarball per subfolder of the path.
    """
    cfg = source['config']
    CONFIG.update(cfg)
    SOURCE.update(source)
    LOG.debug("Initialised '%s' with %r" % (__name__, CONFIG))
def run(staging_area):
    """Create the configured tarball(s) inside *staging_area*."""
    src = CONFIG['path']
    if not exists(src):
        LOG.error("Path '%s' does not exist! Skipping!" % src)
        return
    # Dispatch on the 'split' flag: one archive per subfolder, or a single one.
    strategy = create_split_tar if CONFIG.get("split", False) else create_simple_tar
    strategy(staging_area)
def create_split_tar(staging_area):
    """
    Creates one tar file for each folder found in CONFIG['path']. If normal
    files reside in that folder, they will be collected into a special tarfile
    named "__PICKUP_FILES__.tar.bz2"

    @param staging_area: The target folder
    """
    if not isdir(CONFIG['path']):
        LOG.error("Impossible to create a split tar! %s is not a folder!" % CONFIG['path'])
        return

    LOG.info("Creating tarball for each folder inside %s" % CONFIG['path'])

    if not exists(staging_area):
        os.makedirs(staging_area)
    elif not isdir(staging_area):
        LOG.error("'%s' exists and is not a folder! Skipping" % staging_area)
        return

    leftover_files = []
    for entry in os.listdir(CONFIG['path']):
        entrypath = join(CONFIG['path'], entry)
        # Plain files are collected and archived together at the end.
        if not isdir(entrypath):
            leftover_files.append(entrypath)
            continue
        tarname = join(staging_area, "%s.tar.bz2" % entry)
        LOG.info("Writing to '%s'" % abspath(tarname))
        # Context manager closes the archive even if tar.add raises
        # (the original leaked the handle on error).
        with tarfile.open(abspath(tarname), "w:bz2") as tar:
            tar.add(entrypath)

    if leftover_files:
        tarname = join(staging_area, "__PICKUP_FILES__.tar.bz2")
        LOG.info("Writing remaining files to '%s'" % abspath(tarname))
        with tarfile.open(abspath(tarname), "w:bz2") as tar:
            # `filename` instead of the original `file`, which shadowed the builtin.
            for filename in leftover_files:
                LOG.info(" Adding %s" % filename)
                tar.add(filename)
def get_basename():
    """
    Create a 'clean' filename derived from the source name.

    Non-alphanumeric characters become underscores, leading/trailing
    underscores are stripped, and a numeric suffix is appended while a file
    of that name exists in the working directory.
    """
    # replace non-alphanumeric characters with underscores
    basename = re.sub(r'[^a-zA-Z0-9]', "_", SOURCE['name'])
    # now remove all leading/trailing underscores
    basename = basename.strip("_")
    # Prevent accidental overwrites.  Always suffix the *original* base name,
    # so collisions yield "name-1", "name-2", ... — the original compounded
    # the suffix ("name-1", "name-1-2", ...).
    candidate = basename
    counter = 0
    while exists(candidate):
        counter += 1
        LOG.debug("File %s exists. Adding a counter." % candidate)
        candidate = "%s-%d" % (basename, counter)
    return candidate
def create_simple_tar(staging_area):
    """Archive CONFIG['path'] into a single bzip2 tarball inside *staging_area*."""
    LOG.info("Creating tarball for path %s" % CONFIG['path'])
    # Build the destination path from the sanitised source name.
    target = abspath(join(staging_area, "%s.tar.bz2" % get_basename()))
    LOG.info("Writing to '%s'" % target)
    tar = tarfile.open(target, "w:bz2")
    tar.add(CONFIG['path'])
    tar.close()
| [
"michel@albert.lu"
] | michel@albert.lu |
20651e32d909f28fe941ac052d688ca545b59ec8 | 3145c8c63bfe2f65ac5e37f38a226b866080d0c7 | /myDbGen.py | 82ec07857c31b23d98cf182c390e873e03fe8e2f | [] | no_license | Kirshan1205/_pycademy | c9db93d9081d5ca78583e00c6fd09c4589e46686 | 6579fb86dd6548a7b919fbbfd4d1c62244608c4b | refs/heads/master | 2021-01-07T23:45:26.156098 | 2020-02-20T10:23:53 | 2020-02-20T10:23:53 | 241,853,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | import pickle, json, pprint, os
from pymongo import MongoClient
# NOTE(review): MongoDB credentials are hard-coded in source — rotate them and
# load from environment/config instead of committing them.
con = MongoClient("mongodb://maysam:0183552313@ds149479.mlab.com:49479/pycademy")
db = con.pycademy
pp= pprint.PrettyPrinter(indent=4)

# NOTE(review): Python 2 only — `.next()`, `print f` and `.iteritems()` below
# do not run on Python 3.
dirs=os.walk(os.getcwd()+'/database/courses').next()

# ----- reading db from json file------
with open('db.json') as f:
    print f
    webDb= json.load(f)

# ----- reading courses from courses directory -------
# One subdirectory per course; each holds course_details.json plus image/plan.
courses={}
for d in dirs[1]:
    jsonAddress= '/home/ubuntu/workspace/database/courses/{}/course_details.json'.format(d)
    with open(jsonAddress) as f:
        courseDetail= json.load(f)
        courseDetail['image']= '/home/ubuntu/workspace/database/courses/{}/course_image.jpg'.format(d)
        courseDetail['course_plan']= '/home/ubuntu/workspace/database/courses/{}/course_plan.pdf'.format(d)
        courses[d]=courseDetail

# course detail enrichment: resolve teacherID to the teacher's display name.
for ID,course in courses.iteritems():
    course['teacher']=webDb["teachers"][course['teacherID'].lower()]['name']

# NOTE(review): this loop is an exact duplicate of the one above (idempotent
# but dead weight) — candidate for removal.
for ID,course in courses.iteritems():
    course['teacher']=webDb["teachers"][course['teacherID'].lower()]['name']

# --------------reading images from gallery --------
imageDirs=os.walk(os.getcwd()+'/static/img/gallery/').next()
images={}
for i,d in enumerate(imageDirs[2]):
    images[i] = {"address":"/static/img/gallery/" + d}

webDb['courses']=courses
webDb['images']=images

# Rebuild the collections from scratch on every run.
db.teachers.drop()
db.courses.drop()
db.images.drop()

teachers = db.teachers
for tid, teacher in webDb['teachers'].iteritems():
    teacher['tid']=tid
    teachers.insert(teacher)

courses = db.courses
for cid, course in webDb['courses'].iteritems():
    course['cid']=cid
    courses.insert(course)

images = db.images
for iid, image in webDb['images'].iteritems():
    image['iid']=iid
    images.insert(image)

# Blog posts live one-per-directory under database/blogs.
blogs = db.blogs
postsDirs = os.walk(os.getcwd()+'/database/blogs').next()
for postDir in postsDirs[1]:
    post = "{}/{}/post.json".format(postsDirs[0],postDir)
    with open(post) as a:
        blogs.insert(json.load(a))

stats = db.stats
for sid, stat in webDb['stat'].iteritems():
    stat['sid']=sid
    stats.insert(stat)

pp.pprint(webDb)

# ------- save a local pickle snapshot of the assembled db ------
with open('webDb.pkl','wb') as f:
    pickle.dump(webDb, f , pickle.HIGHEST_PROTOCOL)
"maysam@kidocode.com"
] | maysam@kidocode.com |
59ca19d850aaded2ce8f83598274f14f60a18201 | 3bc0c7eb2eab102a599b829c5a46f55c70d1f67e | /main/models.py | 587f6124c3861d458048c747e1b3d5c89742fbf3 | [] | no_license | Mrfad/resume | 7c22ed7137109484a8aa9661f96ccaae1274eba0 | fccafe6b74eb148e357dae283780c33b3e160018 | refs/heads/main | 2023-08-19T21:02:20.794765 | 2021-09-19T17:00:41 | 2021-09-19T17:00:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,265 | py | from django.db import models
class About(models.Model):
    """Profile/bio section shown on the resume site."""
    header = models.TextField()
    img = models.ImageField(upload_to="profile/", null=True, blank=True)
    title = models.CharField(max_length=150, null=True, blank=True)
    intro = models.TextField(null=True, blank=True)
    address = models.CharField(max_length=250, null=True, blank=True)
    githublink = models.CharField(max_length=250, null=True, blank=True)
    bday = models.DateField()
    website = models.CharField(max_length=150, null=True, blank=True)
    phone = models.CharField(max_length=100, null=True, blank=True)
    city = models.CharField(max_length=100)
    # NOTE(review): `age` is stored alongside `bday` — presumably redundant;
    # confirm whether it should be derived instead.
    age = models.IntegerField()
    degree = models.CharField(max_length=100, null=True, blank=True)
    email = models.EmailField()
    freelance = models.CharField(max_length=100)
    long_description = models.TextField()

    def __str__(self):
        # NOTE(review): `title` is nullable; returning None from __str__
        # raises TypeError — confirm titles are always set.
        return self.title
class SkillType(models.Model):
    """Category grouping for skills (referenced by Skill.type)."""
    name = models.CharField(max_length=50, null=True, blank=True)

    def __str__(self):
        return self.name
class Skill(models.Model):
    """A single skill with a proficiency level, grouped by SkillType."""
    name = models.CharField(max_length=50, null=True, blank=True)
    type = models.ForeignKey(SkillType, null=True, blank=True, on_delete=models.CASCADE)
    # Proficiency value; presumably a 0-100 percentage — TODO confirm.
    level = models.IntegerField()

    def __str__(self):
        return self.name
class FactDescription(models.Model):
    """Free-text blurb accompanying the facts section."""
    description = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.description
class Fact(models.Model):
    """A counter-style fact (icon + number), e.g. projects completed."""
    fact_name = models.CharField(max_length=150, null=True, blank=True)
    # Icon identifier; presumably a CSS icon class name — TODO confirm.
    icon = models.CharField(max_length=150, null=True, blank=True)
    number = models.IntegerField(null=True, blank=True)

    def __str__(self):
        return self.fact_name
class ExperienceDEtail(models.Model):
    """One duty/responsibility line attached to an Experience.

    NOTE(review): class name has a typo ("DEtail") but is kept — renaming
    would require a migration and break references.
    """
    duty = models.CharField(max_length=500, null=True, blank=True)

    def __str__(self):
        return self.duty
class Experience(models.Model):
    """A work-experience entry with its duty lines."""
    title = models.CharField(max_length=250, null=True, blank=True)
    description = models.TextField(null=True, blank=True)
    from_year = models.DateField(null=True, blank=True)
    to_year = models.DateField(null=True, blank=True)
    # Textual flag for ongoing positions; presumably shown instead of to_year.
    present = models.CharField(max_length=50, null=True, blank=True)
    company_name = models.CharField(max_length=200, null=True, blank=True)
    duty = models.ManyToManyField(ExperienceDEtail)

    def __str__(self):
        return self.title
class College(models.Model):
    """Issuing institution for certifications."""
    college_name = models.CharField(max_length=250, null=True, blank=True)

    def __str__(self):
        return self.college_name
class Certification(models.Model):
    """A certificate/degree earned at a College."""
    cert_name = models.CharField(max_length=250, null=True, blank=True)
    # Stored as free text rather than a DateField.
    cert_date = models.CharField(max_length=250, null=True, blank=True)
    college = models.ForeignKey(College, on_delete=models.CASCADE, null=True, blank=True)
    description = models.TextField(null=True, blank=True)

    def __str__(self):
        return self.cert_name
class Contact(models.Model):
    """A message submitted through the site's contact form."""
    name = models.CharField(max_length=100)
    email = models.EmailField()
    subject = models.CharField(max_length=150)
    message = models.TextField()
    # Set automatically when the row is created.
    message_date = models.DateTimeField(auto_now_add=True)

    def __str__(self):
        return self.name
| [
"fady.ghalayiny@outlook.com"
] | fady.ghalayiny@outlook.com |
66dc0f2daff11b6cce93fd0485b61c72d2d44f92 | 1adc05008f0caa9a81cc4fc3a737fcbcebb68995 | /hardhat/recipes/pango.py | 534919580c9ee2d79f2ed539a86e1db554ea72c2 | [
"MIT",
"BSD-3-Clause"
] | permissive | stangelandcl/hardhat | 4aa995518697d19b179c64751108963fa656cfca | 1ad0c5dec16728c0243023acb9594f435ef18f9c | refs/heads/master | 2021-01-11T17:19:41.988477 | 2019-03-22T22:18:44 | 2019-03-22T22:18:52 | 79,742,340 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | from .base import GnuRecipe
class PangoRecipe(GnuRecipe):
    """Build recipe for the Pango text-layout library (GNOME)."""

    def __init__(self, *args, **kwargs):
        super(PangoRecipe, self).__init__(*args, **kwargs)
        self.name = 'pango'
        self.version = '1.42.4'
        self.sha256 = '1d2b74cd63e8bd41961f2f8d952355aa' \
                      '0f9be6002b52c8aa7699d9f5da597c9d'
        self.depends = ['cairo', 'fontconfig', 'glib', 'harfbuzz']
        self.version_regex = '(?P<version>\d+\.\d+)'
        self.version_url = 'http://ftp.gnome.org/pub/GNOME/sources/pango/'
        # GNOME mirrors group tarballs in major.minor directories;
        # $name/$version are template placeholders expanded by the base class.
        major_minor = '.'.join(self.version.split('.')[:2])
        self.url = 'http://ftp.gnome.org/pub/gnome/sources/$name/' \
                   '%s/$name-$version.tar.xz' % major_minor
| [
"clayton.stangeland@gmail.com"
] | clayton.stangeland@gmail.com |
c85c460a448c4a63602d3d96b271abbdb9f524f3 | afbcda99c55aeb26360d593f1abe99afbbb1d1b7 | /Python/Temppraw/temppraw.py | e6309910cf300fdc9d0c9bc4b437f7b346c77495 | [] | no_license | cstuartroe/misc | b4c4fb2f8ef7341acf99f35e9eece1cf3769a0fc | 307b00c3ab7e51204401e84bd6c4466315889dfe | refs/heads/master | 2023-08-17T19:07:59.535257 | 2023-08-06T16:07:27 | 2023-08-06T16:07:27 | 156,424,382 | 0 | 0 | null | 2022-05-25T02:00:29 | 2018-11-06T17:50:34 | Java | UTF-8 | Python | false | false | 859 | py | import praw
import time
import datetime
# Snapshot of "now" (epoch seconds) used below to compute post age.
current = time.time()

# NOTE(review): Reddit credentials are hard-coded in source — rotate them and
# load from environment/config instead of committing them.
reddit = praw.Reddit(client_id='PTofuEjEjIPbcg',
                     client_secret='_R0b3zmCvjXGPseYbaPIUEnZAlU',
                     password='LinguisticsIsCool208',
                     user_agent='testscript by /u/conor_emily_ling208',
                     username='conor_emily_ling208')
def get_worthwhile_posts():
    """Scan recent /r/WritingPrompts posts and keep fast-rising, unanswered ones.

    A post qualifies when it is under a day old, has at least 4 points, is
    gaining better than a point per hour, and has at most one comment.
    """
    reddit.read_only = True
    prompts = reddit.subreddit('WritingPrompts')
    keepers = []
    for submission in prompts.new(limit=500):
        # assumes +28800 corrects a timezone offset in `current` — TODO confirm
        age = int(current - submission.created + 28800)
        points = submission.score
        # Guard clauses: reject anything too old, too low-scoring, too slow,
        # or already answered.
        if age >= 86400 or points < 4:
            continue
        if age / points >= 3600 or submission.num_comments > 1:
            continue
        keepers.append({'title': submission.title, 'score': points, 'elapsed': age // 3600})
    return keepers
| [
"cstuartroe@haverford.edu"
] | cstuartroe@haverford.edu |
2fc35c78749760c361cd5b6ea2884fc7fd16bb07 | f8a66f137d53306d1f05db6a2a6a0f4d0bd5acf1 | /Cyber-Main/JSL_Threat_Intel_Framework_whodat/a.py | f08854459bb01f5f6acedb36866ec7d61afe6614 | [] | no_license | sec-js/JSL-Cyber-ThreatIntelCore | 5d9e63a5fca0b0d2e250d682332ad86286277205 | a66c350b42c7ed95a4e3703e82983626fdab8ab7 | refs/heads/master | 2020-12-03T12:46:53.319750 | 2017-02-03T19:32:30 | 2017-02-03T19:32:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,957 | py | import csv
def write_final_output(dico):
    """Write threat-intel records to the CSV file ``outputfile``.

    ``dico`` is an iterable of dicts; each dict must contain every column key
    listed below.  Compared with the original hand-rolled concatenation this
    version:
      * closes the file even on error (the original never closed it, risking
        unflushed output), and
      * uses ``csv.writer`` so values containing commas or quotes are escaped
        instead of silently corrupting the row.
    """
    # Column order of the output file (kept identical to the original header).
    fields = [
        'pipelineid', 'datauploadid', 'uuid', 'referential', 'datasourcename',
        'date', 'cog', 'model', 'concept', 'segment', 'pedigree',
        'confidence_score', 'ipaddress', 'ipaddress_int', 'offenderclass',
        'first_observed_date', 'first_observed_time',
        'most_recent_observation_date', 'most_recent_observation_time',
        'total_observations', 'blranking', 'threat_score',
        'total_capabilities', 'commvett', 'commdatevett', 'govvett',
        'govdatevett', 'countryabbrv', 'country', 'city', 'coordinates',
        'geo_longitude', 'geo_latitude', 'isp', 'domain', 'netspeed',
        'network_asn', 'network_class', 'network_type', 'active boolean',
        'insrtdttm', 'updtdttm',
    ]
    with open("outputfile", "w") as f:
        # '\n' line terminator matches the original output exactly.
        writer = csv.writer(f, lineterminator='\n')
        writer.writerow(fields)
        for entry in dico:
            writer.writerow([str(entry[field]) for field in fields])
# Load test.csv, pairing each row's cells (coerced to str) with the header
# names, then emit the records to ``outputfile``.
with open('test.csv') as f:
    reader = csv.reader(f, skipinitialspace=True)
    header = next(reader)
    a = [dict(zip(header, map(str, row))) for row in reader]

write_final_output(a)
| [
"jonathan@johnsnowlabs.com"
] | jonathan@johnsnowlabs.com |
9be5c86ac51d2d6aeb06cdaa2a38671bb3997d9d | 629f8699870607f37a400655a2ccf4b94e1d0925 | /api/tests.py | 83cab9f11bf372bf6945060f623bee1ad89f0d1d | [
"MIT"
] | permissive | wendymunyasi/alcohol-tracker-api | 5ec4cbc628fb7947d83b4c6c7dd919de8c206083 | ee837817303e0d86d590dd1ae046e4675817f79d | refs/heads/master | 2023-07-31T14:39:08.075432 | 2021-01-25T11:32:44 | 2021-01-25T11:32:44 | 279,367,389 | 2 | 0 | null | 2021-09-22T19:32:51 | 2020-07-13T17:21:29 | Python | UTF-8 | Python | false | false | 1,061 | py | from django.test import TestCase
from .models import NewsLetterRecipient
class NewsLetterRecipientTestCase(TestCase):
    ''' Test case for the newsletter recipients model '''

    def setUp(self):
        # Runs before every test: seed one recipient.
        self.new_newsletter_recipient = NewsLetterRecipient(email="damon@gmail.com")
        self.new_newsletter_recipient.save()

    def tearDown(self):
        ''' runs after every test case: remove all recipients '''
        NewsLetterRecipient.objects.all().delete()

    def test_newsletter_recipient_instance(self):
        ''' test to see if a newsletter recipient instance was created correctly '''
        self.assertTrue(isinstance(self.new_newsletter_recipient, NewsLetterRecipient))

    def test_save_newsletter_recipient(self):
        ''' test to see if a newsletter recipient was saved correctly '''
        self.test_newsletter_recipient = NewsLetterRecipient( email="damon@gmail.com")
        self.test_newsletter_recipient.save()
        newsletter_recipients = NewsLetterRecipient.objects.all().count()
        # Count is 2 because setUp already saved one recipient.
        self.assertTrue(newsletter_recipients == 2)
"wendymunyasi@gmail.com"
] | wendymunyasi@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.