blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3ca57b11d92a7d2c2ffb779458d209bfc706cdd5 | d3f92b32093159ddb62822b0da166c06983846d1 | /Downloads/PycharmProjects/pyse11/python_base/import_1.py | 7833a6ce798e75885be32e1829bf08bf7acf6d3d | [] | no_license | zyall/demo | 73f4ad7f09e286df8d22b74e0d6c35ffd1884c5c | 053df3fb20040021e3da5254e148a66abf204f56 | refs/heads/temp | 2021-06-14T13:00:01.766615 | 2019-07-19T03:11:36 | 2019-07-19T03:11:36 | 197,181,715 | 0 | 0 | null | 2021-06-02T00:01:08 | 2019-07-16T11:33:18 | Python | UTF-8 | Python | false | false | 272 | py | # from selenium import webdriver
#模组
# import time
# from time import sleep,ctime
# from time import *
def sleep():
    '''If a locally defined name clashes with an imported one, the local
    definition is the one that gets called.'''
    # Demonstrates shadowing: this definition replaces time.sleep here.
    print("I am def sleep")
# NOTE(review): ctime() is undefined because the `from time import *` line
# above is commented out -- running this file raises NameError here.
print(ctime())
# NOTE(review): the local sleep() takes no arguments, so sleep(1) raises
# TypeError; presumably intended to show that the local def wins.
sleep(1)
print(ctime())
# print(help(time))
| [
"1554754887@qq.com"
] | 1554754887@qq.com |
8018a99e055ad4843c27b7387124fad14d59ae97 | ac4c53256a5e78553bf6d58f2644715a097fe855 | /python/math_axplusbyplusczequald.py | 37d16088cedc5cb4c85c9a331e3f6746fd5bc2b9 | [] | no_license | kalyancheerla/sample-codes | c9868cc2f35f1f3a5836a855c5f0096855a620d2 | b6719c0717ebdea5b2c0974789ec1d180fa7dc10 | refs/heads/master | 2023-08-23T04:00:16.695031 | 2021-09-23T04:10:54 | 2021-09-23T04:10:54 | 227,298,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 633 | py | #!/bin/env python3
import sys
def main():
# lets assume ax+by+cz=d equation
# a,b,c,d as +ve integer constants
# x, y, z are +ve integers
# for known values of a,b,c,d
# find all possible values of x y & z.
a,b,c,d = map(int, input().split())
# print(a, b, c, d)
# lets find range of x
xmax = d//a
for xtmp in range(xmax, 0, -1):
tmp1 = d - (a * xtmp)
ymax = tmp1//b
for ytmp in range(ymax, 0, -1):
tmp2 = tmp1 - (b * ytmp)
if (tmp2 > 0) and ((tmp2 % c) == 0):
print(xtmp, ytmp, tmp2//c)
if __name__ == "__main__":
main()
| [
""
] | |
534ad72b7cc4d76225e99a6607852a2b134d1119 | c5c0c9200f1d8f0f66950a00f18ebd690f678cb8 | /.c9/metadata/environment/todo/apps.py | 3b7a661c7db22b18b1f31f49281a09eb53c8c81a | [] | no_license | Lemoenskil/django_todo | de882e0926eeffbaa1488452b845dd6979c209aa | cc2435513ad5af284e045120e8008c9bc3d8cb2e | refs/heads/master | 2022-12-13T06:48:11.144994 | 2021-04-27T19:02:12 | 2021-04-27T19:02:12 | 245,660,781 | 0 | 0 | null | 2022-12-08T03:45:32 | 2020-03-07T15:49:37 | Python | UTF-8 | Python | false | false | 414 | py | {"filter":false,"title":"apps.py","tooltip":"/todo/apps.py","undoManager":{"mark":-1,"position":-1,"stack":[]},"ace":{"folds":[],"scrolltop":0,"scrollleft":0,"selection":{"start":{"row":0,"column":0},"end":{"row":0,"column":0},"isBackwards":false},"options":{"guessTabSize":true,"useWrapMode":false,"wrapToView":true},"firstLineState":0},"timestamp":1583065505225,"hash":"d09c7c3ed0506db79cea9ab255aaad272fb14e20"} | [
"ubuntu@ip-172-31-38-9.eu-central-1.compute.internal"
] | ubuntu@ip-172-31-38-9.eu-central-1.compute.internal |
377d56b008a18aee612969c8d1d865f1fabb4bc0 | 3187cb009bb5134b3fddd0c0cdf45e944c5715de | /tokenizer/migrations/0008_auto_20200720_1958.py | abc79fab13fdcee46170e056a9b36c6d0ae1572a | [] | no_license | Jiyoonki/annotator | 978dfb16e0122b42760da8df292514551458ad7f | 9472b2d58043e7ca61b1e622ec99f6ef62eee0a8 | refs/heads/master | 2022-11-21T13:17:41.244333 | 2020-07-30T02:19:02 | 2020-07-30T02:19:02 | 283,648,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 509 | py | # Generated by Django 3.0.3 on 2020-07-20 10:58
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated migration 0008: renames two fields on the
    # `keywords` model (data is preserved by RenameField).
    # Must be applied on top of migration 0007.
    dependencies = [
        ('tokenizer', '0007_auto_20200720_1955'),
    ]
    operations = [
        # `code` -> `key`
        migrations.RenameField(
            model_name='keywords',
            old_name='code',
            new_name='key',
        ),
        # `code_index` -> `key_index`
        migrations.RenameField(
            model_name='keywords',
            old_name='code_index',
            new_name='key_index',
        ),
    ]
| [
"kjiyoon1@gmail.com"
] | kjiyoon1@gmail.com |
b08d1c7b5e05781d77b4702a12bf846f79a46088 | 2fc8c742ff48219e5866667767b5fb5e4dfd5c88 | /alabb/källkod/karta.py | b09c2236acf0dd39c71a63289c081c5923602672 | [] | no_license | fabianrevilla/tilda | 41cd7ed4043a595d3ade38f356e920514050d550 | 19d5b7ccad58e9738e35bf6b3b7268d3eb2cf561 | refs/heads/master | 2020-09-08T23:04:02.884057 | 2019-11-12T17:34:41 | 2019-11-12T17:34:41 | 221,269,956 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,710 | py | island_map="""
---------------------------------------------------------------------------------------------------------------------------
. 🏛
. . .
. . .
🏥 . . . . . .
. . . 🌆 . . . .
. 🏢 . . . . . 🏙 . . . .
🏯 . . . . . . . . . . 🏨
. . . . . . . . . . . .
. . . . . . 🏢 . . . .
. . . . . . . . . . . . . .
. . 🏪 . . . . . 🏢 . .
. . . . . . 🏰 . . . . ♖
. 🏫 . . . . . . . . . . .
. . . . . . 🏥 . . . . . . .
🏯 . . . . . 🏰 . . 🎡 . ♜ .
. . 🏣 . . . . . . . . .
🏤 . . . . . . . . . . . 🏰
. . . . . . . . . . . .
. . 🏨 . . . . . . . .
. . . 🏠 . 🌆 . 🏭
---------------------------------------------------------------------------------------------------------------------------
"""
| [
"fabianrevilla95@gmail.com"
] | fabianrevilla95@gmail.com |
53f105e9a16c218d5698c35ab3d888d4d9d69c58 | 9baa9f1bedf7bc973f26ab37c9b3046824b80ca7 | /venv-bck/bin/easy_install | f306fde1138489c4a226dd5e0a062fb6a8fad8e7 | [] | no_license | shakthydoss/suriyan | 58774fc5de1de0a9f9975c2ee3a98900e0a5dff4 | 8e39eb2e65cc6c6551fc165b422b46d598cc54b8 | refs/heads/master | 2020-04-12T05:36:59.957153 | 2017-01-08T06:12:13 | 2017-01-08T06:12:13 | 59,631,349 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | #!/Users/saksekar/suriyan/venv/bin/python
# -*- coding: utf-8 -*-
import sys
import re
from setuptools.command.easy_install import main
if __name__ == '__main__':
    # setuptools console-script shim: normalise argv[0] by stripping the
    # "-script.pyw" / ".exe" suffix that Windows launchers append, then
    # hand control to easy_install's main() and exit with its status code.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"shakthydoss@gmail.com"
] | shakthydoss@gmail.com | |
70fa6242536c1c1bcbeeaaff1057f7626d0145e1 | 6d0a9262618334516cce2f1a59e41cb02e7280c4 | /dist_ml/local_test.py | ae636da75a247a772f53fcd6a6ec4802df263723 | [
"Apache-2.0"
] | permissive | rzhu3/ray_dist_ml | 2bdc7b7c75d35d7eae5a981ff6cdc5f3a159f0a4 | a576810922a9de763af4df62f93e2f74272f628a | refs/heads/master | 2020-03-28T11:34:08.530568 | 2018-09-11T02:53:06 | 2018-09-11T02:53:06 | 148,226,974 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,449 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import time
import datetime
import logging
import tensorflow as tf
import numpy as np
from dist_ml.sparse_nnpca import SparsePCA
# from dist_ml.sparse_classifier import SparseClassifier
# Timestamped log lines, e.g. "2019-07-19 03:11:36 INFO     ...".
logging.basicConfig(
    format='%(asctime)s %(levelname)-8s %(message)s',
    level=logging.INFO,
    datefmt='%Y-%m-%d %H:%M:%S')
if __name__ == "__main__":
    # a8a (LIBSVM adult benchmark) input paths.
    # NOTE(review): unused -- SparsePCA below only receives the dataset name.
    train_files = 'a8a_data/a8a_train'
    test_files = 'a8a_data/a8a_test'
    tf.reset_default_graph()
    # net = SparseClassifier(train_files=train_files, test_files=test_files)
    net = SparsePCA(dataset='a8a')
    weights = net.get_weights()
    # while True:
    # TensorBoard summaries are written under ./summary.
    writer = tf.summary.FileWriter('summary', net.sess.graph)
    # Coordinator + queue runners drive the TF1 input-pipeline threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord, sess=net.sess)
    start_time = datetime.datetime.now()
    try:
        # Train until the input queues signal end-of-data (OutOfRangeError).
        while not coord.should_stop():
            # compute an update and push it to the parameter server.
            # gradients = net.compute_grad_next_batch()
            _, step, logits = net.sess.run([net.train_step, net.global_step, net.logits])
            # grad = net.compute_grad_next_batch()
            if step % 100 == 0:
                # Every 100 steps: evaluate, snapshot the weights, log progress.
                # loss_value, train_acc_value, test_acc_value, summary_value = net.test()
                loss_value, summary_value, global_steps = net.test()
                weights = net.get_weights()
                # NOTE(review): assumes weights[1][0] holds the matrix worth
                # checkpointing -- confirm against SparsePCA.get_weights().
                np.save('%s_data/ckpt/local_weights_%03d.npy' % ('a8a', global_steps),
                        weights[1][0])
                end_time = datetime.datetime.now()
                # logging.info(
                #     "[{}] Step: {}, loss: {}, train_acc: {}, valid_acc: {}".
                #     format(end_time - start_time, step, loss_value,
                #            train_acc_value, test_acc_value))
                logging.info(
                    "[{}] Step: {}, loss: {}".
                    format(end_time - start_time, step, loss_value))
                writer.add_summary(summary_value, step)
                # saver.save(sess, checkpoint_file_path, global_step=step)
                start_time = end_time
            tf.get_variable_scope().reuse_variables()
    except tf.errors.OutOfRangeError:
        # Raised by the input queues once the epoch limit is exhausted.
        print("Training Finished.")
    finally:
        # Always stop and join the input threads, even on failure.
        coord.request_stop()
        coord.join(threads)
| [
"rui.tyler.zhu@gmail.com"
] | rui.tyler.zhu@gmail.com |
e739400744b2ad5200541f59322b788dbfd44032 | 34a81b1c39a1c5200342cb63959d935c002b2c19 | /PYTHON/dataModels.py | 64b0feddfb3b63dddd3833166dc8728745aa1e00 | [] | no_license | coretx09/PW-CS50 | 69921a746d819c258a78ae2e77cc487fc25b0727 | 799c859efa0a29b23cb1f3dea6a257c1edc4267a | refs/heads/master | 2023-07-17T04:15:44.671855 | 2021-08-09T09:38:46 | 2021-08-09T09:38:46 | 316,319,773 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,473 | py | # SEQUENCES
# COLLECTIONS
import collections
from random import choice
# collections.namedtuple to construct a simple class to represent ---> individual cards
# Card is a lightweight record type with 'rank' and 'suit' fields.
Card = collections.namedtuple('Card', ['rank', 'suit'])
print(Card.__doc__)
beer_card = Card(4, 'moi')  # one concrete Card instance
print(beer_card)
class FrenchDeck:
    """A standard 52-card deck supporting len(), indexing and calling."""
    ranks = [str(n) for n in range(2, 11)] + list('JQKA')
    suits = 'spades diamonds clubs hearts'.split()
    def __init__(self):
        # Build the 52 cards: all suits of rank 2 first, then 3, ... up to A.
        built = []
        for rank in self.ranks:
            for suit in self.suits:
                built.append(Card(rank, suit))
        self._cards = built
    def __len__(self):
        # len(deck) reports the number of cards.
        return len(self._cards)
    def __getitem__(self, position):
        # deck[i] delegates to the underlying list (also enables iteration).
        return self._cards[position]
    def __call__(self):
        # deck() hands back the full card list.
        return self._cards
# LIST RANKS & SUITS (class attributes, readable without an instance)
print(f'rank: {FrenchDeck.ranks} suit: {FrenchDeck.suits}')
# INSTANTIATION
deck = FrenchDeck()
# MAGIC METHODS: each builtin/syntax below is routed to a dunder method.
print(len(deck)) #called __len__
print(deck[50]) # called __getitem__
print(deck()) # called __call__
# collections.namedtuple combined with nested list comprehensions
list1 = [str(x) for x in range(10, 16, 2)]  # ['10', '12', '14']
list2 = ['A', 'B', 'C']
Combine = collections.namedtuple('Combine',['nombre', 'mot'])
liste3 = [Combine(x, y) for x in list1 for y in list2]  # cartesian product
print(liste3)
#Python already has a function to get a random item from a sequence: random.choice.
print(choice(liste3))
# ELLIPSIS
for card in deck:
print(card) | [
"ngampiosauvet@gmail.com"
] | ngampiosauvet@gmail.com |
311069543284b2bc146f63a4419a6b1c1c2286b8 | 08607218396a0269a90e8b4e6d099a5e99e39a8b | /database/schemes/easyTest/script/testCase/U商城项目/U商城管理端/站点设置/友情链接/worm_1482819508/友情链接.py | a743fe0cfcbaa179d5cb2864b7ab079e770d7400 | [
"MIT"
] | permissive | TonnaMajesty/test | 4a07297557669f98eeb9f94b177a02a4af6f1af0 | 68b24d1f3e8b4d6154c9d896a7fa3e2f99b49a6f | refs/heads/master | 2021-01-19T22:52:18.309061 | 2017-03-06T10:51:05 | 2017-03-06T10:51:05 | 83,779,681 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,227 | py | # coding=utf-8
from time import sleep, time
from SRC.common.decorator import codeException_dec
from SRC.unittest.case import TestCase
from script.common import utils
class EasyCase(TestCase):
    """UI test: create, edit and then delete a "friendly link" entry in the
    U-Mall admin site-settings section."""
    def __init__(self, webDriver, paramsList):
        # Do not modify this method (framework wiring).
        super(EasyCase, self).__init__(webDriver, paramsList)
    @codeException_dec('3')
    def runTest(self):
        driver = self.getDriver()
        param = self.param
        tool = utils
        # The triple-quoted block below is the framework's original usage
        # notes for driver/param/tool; kept verbatim.
        '''
        ##################################################################
        浏览器驱动:driver
        例如:
        driver.get('http://www.demo.com')
        driver.find_element_by_id("kw","输入框").send_keys("Remote")
        driver.find_elements_by_id("su","查找")[0].click()
        参数化:param
        说明:
        需要进行参数化的数据,用param.id 替换,id为参数化配置文件中的id值
        自定义工具模块:tool 文件所在路径script/common/utils.py
        开发人员可根据需要自行添加新的函数
        例如:
        获取一个随机生成的字符串:number=tool.randomStr(6)
        ##################################################################
        该方法内进行测试用例的编写
        '''
        # driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/ul[7]/li[1]/upmark').click(); # click "Site Settings"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[1]/ul[7]/li[10]/a').click() # click "Friendly Links"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[2]/div/a').click() # click "Add"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[1]/div/input').send_keys(u'你想去哪?') # type the link name
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[2]/div/input').send_keys('demo.upmall.yonyouup.com') # type the link URL
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[5]/div/a').click() # click "Upload"
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[1]/div[5]/div/input').send_keys('E:\\tupian\\hhhhhh.jpg') # pick the image file to upload
        #os.system("E:\\pythonScript\\autoit\\guanbi.au4.exe") # run guanbi.exe (AutoIt) to close the native Windows file dialog
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[2]/div/button[2]').click() # click "OK" to save the new link
        #driver.find_elements_by_xpath('//a[@class="colorblue"]')[0].click(); # click "Edit"
        driver.find_element_by_css_selector("body > div.container.corp-page.ng-scope > div > div.col-xs-10.corp-content > div > div:nth-child(3) > div > table > tbody > tr:nth-child(1) > td.text-center > a:nth-child(1)").click() # click "Edit" on the first row
        driver.find_element_by_xpath('/html/body/div[1]/div/div[2]/div/div[3]/div[2]/div/button[2]').click() # click "OK" to save the edit
        #driver.find_elements_by_xpath('//a[@class="colorblue"]')[1].click(); # click "Delete"
        driver.find_element_by_css_selector("body > div.container.corp-page.ng-scope > div > div.col-xs-10.corp-content > div > div:nth-child(3) > div > table > tbody > tr:nth-child(1) > td.text-center > a:nth-child(2)").click() # click "Delete" on the first row
        driver.find_element_by_css_selector("body > div.modal.fade.ng-isolate-scope.in > div > div > div.modal-footer.ng-scope > button:nth-child(1)").click() # click "OK" in the confirm dialog
sleep(3) | [
"1367441805@qq.com"
] | 1367441805@qq.com |
2ac30f335a6c1d4abe97c7e865c42caf18562513 | 40be1809a4421276c52b5e707dae6eca195947b2 | /svc_twitter_old.py | f56b432aec785be2d51f17170504fb5254de11bd | [] | no_license | Selenestica/apex-legends-armory | a2c16480a9133d8c09e9ff56e0f2abfe79f44b2f | 8c42102a2ef7b7fe03e8830d40c52a09817fbc09 | refs/heads/master | 2023-03-30T04:06:02.629010 | 2021-04-01T10:35:39 | 2021-04-01T10:35:39 | 263,827,172 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,572 | py | import re
import requests
from loguru import logger
from sqlalchemy import *
import twint
class SVC_Twitter:

    @staticmethod
    def cmd_twitter(phrase, metadata, session):
        """Run a twint Twitter search and return a human-readable summary.

        :param phrase: "<search_by>, <argument>"; when search_by is
            "username" the argument is a handle whose timeline is searched,
            otherwise the argument is used as a keyword search.
        :param metadata: unused, kept for the service-command interface.
        :param session: unused, kept for the service-command interface.
        :return: str summary of up to five tweets, or an error message.
        """
        # partition() never raises; the old split(", ")[1] crashed with
        # IndexError when the separator was missing from the phrase.
        search_by, sep, keyword = phrase.partition(", ")
        if not sep:
            return "Error: expected '<search_by>, <keyword>' but got: " + phrase
        try:
            config = twint.Config()
            if search_by == "username":
                config.Username = keyword
                body_text = "Tweets by: " + keyword + "\n"
            else:
                config.Search = keyword
                body_text = "Tweets about: " + keyword + "\n"
            config.Limit = 10  # cap the number of tweets twint fetches
            config.Hide_output = True
            config.Store_object = True
            twint.run.Search(config)
            res = twint.output.tweets_list
            if res:
                # Append at most five tweets.  The old code reassigned
                # body_text here, discarding the header built above, and
                # also dumped each tweet to stdout with a debug print.
                for shown, tweet in enumerate(res):
                    if shown == 5:
                        break
                    body_text += "Username: " + tweet.username
                    body_text += "\nDate: " + tweet.datetime
                    body_text += "\nTweet: " + tweet.tweet + "\n\n"
            else:
                body_text = "We didn't find any tweets about " + keyword + "."
        except Exception as e:
            # Surface failures (network, twint internals) to the caller.
            print(e)
            body_text = "Error: " + str(e)
        return body_text
| [
"53917177+Selenestica@users.noreply.github.com"
] | 53917177+Selenestica@users.noreply.github.com |
3967764432938a4044f5f34273540736100967e3 | 67692cd6c7948874a06aa24381a070cd8e1ce251 | /Challenge 8 v3 For While Mean Median.py | f442b2cb6c4b0180912f56be3fda8fb5ff93e940 | [] | no_license | kawmaiparis/Completed-Programs | c8f557df246d6a576791c705f4a11274d208b133 | f8a292b7b2ad78c36332e9576ad2c156d799563e | refs/heads/master | 2021-01-11T04:14:28.547336 | 2016-10-23T12:37:01 | 2016-10-23T12:37:01 | 71,226,526 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,586 | py | import statistics
import math
import time
mark = []
run = "yesh"
# Keep collecting score lists for as long as the reply contains a "y".
while "y" in run:
    try:
        print("1.For\n2.While")
        option = input("")
        print("")
        if option == "1":
            # Fixed-count entry: ask how many scores, then prompt that many times.
            count = int(input("Number of scores you want to add: "))
            for prompt_no in range(1, count + 1):
                mark.append(int(input(str(prompt_no) + ". ")))
        if option == "2":
            # Open-ended entry: keep prompting until 'done' or a blank line.
            print("Type 'done' or press enter to stop")
            prompt_no = 0
            while True:
                prompt_no += 1
                score = input(str(prompt_no) + ". ")
                if score == "done" or score == "":
                    break
                mark.append(int(score))
        mean = statistics.mean(mark)
        median = statistics.median(mark)
        total = sum(mark)
        time.sleep(1)
        print("Mean: " + str(mean))
        print("Median: " + str(median))
        print("Total: " + str(total))
        run = input("Again? ")
        mark = []
    except:
        # Any bad input (non-numeric score, empty score list, ...) lands here.
        print("Invalid command. Please try again")
| [
"paris.kawmai@gmail.com"
] | paris.kawmai@gmail.com |
1299c9e41ff370f25e6bbbead09c4555cb0e18c4 | 07831f9929568b8a3671cb9a0d2ef3d9b7dc2c08 | /test/regression/NOX_QS/NOX_QS_Newton_NoPrec_np1/NOX_QS_Newton_NoPrec.py | 7c988343fd7957acd2aaffb5a5751fca9a86c90a | [
"BSD-3-Clause",
"BSD-2-Clause"
] | permissive | oldninja/peridigm | 2743ea5501138342f9baaf5182c0befc33da4d9b | ac49e53565d0130a3cbeda85e6bc6851ff68705b | refs/heads/master | 2021-01-05T01:46:03.876180 | 2020-11-25T17:23:04 | 2020-11-25T17:23:04 | 245,474,842 | 0 | 0 | NOASSERTION | 2020-11-08T08:08:09 | 2020-03-06T17:03:04 | null | UTF-8 | Python | false | false | 1,546 | py | #! /usr/bin/env python
import sys
import os
import re
import glob
from subprocess import Popen
test_dir = "NOX_QS/NOX_QS_Newton_NoPrec_np1"
base_name = "NOX_QS_Newton_NoPrec"

if __name__ == "__main__":

    # Exit status: non-zero if either Peridigm or exodiff fails.
    result = 0

    # log file will be dumped at the end if the verbose option is given
    verbose = "-verbose" in sys.argv

    # change to the specified test directory
    os.chdir(test_dir)

    # open a fresh log file
    log_file_name = base_name + ".log"
    if os.path.exists(log_file_name):
        os.remove(log_file_name)
    logfile = open(log_file_name, 'w')

    # Remove old output files, if any.  glob already returns names from the
    # current directory, so the old cross-check against os.listdir() was
    # redundant (and its loop variable shadowed the builtin `file`).
    for stale_output in glob.glob('*.e*'):
        os.remove(stale_output)

    # run Peridigm
    command = ["../../../../src/Peridigm", "../" + base_name + ".yaml"]
    p = Popen(command, stdout=logfile, stderr=logfile)
    return_code = p.wait()
    if return_code != 0:
        result = return_code

    # compare output files against gold files
    command = ["../../../../scripts/exodiff",
               "-stat",
               "-f",
               "../NOX_QS.comp",
               base_name + ".e",
               "../" + base_name + "_gold.e"]
    p = Popen(command, stdout=logfile, stderr=logfile)
    return_code = p.wait()
    if return_code != 0:
        result = return_code

    logfile.close()

    # dump the output if the user requested verbose
    if verbose:
        os.system("cat " + log_file_name)

    sys.exit(result)
| [
"djlittl@sandia.gov"
] | djlittl@sandia.gov |
6d5d2be5a463e58fc1862feabe2bcc443fce727b | f07391f481150ad07cd5652a7b09cf1cd60d345f | /cmsplugin_container/cms_plugins.py | ee489a8a5b52902cedc985968117762177b4c1a3 | [] | no_license | django-cms-plugins/django-cmsplugin-container | 39dc956d1b7aa29132c0c841aa1d187da779e568 | c35d7111a6bd2c73de3d5df6a673497214df8e76 | refs/heads/master | 2021-01-21T15:07:12.658207 | 2013-07-23T14:56:19 | 2013-07-23T14:56:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,735 | py | #-*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from cms.plugin_pool import plugin_pool
from cms.plugin_base import CMSPluginBase
from cms.models import CMSPlugin
from cmsplugin_container.models import Container, Grid
from cmsplugin_container.forms import ContainerForm
class ContainerPlugin(CMSPluginBase):
    """Multi-column container plugin whose children are ColumnPlugin rows."""
    model = Container
    module = _("C")
    name = _("Multi Columns")
    render_template = "cms/plugins/container.html"
    allow_children = True
    child_classes = ["ColumnPlugin"]
    form = ContainerForm

    def render(self, context, instance, placeholder):
        # Expose the plugin instance and its placeholder to the template.
        context.update({
            'instance': instance,
            'placeholder': placeholder,
        })
        return context

    def save_model(self, request, obj, form, change):
        # BUG FIX: the original called super(MultiColumnPlugin, self), but no
        # class of that name exists in this module -- every save raised
        # NameError.  The enclosing class is ContainerPlugin.
        response = super(ContainerPlugin, self).save_model(request, obj, form, change)
        # Create the number of child columns requested on the form.
        # NOTE(review): `Column` is not imported in this module (the imports
        # bring in Container and Grid) -- confirm the intended column model.
        for x in xrange(int(form.cleaned_data['create'])):
            col = Column(parent=obj, placeholder=obj.placeholder, language=obj.language, width=form.cleaned_data['create_width'], position=CMSPlugin.objects.filter(parent=obj).count(), plugin_type=ColumnPlugin.__name__)
            col.save()
        return response
class ColumnPlugin(CMSPluginBase):
    # A single column, rendered as a child of the multi-column container.
    # NOTE(review): `Column` is not imported in this module (the imports
    # above bring in Container and Grid), so this reference raises
    # NameError at import time -- confirm the intended model name.
    model = Column
    module = _("Multi Columns")
    name = _("Column")
    render_template = "cms/plugins/column.html"
    #frontend_edit_template = 'cms/plugins/column_edit.html'
    allow_children = True
    def render(self, context, instance, placeholder):
        # Expose the plugin instance and its placeholder to the template.
        context.update({
            'instance': instance,
            'placeholder':placeholder,
        })
        return context
# BUG FIX: the original registered MultiColumnPlugin, a name that does not
# exist in this module (the container class above is ContainerPlugin), so
# importing this file raised NameError.
plugin_pool.register_plugin(ContainerPlugin)
plugin_pool.register_plugin(ColumnPlugin)
| [
"jacob.rief@gmail.com"
] | jacob.rief@gmail.com |
bd9c708c01a9274f8594b9dad16bc2006de86d43 | d5e605e79afa760c87dfbb739af15d1cdd762310 | /grupo08/simulador/cliente_all.py~ | 1113d3079e08ce415c0fbe8253e0a28757a8ee04 | [] | no_license | francianespessanha/WebServices | 5bd997831dbf72db5abda0804df00781a84a6aa7 | 011fdda80863126d9f9b4c2cf107173e7b592e5e | refs/heads/master | 2021-01-20T19:42:09.852426 | 2013-05-03T18:29:56 | 2013-05-03T18:29:56 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | #!/usr/bin/env python
from SOAPpy import SOAPProxy
# Connect directly to the SOAP endpoint (no WSDL lookup).
proxy = SOAPProxy("http://localhost:8001")
# Exercise the three remote operations with a sample id ("001").
# NOTE(review): Python 2 print statements; this script will not run on Python 3.
print str(proxy.consultaProduto("001"))
print str(proxy.consultarCliente("001"))
print str(proxy.consultarFuncionario("001"))
| [
"estela.paulino@gmail.com"
] | estela.paulino@gmail.com | |
616de202a514fdbd9bd3d224e044df7efee8bebf | 587611b9259834e692537ccfceeb7ceecce0f654 | /exercism/python/ghost-gobble-arcade-game/arcade_game.py | a938a4ce369abbe7cba5db979d6f9ae16659bc8b | [] | no_license | mtykhenko/sandbox | c1e10dc08f1e1fea5f6ceb4287af89429c1b532a | 32c7e32de275af831e5fd1c2fe6388fd80c06251 | refs/heads/master | 2023-01-23T00:36:35.049356 | 2023-01-10T01:39:06 | 2023-01-10T01:39:06 | 107,407,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,760 | py | """Functions for implementing the rules of the classic arcade game Pac-Man."""
def eat_ghost(power_pellet_active, touching_ghost):
    """Decide whether Pac-Man may eat a ghost right now.

    Eating a ghost requires both an active power pellet and contact
    with the ghost.

    :param power_pellet_active: bool - does the player have an active power pellet?
    :param touching_ghost: bool - is the player touching a ghost?
    :return: bool - can the ghost be eaten?
    """
    if not power_pellet_active:
        return False
    return touching_ghost
def score(touching_power_pellet, touching_dot):
    """Decide whether Pac-Man has just scored.

    A point is scored on contact with either a power pellet or a dot.

    :param touching_power_pellet: bool - is the player touching a power pellet?
    :param touching_dot: bool - is the player touching a dot?
    :return: bool - has the player scored or not?
    """
    if touching_power_pellet:
        return True
    return touching_dot
def lose(power_pellet_active, touching_ghost):
    """Decide whether the game loop should end (GAME OVER).

    Pac-Man loses by touching a ghost while no power pellet is active.

    :param power_pellet_active: bool - does the player have an active power pellet?
    :param touching_ghost: bool - is the player touching a ghost?
    :return: bool - has the player lost the game?
    """
    if power_pellet_active:
        return False
    return touching_ghost
def win(has_eaten_all_dots, power_pellet_active, touching_ghost):
"""Trigger the victory event when all dots have been eaten.
:param has_eaten_all_dots: bool - has the player "eaten" all the dots?
:param power_pellet_active: bool - does the player have an active power pellet?
:param touching_ghost: bool - is the player touching a ghost?
:return: bool - has the player won the game?
"""
return has_eaten_all_dots and not lose(power_pellet_active, touching_ghost) | [
"maksym.tykhenko@gmail.com"
] | maksym.tykhenko@gmail.com |
0dfa3ad8b1db8c484c528310797f0ec686504412 | b21e5af64331f5571233c5712fac413df8dff312 | /CorePython/io/files.py | c3418de0206b960b25259d815023d69dffc26d93 | [] | no_license | vipinparambil/python_study | 65942366132e42d4f6223f4d8ee190be00ea1a75 | c4bf56b66f765eb82a320cb9c1bd2fd54ff21f31 | refs/heads/master | 2022-06-09T12:26:27.475978 | 2020-05-08T17:34:52 | 2020-05-08T17:34:52 | 258,800,336 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 289 | py | import sys
# Reading the same file twice: once with manual close handling, once with
# a context manager.
source = open("wasteland.txt", mode="rt", encoding="utf-8")
try:
    for text_line in source:
        sys.stdout.write(text_line)
finally:
    # Guarantee the handle is released even if a write fails.
    source.close()

# The same read expressed with a `with` block; strip() removes each line's
# surrounding whitespace, including the trailing newline.
with open("wasteland.txt", mode="rt", encoding="utf-8") as handle:
    for text_line in handle:
        sys.stdout.write(text_line.strip())
| [
"vipinparambil@gmail.com"
] | vipinparambil@gmail.com |
47db17de360ac2b63bb8ee45b3ebe73eafc76d61 | d5929d2076af03f824a68a3f02f90f6dbd7671c3 | /Documentos/Python/Diagramacion/Ejercicios/2/7 2.py | 5ab323e70a90e5a8cbb8d6cbcd839bba65e3c527 | [] | no_license | xcabezaderadiox/EDD_Paradigmas | 5603822e06cd5edb0955dacbfbaedaf1449092dc | 5409883fb6aefb30834fad39a8e59d3d943d823f | refs/heads/master | 2021-08-15T11:42:52.935451 | 2017-11-17T20:11:11 | 2017-11-17T20:11:11 | 108,488,508 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,821 | py | print ('BIENVENID@ ALUMN@')
# Interactive grade calculator: reads exam and TP (practical work) marks for
# three subjects, prints each subject's weighted final mark and the overall
# average.  User-facing text is deliberately kept in Spanish.
print('Estas son las siguientes notas en Analisis: ')
print('Nota examen')
NOTA_EXAMEN = int(input())
print('TP NUMERO 1')
TP1 = int(input())
print('TP NUMERO 2')
TP2 = int(input())
print('TP NUMERO 3')
TP3 = int(input())
print('Promedio de TPS')
PROMEDIO_DE_TPS = (TP1 + TP2 + TP3) / 3
print(str(PROMEDIO_DE_TPS))
# Analisis: TP average weighs 10%, the exam 90%.
NOTA_FINAL_ANALISIS = (PROMEDIO_DE_TPS * 0.10) + (NOTA_EXAMEN * 0.90)
print('El promedio final de Analisis es de: ')
print(str(NOTA_FINAL_ANALISIS))
print()
print('Estas son las siguientes notas en Algebra: ')
print('Nota examen')
NOTA_EXAMEN = int(input())
print('TP NUMERO 1')
TP1 = int(input())
print('TP NUMERO 2')
TP2 = int(input())
print('Promedio de TPS')
# BUG FIX: Algebra has only two TPs, but the original divided their sum
# by 3, under-reporting the TP average and the final mark.
PROMEDIO_DE_TPS = (TP1 + TP2) / 2
print(str(PROMEDIO_DE_TPS))
# Algebra: TP average weighs 20%, the exam 80%.
NOTA_FINAL_ALGEBRA = (PROMEDIO_DE_TPS * 0.20) + (NOTA_EXAMEN * 0.80)
print('El promedio final de Algebra es de: ')
print(str(NOTA_FINAL_ALGEBRA))
print()
print('Estas son las siguientes notas en Programacion: ')
print('Nota examen')
NOTA_EXAMEN = int(input())
print('TP NUMERO 1')
TP1 = int(input())
print('TP NUMERO 2')
TP2 = int(input())
print('TP NUMERO 3')
TP3 = int(input())
print('Promedio de TPS')
PROMEDIO_DE_TPS = (TP1 + TP2 + TP3) / 3
print(str(PROMEDIO_DE_TPS))
# Programacion: TP average weighs 10%, the exam 90%.
NOTA_FINAL_PROGRAMACION = (PROMEDIO_DE_TPS * 0.10) + (NOTA_EXAMEN * 0.90)
print('El promedio final de Programacion es de: ')
print(str(NOTA_FINAL_PROGRAMACION))
print()
# Overall average of the three subjects' final marks.
print('El promedio general sera de: ')
PROMEDIO_GENERAL = (NOTA_FINAL_ALGEBRA + NOTA_FINAL_ANALISIS + NOTA_FINAL_PROGRAMACION) / 3
print(str(PROMEDIO_GENERAL))
print()
if PROMEDIO_GENERAL > 6:
    print('Felicitaciones!!!!!!')
else:
    print('A estudiar mas por favor Alumno')
print()
print('GRACIAS')
print()
print()
print()
print('***Desing by @xcabezaderadiox***')
| [
"christianmontesdeoca89@yahoo.com.ar"
] | christianmontesdeoca89@yahoo.com.ar |
a5865745f7d3f7da98361b21a9792d2a879fc5a2 | 5496337758c4f908695445d5025dd11c8fe4bd71 | /Laba 2.py | 4c1d38fd4948b84ed3c6ee7bff34f34bab4e2ccd | [] | no_license | Chakazula123/Math-Modeling_10_class | dec8dd95dea9ec2d1b2a393292e03b69643001a6 | ebdffb8b19d4eedeec707191424b5ab7c1e02bfb | refs/heads/master | 2020-08-08T01:56:26.313937 | 2019-10-22T15:48:42 | 2019-10-22T15:48:42 | 213,667,813 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 213 | py | print('--------Задание 2---------')
import my_module as mc
import numpy as np
import math as mt
v=((mc.g)*(mc.h)*(np.tan(mc.b))**2/(2*(np.cos(mc.a))**2*(1-(np.tan(mc.b))*(np.tan(mc.a)))))**0.5
print(v) | [
"noreply@github.com"
] | noreply@github.com |
a6b94171e773217e8d86aa4794a7f531b936d445 | 13562737273dc553480be2c42a1575f710bbecac | /mobilenet_fpn/lib/nets/mobilenet_v2.py | fc9c76d422bda66bd4c953524da51d6aca80f5d5 | [] | no_license | violet2020/TF20200104 | 6cee49e3cdd156871f31e78a3fcc1880826dd7d5 | 134c57137b3b2da791026340eb9f260fac1b9f77 | refs/heads/master | 2020-12-04T10:34:06.821016 | 2020-01-13T10:28:50 | 2020-01-13T10:28:50 | 231,728,600 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,042 | py | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementation of Mobilenet V2.
Architecture: https://arxiv.org/abs/1801.04381
The base model gives 72.2% accuracy on ImageNet, with 300MMadds,
3.4 M parameters.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import functools
import tensorflow as tf
from . import conv_blocks as ops
from . import mobilenet as lib
# Short aliases used throughout the architecture definition below.
slim = tf.contrib.slim
op = lib.op
expand_input = ops.expand_input_by_factor
# pyformat: disable
# Architecture: https://arxiv.org/abs/1801.04381
# V2_DEF describes MobileNet V2 declaratively: `defaults` holds per-op
# argument defaults, `spec` lists the layer sequence (stride-2 entries
# halve the spatial resolution).
V2_DEF = dict(
    defaults={
        # Note: these parameters of batch norm affect the architecture
        # that's why they are here and not in training_scope.
        (slim.batch_norm,): {'center': True, 'scale': True},
        (slim.conv2d, slim.fully_connected, slim.separable_conv2d): {
            'normalizer_fn': slim.batch_norm, 'activation_fn': tf.nn.relu6
        },
        # Inverted-residual bottleneck defaults: 6x input expansion with a
        # residual connection.
        (ops.expanded_conv,): {
            'expansion_size': expand_input(6),
            'split_expansion': 1,
            'normalizer_fn': slim.batch_norm,
            'residual': True
        },
        (slim.conv2d, slim.separable_conv2d): {'padding': 'SAME'}
    },
    spec=[
        op(slim.conv2d, stride=2, num_outputs=32, kernel_size=[3, 3]),
        op(ops.expanded_conv,
           expansion_size=expand_input(1, divisible_by=1),
           num_outputs=16),
        op(ops.expanded_conv, stride=2, num_outputs=24),
        op(ops.expanded_conv, stride=1, num_outputs=24),
        op(ops.expanded_conv, stride=2, num_outputs=32),
        op(ops.expanded_conv, stride=1, num_outputs=32),
        op(ops.expanded_conv, stride=1, num_outputs=32),
        op(ops.expanded_conv, stride=2, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=64),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=1, num_outputs=96),
        op(ops.expanded_conv, stride=2, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=160),
        op(ops.expanded_conv, stride=1, num_outputs=320),
        op(slim.conv2d, stride=1, kernel_size=[1, 1], num_outputs=1280)
    ],
)
# pyformat: enable
@slim.add_arg_scope
def mobilenet(input_tensor,
num_classes=1001,
depth_multiplier=1.0,
scope='MobilenetV2',
conv_defs=None,
finegrain_classification_mode=False,
min_depth=None,
divisible_by=None,
activation_fn=None,
**kwargs):
"""Creates mobilenet V2 network.
Inference mode is created by default. To create training use training_scope
below.
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
Args:
input_tensor: The input tensor
num_classes: number of classes
depth_multiplier: The multiplier applied to scale number of
channels in each layer. Note: this is called depth multiplier in the
paper but the name is kept for consistency with slim's model builder.
scope: Scope of the operator
conv_defs: Allows to override default conv def.
finegrain_classification_mode: When set to True, the model
will keep the last layer large even for small multipliers. Following
https://arxiv.org/abs/1801.04381
suggests that it improves performance for ImageNet-type of problems.
*Note* ignored if final_endpoint makes the builder exit earlier.
min_depth: If provided, will ensure that all layers will have that
many channels after application of depth multiplier.
divisible_by: If provided will ensure that all layers # channels
will be divisible by this number.
activation_fn: Activation function to use, defaults to tf.nn.relu6 if not
specified.
**kwargs: passed directly to mobilenet.mobilenet:
prediction_fn- what prediction function to use.
reuse-: whether to reuse variables (if reuse set to true, scope
must be given).
Returns:
logits/endpoints pair
Raises:
ValueError: On invalid arguments
"""
if conv_defs is None:
conv_defs = V2_DEF
if 'multiplier' in kwargs:
raise ValueError('mobilenetv2 doesn\'t support generic '
'multiplier parameter use "depth_multiplier" instead.')
if finegrain_classification_mode:
conv_defs = copy.deepcopy(conv_defs)
if depth_multiplier < 1:
conv_defs['spec'][-1].params['num_outputs'] /= depth_multiplier
if activation_fn:
conv_defs = copy.deepcopy(conv_defs)
defaults = conv_defs['defaults']
conv_defaults = (
defaults[(slim.conv2d, slim.fully_connected, slim.separable_conv2d)])
conv_defaults['activation_fn'] = activation_fn
depth_args = {}
# NB: do not set depth_args unless they are provided to avoid overriding
# whatever default depth_multiplier might have thanks to arg_scope.
if min_depth is not None:
depth_args['min_depth'] = min_depth
if divisible_by is not None:
depth_args['divisible_by'] = divisible_by
with slim.arg_scope((lib.depth_multiplier,), **depth_args):
return lib.mobilenet(
input_tensor,
num_classes=num_classes,
conv_defs=conv_defs,
scope=scope,
multiplier=depth_multiplier,
**kwargs)
mobilenet.default_image_size = 224
def wrapped_partial(func, *args, **kwargs):
partial_func = functools.partial(func, *args, **kwargs)
functools.update_wrapper(partial_func, func)
return partial_func
# Wrappers for mobilenet v2 with depth-multipliers. Be noticed that
# 'finegrain_classification_mode' is set to True, which means the embedding
# layer will not be shrinked when given a depth-multiplier < 1.0.
mobilenet_v2_140 = wrapped_partial(mobilenet, depth_multiplier=1.4)
mobilenet_v2_050 = wrapped_partial(mobilenet, depth_multiplier=0.50,
finegrain_classification_mode=True)
mobilenet_v2_035 = wrapped_partial(mobilenet, depth_multiplier=0.35,
finegrain_classification_mode=True)
@slim.add_arg_scope
def mobilenet_base(input_tensor, depth_multiplier=1.0, **kwargs):
"""Creates base of the mobilenet (no pooling and no logits) ."""
return mobilenet(input_tensor,
depth_multiplier=depth_multiplier,
base_only=True, **kwargs)
def training_scope(**kwargs):
"""Defines MobilenetV2 training scope.
Usage:
with tf.contrib.slim.arg_scope(mobilenet_v2.training_scope()):
logits, endpoints = mobilenet_v2.mobilenet(input_tensor)
with slim.
Args:
**kwargs: Passed to mobilenet.training_scope. The following parameters
are supported:
weight_decay- The weight decay to use for regularizing the model.
stddev- Standard deviation for initialization, if negative uses xavier.
dropout_keep_prob- dropout keep probability
bn_decay- decay for the batch norm moving averages.
Returns:
An `arg_scope` to use for the mobilenet v2 model.
"""
return lib.training_scope(**kwargs)
__all__ = ['training_scope', 'mobilenet_base', 'mobilenet', 'V2_DEF']
| [
"noreply@github.com"
] | noreply@github.com |
edd48cf5a03c972cc97900de46d36464eea29caf | b44445cc8afde84686167affec9825b381caee8d | /core/admin.py | 481037bfcfd7d076e75d6f07231595127358de1b | [] | no_license | RodrigoBeltran/PaginaWebFinalizada | 974df52fa0144a2ad14ef9e2bd8b845a7fff53f6 | d616d554e875e8a305ea0e37bb070cd70570e098 | refs/heads/master | 2023-06-23T20:30:39.090223 | 2021-07-10T01:05:55 | 2021-07-10T01:05:55 | 384,579,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 315 | py | from django.contrib import admin
from .models import Distribuidor, GPU, Juegos, Rams, Gabinete, Procesador
# Register your models here.
admin.site.register(Distribuidor)
admin.site.register(Rams)
admin.site.register(Juegos)
admin.site.register(Gabinete)
admin.site.register(Procesador)
admin.site.register(GPU)
| [
"ma.valles@duocuc.cl"
] | ma.valles@duocuc.cl |
0282d4a368747ab0860a64a8100fceaaf0f6a852 | 342d01d22f453ef7cb6fea382da0071d71c418e4 | /moped-modeling-py/src/descutils.py | f0631d91e6d03600f1f6801636ae25b49e922a5c | [] | no_license | kanster/moped | 1ddf78cb2aa939e788fe0c0b5974797f58f46b87 | f8d74b6680915eca73265be656d72e7d346e1aa0 | refs/heads/master | 2021-01-19T04:25:16.861856 | 2016-08-01T02:08:29 | 2016-08-01T02:08:29 | 64,627,677 | 13 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,264 | py | #!/usr/bin/env python
################################################################################
#
# descutils.py: common feature utilities to apply on images
#
# Copyright: Carnegie Mellon University
# Author: Alvaro Collet (acollet@cs.cmu.edu)
#
################################################################################
""" descutils.py
Created by Alvaro Collet on 06/27/2011
Copyright (c) 2011 Carnegie Mellon University. All rights reserved.
"""
import glob
import os
# Feature types
FEAT_SIFT = 1
FEAT_SURF = 2
################################################################################
#
# BaseFeature class
#
################################################################################
class BaseFeature(object):
""" Virtual class that defines the common interface for our feature
descriptors.
"""
name = ''
""" Feature name"""
key_length = None
""" Keypoint length (in Sift, keypoint = [x y angle scale]"""
desc_length = None
""" Descriptor length"""
id = None
""" Feature identifier """
# ------------------------------------------------------------------------ #
def __init__(self, name=None, key_length = None, \
desc_length = None, id = None):
""" Init function."""
self.name = name
self.key_length = key_length
self.desc_length = desc_length
self.id = id
# ------------------------------------------------------------------------ #
def run(self, file):
""" Extract features from image file.
Usage: result = Feature.run(file)
Input:
file - string containing image name
Output:
result - feature output, in whatever format it understands (could
be a matrix, raw text, etc...)
"""
# You need to implement this in your subclass
pass
# ------------------------------------------------------------------------ #
def dump(self, result, out_file):
""" Dump features to disk.
Usage: Feature.dump(result, out_file)
Input:
result - feature output, in whatever format was output in 'run'.
out_file - string containing output file name.
Output:
-NONE- but your features will presumably be saved in your format
of choice.
"""
# You need to implement this in your subclass
pass
# ------------------------------------------------------------------------ #
def call(self, file, out_file):
""" Extract features from image file and dump them into file (run+dump)
Usage: Feature.call(file, out_file)
Input:
file - string containing image name
out_file - string containing output keypoints name
Output:
-NONE- but results are stored into file
"""
# You need to implement this in your subclass
pass
# ------------------------------------------------------------------------ #
def load(file):
""" Load features from disk.
Usage: keypoints, descriptors = Feature.load(file)
Input:
file - string containing file name with keypoints to load
Output:
keypoints - N-by-M matrix, containing M keypoints
descriptors - K-by-M matrix, containing M keypoint descriptors
"""
# You need to implement this in your subclass
pass
# --------------------------------------------------------------------------- #
def createDescFilename(file, desc_name):
""" Simple utility to generate output feature names from image files.
Usage: out_file = createDescFileName(input_file, feature.name)
Input:
input_file - String with Input filename
desc_name - String with feature name (e.g. 'SIFT', 'SURF')
Output:
out_file - output filename (e.g.: file.SIFT.key)
"""
# Get rid of file extension
fpath, fname_ext = os.path.split(file)
fname, ext = os.path.splitext(fname_ext)
return fname + '.' + desc_name + '.key'
# --------------------------------------------------------------------------- #
def RunFolder(feat, in_dir, out_dir = None, in_pattern='*.jpg', \
overwrite = False):
""" Execute a feature object on a set of files within a folder.
Usage: in_list, out_list = descutils.RunFolder(desc, in_dir, in_pattern, \
out_dir, overwrite)
in_list, out_list = descutils.RunFolder(desc, '/tmp/images', \
/tmp/model, '*.jpg')
Input:
feat - Object of class 'Feature'. For each file in the folder,
we call feature.dump(feature.run(file), out_file)
in_dir - Input folder where images are taken
out_dir - Output folder where keypoint feature images are stored
(If None, we set out_dir = in_dir)
in_pattern - pattern of files to search for (default: '*.jpg')
overwrite{false} - If true, force feature.run to be called on every
file, even if there is already a file
Output:
in_list - List of files we executed our feature in
out_list - List of output filenames containing feature lists
"""
out_dir = in_dir if out_dir is None else out_dir
# Find files
img_list = glob.glob(os.path.join(in_dir, in_pattern))
img_list.sort()
keys_list = list()
# Run features on files
for file in img_list:
output_name = os.path.join(out_dir, createDescFilename(file, \
feat.name))
keys_list.append(output_name)
# If file does not exist, do this
if (not os.path.exists(output_name) \
and not os.path.exists(output_name + '.gz')) \
or overwrite:
feat.call(file, output_name)
# result = feat.run(file)
# feat.dump(result, output_name)
else:
print ("There is already a keypoints file for " + output_name)
return img_list, keys_list
| [
"vandeweg@users.noreply.github.com"
] | vandeweg@users.noreply.github.com |
953ebf3bf301af11ff0e2a8d80fc3624fcfc21f7 | 13a37b1137519b7b34e9754836c3aac15fb514c6 | /mysrc/using_sys.py | 2f61d47e18a1ba5ceb7e3b1f22645275df0ebafc | [] | no_license | puma-wong/Python-pool | 303ff16cbbfa7a708deba9f4a785c4da16051948 | df8f2b9a26ee88a1042d175d17351f6483561576 | refs/heads/master | 2021-11-29T12:00:08.874185 | 2021-11-09T14:50:38 | 2021-11-09T14:50:38 | 3,459,749 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 129 | py | import sys
print 'The command line arguments are:'
for i in sys.argv:
print i
print '\nThe PYTHONPATH is', sys.path, '\n'
| [
"weibiao.h@gmail.com"
] | weibiao.h@gmail.com |
c3a7c950f9a23512bad117ac3626ffae1b2589b3 | 87ca11415d7d0d72e2dd7a98e8c89410236d6550 | /shoutVS.py | 4dd265da5879e6ee2239f71768bbeeb4428b45dd | [] | no_license | dom143/Python- | a960ee88d2a9f8afea994cea3a9641330510fc84 | 9c2ef0bde2f6eff82efc550d23297b0796e1b30f | refs/heads/master | 2022-08-20T09:49:45.636808 | 2020-05-26T15:29:12 | 2020-05-26T15:29:12 | 267,081,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,250 | py | a=[]
x=0
while True :
print('____________________\n\n ร้านรองเท้าของดอมดอม\n____________________\n')
print('1) Nike[n]\n2) Adidas[a]\n3) Show Pay[s]\n4) Exit[x]')
brand=(input(" -> Select Brand : "))
brand=brand.lower()
while brand=='n':
print("\n*********** Nike ***********\n\n 1) Nike Epic React\n 2) Nike Zoom fly 3\n 3) Nike Zoom Pegasus\n 4) Back")
gen=(input(" -> Select Generation : "))
gen=gen.lower()
if gen=='1' :
ne=('Nike Epic React:4990:-998:3992')
a.append(ne)
x+=3992
elif gen=='2' :
nz1=('Nike Zoom fly:3990:-798:3192')
a.append(nz1)
x+=3192
elif gen=='3' :
nz2=('Nike Zoom Pegasus:3590:-1077:2513')
a.append(nz2)
x+=2513
elif gen=='4' :
break
while brand=='a':
print("\n*********** Adidas ***********\n\n 1) Adidas Pusha T Ozweego\n 2) Adidas Ozweego 3\n 3) Adidas Sleek\n 4) Back")
gena=(input(" -> Select Generation : "))
gena=gena.lower()
if gena=='1' :
apt=('Adidas Pusha T Ozweego:5390:-1617:3773')
a.append(apt)
x+=3773
elif gena=='2' :
ao=('Adidas Ozweego:4300:-1290:3010')
a.append(ao)
x+=3010
elif gena=='3' :
asl=('Adidas Sleek:3590:-1077:2513')
a.append(asl)
x+=2513
elif gena=='4' :
break
if brand=='s' :
print('{0:-<80}'.format(""))
print('{0:' '<35}{1:' '<20}{2:' '<20}{3:' '<20}'.format('รุ่น','ราคา','ส่วนลด','จ่ายจริง'))
print('{0:-<80}'.format(""))
for d in a :
e=d.split(':')
print('{0[0]:<32}{0[1]:<20}{0[2]:<20}{0[3]:<20}'.format(e))
print('{0:-<80}'.format(""))
print("รวมเป็นเงิน %d"%x)
print('{0:-<80}'.format(""))
break
if brand=='x':
break
else :
print("-----Error-----")
| [
"noreply@github.com"
] | noreply@github.com |
49c9b831d7494a17b8b9e2e2a8847fe9fb7f86e6 | f928edfc876d715159521589a22485d9de45cc89 | /import_hourly_csv_to_mariadb_09.py | 666d2da36d65dde7c7db69c75b28ea5fa5820375 | [] | no_license | guitar79/AirKorea_Python | cd06432740e0b292ca6ad3cde7144717967f5190 | 8077eaa0b6c444d575a25c7f7b992477a36c8294 | refs/heads/master | 2020-08-05T01:05:47.209200 | 2019-10-28T03:36:14 | 2019-10-28T03:36:14 | 212,342,300 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,349 | py | '''
-*- coding: utf-8 -*-
Auther guitar79@naver.com
'''
#import numpy as np
import os
import pymysql
from datetime import datetime
#import warning
#import time
start_time=str(datetime.now())
#mariaDB info
db_host = '10.114.0.121'
db_user = 'modis'
db_pass = 'rudrlrhkgkrrh'
db_name = 'AirKorea'
tb_name = 'hourly_vc'
#base directory
drbase = '/media/guitar79/8T/RS_data/Remote_Sensing/2017RNE/airkorea/csv1/'
#db connect
conn= pymysql.connect(host=db_host, user=db_user, password=db_pass, db=db_name,\
charset='utf8mb4', local_infile=1, cursorclass=pymysql.cursors.DictCursor)
cur = conn.cursor()
cur.execute("SET SQL_MODE = \"NO_AUTO_VALUE_ON_ZERO\";\
SET time_zone = \"+00:00\";")
cur.execute("DROP TABLE IF EXISTS `%s`;" %(tb_name))
cur.execute("DROP TABLE IF EXISTS `Obs_info`;")
cur.execute("CREATE TABLE IF NOT EXISTS `Obs_info` (\
`Ocode` int(6) NOT NULL,\
`Oname` varchar(12) NOT NULL,\
`Region` varchar(20) NOT NULL,\
`Address` varchar(500) DEFAULT NULL,\
`Lat` float DEFAULT NULL,\
`Lon` float DEFAULT NULL,\
`Alt` float DEFAULT NULL,\
`Remarks` char(255) DEFAULT NULL,\
PRIMARY KEY (`Ocode`))\
ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;")
cur.execute("CREATE TABLE IF NOT EXISTS `%s` (\
`Region` varchar(20) DEFAULT NULL,\
`Ocode` int(6) NOT NULL,\
`Oname` varchar(12) DEFAULT NULL,\
`Otime` int(12) NOT NULL,\
`SO2` float DEFAULT NULL,\
`CO` float DEFAULT NULL,\
`O3` float DEFAULT NULL,\
`NO2` float DEFAULT NULL,\
`PM10` int(4) DEFAULT NULL,\
`PM25` int(4) DEFAULT NULL,\
`Address` varchar(200) DEFAULT NULL,\
`id` int(11) NOT NULL AUTO_INCREMENT PRIMARY KEY,\
CONSTRAINT FK_Ocode FOREIGN KEY (`Ocode`) REFERENCES Obs_info(`Ocode`)\
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;"\
%(tb_name))
'''
cur.execute("CREATE TABLE IF NOT EXISTS `%s` (\
`Ocode` int(6) NOT NULL,\
`Otime` int(12) NOT NULL,\
`SO2` float DEFAULT NULL,\
`CO` float DEFAULT NULL,\
`O3` float DEFAULT NULL,\
`NO2` float DEFAULT NULL,\
`PM10` int(4) DEFAULT NULL,\
`PM25` int(4) DEFAULT NULL,\
`id` int(11) NOT NULL AUTO_INCREMENT,\
PRIMARY KEY (`id`),\
CONSTRAINT FK_Ocode FOREIGN KEY (`Ocode`) REFERENCES Obs_info(`Ocode`)\
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4;"\
%(tb_name))
'''
cur.execute("ALTER TABLE `%s`\
MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;" %(tb_name))
#delete all data in the table
print("TRUNCATE TABLE %s;" %(tb_name))
cur.execute("TRUNCATE TABLE %s;" %(tb_name))
conn.commit()
#log file
insert_log = open(drbase+'hourly_import_result.log', 'a')
error_log = open(drbase+'hourly_import_error.log', 'a')
for i in sorted(os.listdir(drbase),reverse=True):
#read csv files
if i[-4:] == '.csv':
print(i)
try :
print("LOAD DATA LOCAL \
INFILE '%s%s' \
INTO TABLE %s.%s \
FIELDS TERMINATED BY '\|' \
ENCLOSED BY '\"' \
LINES TERMINATED BY '\\n'\
IGNORE 1 LINES \
(`Region`, `Ocode`, `Oname`, `Otime`, \
`SO2`, `CO`, `O3`, `NO2`, `PM10`, `PM25`, `Address`);"\
%(drbase,i,db_name,tb_name))
cur.execute("LOAD DATA LOCAL \
INFILE '%s%s' \
INTO TABLE %s.%s \
FIELDS TERMINATED BY '\|' \
ENCLOSED BY '\"' \
LINES TERMINATED BY '\\n'\
IGNORE 1 LINES \
(`Region`, `Ocode`, `Oname`, `Otime`, \
`SO2`, `CO`, `O3`, `NO2`, `PM10`, `PM25`, `Address`);"\
%(drbase,i,db_name,tb_name))
conn.commit()
insert_log.write(drbase+i+" is inserted to the %s - %s\n"\
%(tb_name, datetime.now()))
except :
print(drbase+i+" is error : %s - %s\n"\
%(tb_name, datetime.now()))
error_log.write(drbase+i+" is error : %s - %s\n"\
%(tb_name, datetime.now()))
insert_log.close()
error_log.close()
print("CHECK TABLE %s.%s;" %(db_name, tb_name))
cur.execute("CHECK TABLE %s.%s;" %(db_name, tb_name))
conn.commit()
print("ALTER TABLE %s.%s ENGINE = InnoDB;" %(db_name, tb_name))
cur.execute("ALTER TABLE %s.%s ENGINE = InnoDB;" %(db_name, tb_name))
conn.commit()
print("OPTIMIZE TABLE %s.%s;" %(db_name, tb_name))
cur.execute("OPTIMIZE TABLE %s.%s;" %(db_name, tb_name))
conn.commit()
'''
print("FLUSH TABLE %s.%s;" %(db_name, tb_name))
cur.execute("FLUSH TABLE %s.%s;" %(db_name, tb_name))
conn.commit()
'''
cur.close()
end_time = str(datetime.now())
print("start : "+ start_time+" end: "+end_time)
'''
http://localhost/phpMyAdmin/sql.php?db=AirKorea&table=houly_vc&back=tbl_operations.php&goto=tbl_operations.php&sql_query=ALTER+TABLE+%60houly_vc%60+ENGINE+%3D+InnoDB%3B&token=746c2350251eec3ab8bef717286d7272
'''
| [
"noreply@github.com"
] | noreply@github.com |
ed312867880b8b41459e2cbb72aa46b1af2dc936 | 550e34ae9a58866e18572a1a06f4360f9697513f | /venv/bin/pip | 2cf4d01fd52eadfadd74aa20016d08a86bc6f956 | [] | no_license | ak89224/Python_Projects | c31bb50b97a10f9c3a74b8d66d396de55848b6bb | 5669a89ac73895f1cdb72928e9730dbae11cfb32 | refs/heads/master | 2020-06-19T12:51:41.858940 | 2019-07-13T22:49:36 | 2019-07-13T22:49:36 | 196,712,828 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 391 | #!/home/shah/PycharmProjects/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"ak89224@gmail.com"
] | ak89224@gmail.com | |
ae1e8444b7e83511169be63c369f1ce2d53da1bd | f9462f3768fa058bd895a56b151da694664ce588 | /examples/713_no-op.py | 1a3dcf7281703f179d38d40bd7d138b5afd82c90 | [
"MIT"
] | permissive | ryanpennings/workshop_swinburne_2021 | 16a9a7e2c7134832f8f714b7b430376f1b67dfb2 | 820ef4e36e73ac950f40e1846739087180af2e1c | refs/heads/main | 2023-05-31T16:35:16.535310 | 2021-06-17T06:24:51 | 2021-06-17T06:24:51 | 377,373,107 | 0 | 0 | MIT | 2021-06-17T06:24:51 | 2021-06-16T04:45:02 | null | UTF-8 | Python | false | false | 418 | py | import compas_rrc as rrc
if __name__ == '__main__':
# Create Ros Client
ros = rrc.RosClient()
ros.run()
# Create ABB Client
abb = rrc.AbbClient(ros, '/rob1')
print('Connected.')
# No operation
done = abb.send_and_wait(rrc.Noop())
# Print feedback
print('Feedback = ', done)
# End of Code
print('Finished')
# Close client
ros.close()
ros.terminate()
| [
"casas@arch.ethz.ch"
] | casas@arch.ethz.ch |
c5d90f334d2ad652397bb2e7363d56a969035396 | 5dedc0312eb49b0a58ae381f4e1bddb1cdee9aea | /sk_jieba.py | 06e608e97285b1ec52e18eb676840b473a80f541 | [] | no_license | matt-bentley/Chinese-Text-Classification | b983a1825966c1ad084694ab3073bf3bdd337c51 | 696df0dd4fe93601c1a73a50b1b294edf4c12a00 | refs/heads/master | 2020-04-20T13:58:03.474266 | 2019-02-02T22:17:08 | 2019-02-02T22:17:08 | 168,884,279 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,771 | py | import data_helpers
from time import time
import sys
import os
import numpy as np
import scipy.sparse as sp
import matplotlib.pyplot as plt
import jieba
import jieba.posseg as pseg
from sklearn import feature_extraction, model_selection, preprocessing, linear_model, naive_bayes, metrics, svm
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.linear_model import SGDClassifier
from sklearn.svm import LinearSVC
from sklearn.metrics import confusion_matrix
from sklearn.metrics import classification_report
print("Loading data...")
trainDF = data_helpers.load_data_and_labels()
print('loaded ' + str(len(trainDF)) + ' rows')
# split the dataset into training and validation datasets 75/25
train_x, valid_x, train_y, valid_y = model_selection.train_test_split(trainDF['text'], trainDF['class'],test_size=0.2, random_state=42)
print('Train count: ' + str(len(train_x)))
print('Evaluation count: ' + str(len(valid_x)))
x = []
tokenized_corpus = []
for text in train_x:
line = " ".join(jieba.cut(text))
tokenized_corpus.append(line)
x.append(line)
tokenized_test_corpus = []
for text in valid_x:
line = " ".join(jieba.cut(text))
tokenized_test_corpus.append(line)
x.append(line)
print('Processing Count vectors...')
# create a count vectorizer object
count_vect = CountVectorizer(analyzer='word', token_pattern=r'\w{1,}')
count_vect.fit(x)
# transform the training and validation data using count vectorizer object
xtrain_count = count_vect.transform(tokenized_corpus)
xvalid_count = count_vect.transform(tokenized_test_corpus)
print('Processing Tf-Idf vectors...')
# word level tf-idf
tfidf_vect = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}')
tfidf_vect.fit(x)
xtrain_tfidf = tfidf_vect.transform(tokenized_corpus)
xvalid_tfidf = tfidf_vect.transform(tokenized_test_corpus)
# ngram level tf-idf
tfidf_vect_ngram = TfidfVectorizer(analyzer='word', token_pattern=r'\w{1,}', ngram_range=(1,2))
tfidf_vect_ngram.fit(x)
xtrain_tfidf_ngram = tfidf_vect_ngram.transform(tokenized_corpus)
xvalid_tfidf_ngram = tfidf_vect_ngram.transform(tokenized_test_corpus)
def train_model(classifier, feature_vector_train, labels, feature_vector_valid, is_neural_net=False):
# fit the training dataset on the classifier
classifier.fit(feature_vector_train, labels)
# predict the labels on validation dataset
predictions = classifier.predict(feature_vector_valid)
if is_neural_net:
predictions = predictions.argmax(axis=-1)
return metrics.accuracy_score(predictions, valid_y)
print('')
print('Training Logistic Regression...')
# Linear Classifier on Count Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_count, train_y, xvalid_count)
print("LR, Count Vectors: ", accuracy)
# Linear Classifier on Word Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf, train_y, xvalid_tfidf)
print("LR, WordLevel TF-IDF: ", accuracy)
# Linear Classifier on Ngram Level TF IDF Vectors
accuracy = train_model(linear_model.LogisticRegression(), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print("LR, N-Gram Vectors: ", accuracy)
print('')
print('Training SGD SVM...')
# Linear Classifier on Count Vectors
accuracy = train_model(SGDClassifier(loss='log', penalty='elasticnet', alpha=1e-6, max_iter=5, random_state=42), xtrain_count, train_y, xvalid_count)
print("SGD SV, Count Vectors: ", accuracy)
# Linear Classifier on Word Level TF IDF Vectors
accuracy = train_model(SGDClassifier(loss='log', penalty='elasticnet', alpha=1e-6, max_iter=5, random_state=42), xtrain_tfidf, train_y, xvalid_tfidf)
print("SGD SV, WordLevel TF-IDF: ", accuracy)
# Linear Classifier on Ngram Level TF IDF Vectors
accuracy = train_model(SGDClassifier(loss='log', penalty='elasticnet', alpha=1e-6, max_iter=5, random_state=42), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print("SGD SV, N-Gram Vectors: ", accuracy)
print('')
print('Training SVC SVM...')
# Linear Classifier on Count Vectors
accuracy = train_model(LinearSVC(random_state=111, loss='hinge',C=1.3), xtrain_count, train_y, xvalid_count)
print("LR, Count Vectors: ", accuracy)
# Linear Classifier on Word Level TF IDF Vectors
accuracy = train_model(LinearSVC(random_state=111, loss='hinge',C=1.3), xtrain_tfidf, train_y, xvalid_tfidf)
print("LR, WordLevel TF-IDF: ", accuracy)
# Linear Classifier on Ngram Level TF IDF Vectors
accuracy = train_model(LinearSVC(random_state=111, loss='hinge',C=1.3), xtrain_tfidf_ngram, train_y, xvalid_tfidf_ngram)
print("LR, N-Gram Vectors: ", accuracy) | [
"matthew_j_bentley@hotmail.com"
] | matthew_j_bentley@hotmail.com |
0c3cedf2685c67f2eb9d33bc6c35662dcaa91c7a | fb408595c1edee0be293302c6d7bfc0c77d37c46 | /CODEFORCE/AprilFools2019/d.py | 432b7019cabe3980ec625ddae3e2873b4e70eb90 | [] | no_license | as950118/Algorithm | 39ad25519fd0e42b90ddf3797a61239862ad79b5 | 739a7d4b569057cdb6b6faa74254512b83d02bb1 | refs/heads/master | 2023-07-21T12:38:00.653579 | 2023-07-19T06:57:17 | 2023-07-19T06:57:17 | 125,176,176 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 25 | py | input()
input()
print(4)
| [
"na_qa@icloud.com"
] | na_qa@icloud.com |
817e5a809cd9b42f85bc7b98f88cae0525ecf649 | 4bd555bc662b8182a2e7644976bfdb00ed5e1ebe | /PythonistaAppTemplate/PythonistaKit.framework/pylib/zipfile.py | 9a21f7db76f5b8bd0c14c61af5223034a5ba37a7 | [] | no_license | fhelmli/homeNOWG2 | a103df1ef97194dec9501dbda87ec1f7c111fb4a | e794fd87b296544542fd9dc7ac94c981c6312419 | refs/heads/master | 2020-04-04T13:40:20.417769 | 2019-01-30T21:41:04 | 2019-01-30T21:41:04 | 155,970,686 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 65,550 | py | #\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
#\input texinfo
"""
Read and write ZIP files.
"""
import struct, os, time, sys, shutil
import binascii, cStringIO, stat
import io
import re
import string
try:
import zlib # We may need its compression method
crc32 = zlib.crc32
except ImportError:
zlib = None
crc32 = binascii.crc32
# Public API of this module (PyZipFile is defined later in the file).
__all__ = ["BadZipfile", "error", "ZIP_STORED", "ZIP_DEFLATED", "is_zipfile",
           "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile" ]
class BadZipfile(Exception):
    """Raised when a file is not a valid ZIP archive, or is corrupted."""
    pass
class LargeZipFile(Exception):
    """
    Raised when writing a zipfile that would require ZIP64 extensions
    (file size or offset beyond ZIP64_LIMIT) while those extensions
    are disabled (allowZip64=False).
    """
error = BadZipfile      # The exception raised by this module (legacy alias)
# Limits of the classic (non-ZIP64) record fields:
ZIP64_LIMIT = (1 << 31) - 1         # max size/offset before ZIP64 is required
ZIP_FILECOUNT_LIMIT = 1 << 16       # max number of members without ZIP64
ZIP_MAX_COMMENT = (1 << 16) - 1     # archive comment length is a 16-bit field
# constants for Zip file compression methods
ZIP_STORED = 0      # no compression
ZIP_DEFLATED = 8    # zlib "deflate" compression
# Other ZIP compression methods not supported
# Below are some formats and associated data for reading/writing headers using
# the struct module. The names and structures of headers/records are those used
# in the PKWARE description of the ZIP file format:
#     http://www.pkware.com/documents/casestudies/APPNOTE.TXT
# (URL valid as of January 2008)
# The "end of central directory" structure, magic number, size, and indices
# (section V.I in the format document)
structEndArchive = "<4s4H2LH"               # little-endian struct layout
stringEndArchive = "PK\005\006"             # magic number for the record
sizeEndCentDir = struct.calcsize(structEndArchive)
# Indices into the tuple produced by unpacking structEndArchive:
_ECD_SIGNATURE = 0              # "PK\005\006"
_ECD_DISK_NUMBER = 1            # number of this disk
_ECD_DISK_START = 2             # disk where the central directory starts
_ECD_ENTRIES_THIS_DISK = 3      # central-directory entries on this disk
_ECD_ENTRIES_TOTAL = 4          # total central-directory entries
_ECD_SIZE = 5                   # size of the central directory in bytes
_ECD_OFFSET = 6                 # offset of the central directory
_ECD_COMMENT_SIZE = 7           # length of the archive comment
# These last two indices are not part of the structure as defined in the
# spec, but they are used internally by this module as a convenience
_ECD_COMMENT = 8                # the archive comment string itself
_ECD_LOCATION = 9               # file offset where this record was found
# The "central directory" structure, magic number, size, and indices
# of entries in the structure (section V.F in the format document)
structCentralDir = "<4s4B4HL2L5H2L"
stringCentralDir = "PK\001\002"
sizeCentralDir = struct.calcsize(structCentralDir)
# indexes of entries in the central directory structure
# (tuple positions after struct.unpack(structCentralDir, ...))
_CD_SIGNATURE = 0
_CD_CREATE_VERSION = 1
_CD_CREATE_SYSTEM = 2
_CD_EXTRACT_VERSION = 3
_CD_EXTRACT_SYSTEM = 4
_CD_FLAG_BITS = 5
_CD_COMPRESS_TYPE = 6
_CD_TIME = 7
_CD_DATE = 8
_CD_CRC = 9
_CD_COMPRESSED_SIZE = 10
_CD_UNCOMPRESSED_SIZE = 11
_CD_FILENAME_LENGTH = 12
_CD_EXTRA_FIELD_LENGTH = 13
_CD_COMMENT_LENGTH = 14
_CD_DISK_NUMBER_START = 15
_CD_INTERNAL_FILE_ATTRIBUTES = 16
_CD_EXTERNAL_FILE_ATTRIBUTES = 17
_CD_LOCAL_HEADER_OFFSET = 18
# The "local file header" structure, magic number, size, and indices
# (section V.A in the format document)
structFileHeader = "<4s2B4HL2L2H"
stringFileHeader = "PK\003\004"
sizeFileHeader = struct.calcsize(structFileHeader)
# Indices into the tuple produced by unpacking structFileHeader:
_FH_SIGNATURE = 0
_FH_EXTRACT_VERSION = 1
_FH_EXTRACT_SYSTEM = 2
_FH_GENERAL_PURPOSE_FLAG_BITS = 3
_FH_COMPRESSION_METHOD = 4
_FH_LAST_MOD_TIME = 5
_FH_LAST_MOD_DATE = 6
_FH_CRC = 7
_FH_COMPRESSED_SIZE = 8
_FH_UNCOMPRESSED_SIZE = 9
_FH_FILENAME_LENGTH = 10
_FH_EXTRA_FIELD_LENGTH = 11
# The "Zip64 end of central directory locator" structure, magic number, and size
structEndArchive64Locator = "<4sLQL"
stringEndArchive64Locator = "PK\x06\x07"
sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator)
# The "Zip64 end of central directory" record, magic number, size, and indices
# (section V.G in the format document)
structEndArchive64 = "<4sQ2H2L4Q"
stringEndArchive64 = "PK\x06\x06"
sizeEndCentDir64 = struct.calcsize(structEndArchive64)
# Indices into the tuple produced by unpacking structEndArchive64:
_CD64_SIGNATURE = 0
_CD64_DIRECTORY_RECSIZE = 1
_CD64_CREATE_VERSION = 2
_CD64_EXTRACT_VERSION = 3
_CD64_DISK_NUMBER = 4
_CD64_DISK_NUMBER_START = 5
_CD64_NUMBER_ENTRIES_THIS_DISK = 6
_CD64_NUMBER_ENTRIES_TOTAL = 7
_CD64_DIRECTORY_SIZE = 8
_CD64_OFFSET_START_CENTDIR = 9
def _check_zipfile(fp):
    """Return True if the open file object *fp* looks like a ZIP archive.

    Detection is based on locating a valid "end of central directory"
    record; any IOError while seeking/reading means "not a zip".
    """
    try:
        endrec = _EndRecData(fp)
    except IOError:
        return False
    # A truthy record means the archive magic number was found.
    return bool(endrec)
def is_zipfile(filename):
    """Quickly see if a file is a ZIP file by checking the magic number.

    The filename argument may be a file or file-like object too.
    Returns True/False; never raises IOError.
    """
    result = False
    try:
        # Duck-type: anything with a read() method is treated as an
        # already-open file object; otherwise open the path ourselves.
        if hasattr(filename, "read"):
            result = _check_zipfile(fp=filename)
        else:
            with open(filename, "rb") as fp:
                result = _check_zipfile(fp)
    except IOError:
        # Unreadable/nonexistent file: report "not a zip" rather than raise.
        pass
    return result
def _EndRecData64(fpin, offset, endrec):
    """
    Read the ZIP64 end-of-archive records and use that to update endrec.

    fpin   -- seekable file object positioned anywhere in the archive
    offset -- negative offset (from the end) of the classic end record
    endrec -- mutable list from _EndRecData; updated in place and returned

    If no valid ZIP64 locator/record is present, endrec is returned
    unchanged.  Raises BadZipfile for multi-disk archives.
    """
    try:
        # The ZIP64 locator sits immediately before the classic end record.
        fpin.seek(offset - sizeEndCentDir64Locator, 2)
    except IOError:
        # If the seek fails, the file is not large enough to contain a ZIP64
        # end-of-archive record, so just return the end record we were given.
        return endrec
    data = fpin.read(sizeEndCentDir64Locator)
    if len(data) != sizeEndCentDir64Locator:
        return endrec
    sig, diskno, reloff, disks = struct.unpack(structEndArchive64Locator, data)
    if sig != stringEndArchive64Locator:
        return endrec
    if diskno != 0 or disks != 1:
        raise BadZipfile("zipfiles that span multiple disks are not supported")
    # Assume no 'zip64 extensible data'
    fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2)
    data = fpin.read(sizeEndCentDir64)
    if len(data) != sizeEndCentDir64:
        return endrec
    sig, sz, create_version, read_version, disk_num, disk_dir, \
            dircount, dircount2, dirsize, diroffset = \
            struct.unpack(structEndArchive64, data)
    if sig != stringEndArchive64:
        return endrec
    # Update the original endrec using data from the ZIP64 record
    endrec[_ECD_SIGNATURE] = sig
    endrec[_ECD_DISK_NUMBER] = disk_num
    endrec[_ECD_DISK_START] = disk_dir
    endrec[_ECD_ENTRIES_THIS_DISK] = dircount
    endrec[_ECD_ENTRIES_TOTAL] = dircount2
    endrec[_ECD_SIZE] = dirsize
    endrec[_ECD_OFFSET] = diroffset
    return endrec
def _EndRecData(fpin):
    """Return data from the "End of Central Directory" record, or None.

    The data is a list of the nine items in the ZIP "End of central dir"
    record followed by a tenth item, the file seek offset of this record."""
    # Determine file size
    fpin.seek(0, 2)
    filesize = fpin.tell()
    # Check to see if this is ZIP file with no archive comment (the
    # "end of central directory" structure should be the last item in the
    # file if this is the case).
    try:
        fpin.seek(-sizeEndCentDir, 2)
    except IOError:
        # File smaller than the end record itself: cannot be a zip.
        return None
    data = fpin.read()
    if (len(data) == sizeEndCentDir and
        data[0:4] == stringEndArchive and
        data[-2:] == b"\000\000"):
        # the signature is correct and there's no comment, unpack structure
        endrec = struct.unpack(structEndArchive, data)
        endrec=list(endrec)
        # Append a blank comment and record start offset
        endrec.append("")
        endrec.append(filesize - sizeEndCentDir)
        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, -sizeEndCentDir, endrec)
    # Either this is not a ZIP file, or it is a ZIP file with an archive
    # comment.  Search the end of the file for the "end of central directory"
    # record signature. The comment is the last item in the ZIP file and may be
    # up to 64K long.  It is assumed that the "end of central directory" magic
    # number does not appear in the comment.
    maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0)
    fpin.seek(maxCommentStart, 0)
    data = fpin.read()
    start = data.rfind(stringEndArchive)
    if start >= 0:
        # found the magic number; attempt to unpack and interpret
        recData = data[start:start+sizeEndCentDir]
        if len(recData) != sizeEndCentDir:
            # Zip file is corrupted.
            return None
        endrec = list(struct.unpack(structEndArchive, recData))
        commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file
        comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize]
        endrec.append(comment)
        endrec.append(maxCommentStart + start)
        # Try to read the "Zip64 end of central directory" structure
        return _EndRecData64(fpin, maxCommentStart + start - filesize,
                             endrec)
    # Unable to find a valid end of central directory structure
    return None
class ZipInfo (object):
    """Class with attributes describing each file in the ZIP archive.

    Instances are created by ZipFile when reading the central directory,
    or constructed directly by callers that want full control over the
    metadata written for a member.
    """
    __slots__ = (
        'orig_filename',     # original filename exactly as stored in archive
        'filename',          # sanitized / normalized filename
        'date_time',         # (year, month, day, hour, min, sec)
        'compress_type',     # ZIP_STORED or ZIP_DEFLATED
        'comment',           # per-member comment string
        'extra',             # raw "extra" field bytes
        'create_system',     # system that created the archive (0=FAT, 3=Unix)
        'create_version',    # PKZIP version that created the archive
        'extract_version',   # PKZIP version required to extract
        'reserved',          # must be zero
        'flag_bits',         # general-purpose bit flags
        'volume',            # volume number of the file header
        'internal_attr',     # internal attributes
        'external_attr',     # external attributes (e.g. Unix mode << 16)
        'header_offset',     # byte offset of the local file header
        'CRC',               # CRC-32 of the uncompressed data
        'compress_size',     # size of the compressed data
        'file_size',         # size of the uncompressed data
        '_raw_time',         # raw DOS time field (used for decryption check)
    )
    def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)):
        self.orig_filename = filename   # Original file name in archive
        # Terminate the file name at the first null byte.  Null bytes in file
        # names are used as tricks by viruses in archives.
        null_byte = filename.find(chr(0))
        if null_byte >= 0:
            filename = filename[0:null_byte]
        # This is used to ensure paths in generated ZIP files always use
        # forward slashes as the directory separator, as required by the
        # ZIP format specification.
        if os.sep != "/" and os.sep in filename:
            filename = filename.replace(os.sep, "/")
        self.filename = filename        # Normalized file name
        self.date_time = date_time      # year, month, day, hour, min, sec
        if date_time[0] < 1980:
            # DOS date fields cannot represent years before 1980.
            raise ValueError('ZIP does not support timestamps before 1980')
        # Standard values:
        self.compress_type = ZIP_STORED # Type of compression for the file
        self.comment = ""               # Comment for each file
        self.extra = ""                 # ZIP extra data
        if sys.platform == 'win32':
            self.create_system = 0          # System which created ZIP archive
        else:
            # Assume everything else is unix-y
            self.create_system = 3          # System which created ZIP archive
        self.create_version = 20        # Version which created ZIP archive
        self.extract_version = 20       # Version needed to extract archive
        self.reserved = 0               # Must be zero
        self.flag_bits = 0              # ZIP flag bits
        self.volume = 0                 # Volume number of file header
        self.internal_attr = 0          # Internal attributes
        self.external_attr = 0          # External file attributes
        # Other attributes are set by class ZipFile:
        # header_offset         Byte offset to the file header
        # CRC                   CRC-32 of the uncompressed file
        # compress_size         Size of the compressed file
        # file_size             Size of the uncompressed file
    def FileHeader(self, zip64=None):
        """Return the per-file (local) header as a string.

        zip64=None means "decide automatically from the sizes"; True/False
        forces the ZIP64 extra record on or off.  Raises LargeZipFile if
        the sizes require ZIP64 but it was explicitly disabled.
        """
        dt = self.date_time
        # Pack date/time into the two 16-bit DOS fields (2-second resolution).
        dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
        dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
        if self.flag_bits & 0x08:
            # Set these to zero because we write them after the file data
            CRC = compress_size = file_size = 0
        else:
            CRC = self.CRC
            compress_size = self.compress_size
            file_size = self.file_size
        extra = self.extra
        if zip64 is None:
            zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT
        if zip64:
            # Append the ZIP64 extra record carrying the 64-bit sizes.
            fmt = '<HHQQ'
            extra = extra + struct.pack(fmt,
                    1, struct.calcsize(fmt)-4, file_size, compress_size)
        if file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT:
            if not zip64:
                raise LargeZipFile("Filesize would require ZIP64 extensions")
            # File is larger than what fits into a 4 byte integer,
            # fall back to the ZIP64 extension
            file_size = 0xffffffff
            compress_size = 0xffffffff
            self.extract_version = max(45, self.extract_version)
            self.create_version = max(45, self.extract_version)
        filename, flag_bits = self._encodeFilenameFlags()
        header = struct.pack(structFileHeader, stringFileHeader,
                 self.extract_version, self.reserved, flag_bits,
                 self.compress_type, dostime, dosdate, CRC,
                 compress_size, file_size,
                 len(filename), len(extra))
        return header + filename + extra
    def _encodeFilenameFlags(self):
        # Return (filename_bytes, flag_bits).  Unicode names that are not
        # pure ASCII are stored UTF-8 with the 0x800 "language encoding" flag.
        if isinstance(self.filename, unicode):
            try:
                return self.filename.encode('ascii'), self.flag_bits
            except UnicodeEncodeError:
                return self.filename.encode('utf-8'), self.flag_bits | 0x800
        else:
            return self.filename, self.flag_bits
    def _decodeFilename(self):
        # Inverse of _encodeFilenameFlags: honor the UTF-8 flag when reading.
        if self.flag_bits & 0x800:
            return self.filename.decode('utf-8')
        else:
            return self.filename
    def _decodeExtra(self):
        # Try to decode the extra field (a sequence of (tag, length, data)
        # records); only the ZIP64 record (tag 1) is interpreted here.
        extra = self.extra
        unpack = struct.unpack
        while extra:
            tp, ln = unpack('<HH', extra[:4])
            if tp == 1:
                # Record length determines how many 64-bit counts follow.
                if ln >= 24:
                    counts = unpack('<QQQ', extra[4:28])
                elif ln == 16:
                    counts = unpack('<QQ', extra[4:20])
                elif ln == 8:
                    counts = unpack('<Q', extra[4:12])
                elif ln == 0:
                    counts = ()
                else:
                    raise RuntimeError, "Corrupt extra field %s"%(ln,)
                idx = 0
                # ZIP64 extension (large files and/or large archives)
                # Sentinel 0xffffffff values in the fixed-size fields mean
                # "real value is in the ZIP64 extra record".
                if self.file_size in (0xffffffffffffffffL, 0xffffffffL):
                    self.file_size = counts[idx]
                    idx += 1
                if self.compress_size == 0xFFFFFFFFL:
                    self.compress_size = counts[idx]
                    idx += 1
                if self.header_offset == 0xffffffffL:
                    old = self.header_offset
                    self.header_offset = counts[idx]
                    idx+=1
            extra = extra[ln+4:]
class _ZipDecrypter:
    """Class to handle decryption of files stored within a ZIP archive.

    ZIP supports a password-based form of encryption. Even though known
    plaintext attacks have been found against it, it is still useful
    to be able to get data out of such a file.

    Usage:
        zd = _ZipDecrypter(mypwd)
        plain_char = zd(cypher_char)
        plain_text = map(zd, cypher_text)
    """
    def _GenerateCRCTable():
        """Generate a CRC-32 table.

        ZIP encryption uses the CRC32 one-byte primitive for scrambling some
        internal keys. We noticed that a direct implementation is faster than
        relying on binascii.crc32().
        """
        poly = 0xedb88320
        table = [0] * 256
        for i in range(256):
            crc = i
            for j in range(8):
                if crc & 1:
                    crc = ((crc >> 1) & 0x7FFFFFFF) ^ poly
                else:
                    crc = ((crc >> 1) & 0x7FFFFFFF)
            table[i] = crc
        return table
    # Shared class-level table, computed once at class-definition time.
    crctable = _GenerateCRCTable()
    def _crc32(self, ch, crc):
        """Compute the CRC32 primitive on one byte."""
        return ((crc >> 8) & 0xffffff) ^ self.crctable[(crc ^ ord(ch)) & 0xff]
    def __init__(self, pwd):
        # Initial key values are fixed by the ZIP specification.
        self.key0 = 305419896
        self.key1 = 591751049
        self.key2 = 878082192
        for p in pwd:
            self._UpdateKeys(p)
    def _UpdateKeys(self, c):
        # Mix one plaintext byte into the three rolling keys
        # (masks keep the values within 32 bits).
        self.key0 = self._crc32(c, self.key0)
        self.key1 = (self.key1 + (self.key0 & 255)) & 4294967295
        self.key1 = (self.key1 * 134775813 + 1) & 4294967295
        self.key2 = self._crc32(chr((self.key1 >> 24) & 255), self.key2)
    def __call__(self, c):
        """Decrypt a single character."""
        c = ord(c)
        k = self.key2 | 2
        c = c ^ (((k * (k^1)) >> 8) & 255)
        c = chr(c)
        # The decrypted byte feeds back into the key state.
        self._UpdateKeys(c)
        return c
# Human-readable names for ZIP compression-method ids; used when raising
# NotImplementedError for methods this module cannot decompress.
compressor_names = {
    0: 'store',
    1: 'shrink',
    2: 'reduce',
    3: 'reduce',
    4: 'reduce',
    5: 'reduce',
    6: 'implode',
    7: 'tokenize',
    8: 'deflate',
    9: 'deflate64',
    10: 'implode',
    12: 'bzip2',
    14: 'lzma',
    18: 'terse',
    19: 'lz77',
    97: 'wavpack',
    98: 'ppmd',
}
class ZipExtFile(io.BufferedIOBase):
    """File-like object for reading an archive member.
       Is returned by ZipFile.open().
    """
    # Max size supported by decompressor.
    MAX_N = 1 << 31 - 1
    # Read from compressed files in 4k blocks.
    MIN_READ_SIZE = 4096
    # Search for universal newlines or line chunks.
    PATTERN = re.compile(r'^(?P<chunk>[^\r\n]+)|(?P<newline>\n|\r\n?)')
    def __init__(self, fileobj, mode, zipinfo, decrypter=None,
                 close_fileobj=False):
        # fileobj: underlying archive file, already positioned at the
        # member's data; decrypter: optional _ZipDecrypter instance.
        self._fileobj = fileobj
        self._decrypter = decrypter
        self._close_fileobj = close_fileobj
        self._compress_type = zipinfo.compress_type
        self._compress_size = zipinfo.compress_size
        self._compress_left = zipinfo.compress_size
        if self._compress_type == ZIP_DEFLATED:
            # -15: raw deflate stream, no zlib header/trailer.
            self._decompressor = zlib.decompressobj(-15)
        elif self._compress_type != ZIP_STORED:
            descr = compressor_names.get(self._compress_type)
            if descr:
                raise NotImplementedError("compression type %d (%s)" % (self._compress_type, descr))
            else:
                raise NotImplementedError("compression type %d" % (self._compress_type,))
        self._unconsumed = ''   # compressed bytes not yet decompressed
        self._readbuffer = ''   # decompressed bytes not yet handed out
        self._offset = 0        # read position within _readbuffer
        self._universal = 'U' in mode
        self.newlines = None
        # Adjust read size for encrypted files since the first 12 bytes
        # are for the encryption/password information.
        if self._decrypter is not None:
            self._compress_left -= 12
        self.mode = mode
        self.name = zipinfo.filename
        if hasattr(zipinfo, 'CRC'):
            self._expected_crc = zipinfo.CRC
            self._running_crc = crc32(b'') & 0xffffffff
        else:
            # No CRC known (e.g. streamed member): skip integrity checking.
            self._expected_crc = None
    def readline(self, limit=-1):
        """Read and return a line from the stream.

        If limit is specified, at most limit bytes will be read.
        """
        if not self._universal and limit < 0:
            # Shortcut common case - newline found in buffer.
            i = self._readbuffer.find('\n', self._offset) + 1
            if i > 0:
                line = self._readbuffer[self._offset: i]
                self._offset = i
                return line
        if not self._universal:
            return io.BufferedIOBase.readline(self, limit)
        line = ''
        while limit < 0 or len(line) < limit:
            readahead = self.peek(2)
            if readahead == '':
                return line
            #
            # Search for universal newlines or line chunks.
            #
            # The pattern returns either a line chunk or a newline, but not
            # both. Combined with peek(2), we are assured that the sequence
            # '\r\n' is always retrieved completely and never split into
            # separate newlines - '\r', '\n' due to coincidental readaheads.
            #
            match = self.PATTERN.search(readahead)
            newline = match.group('newline')
            if newline is not None:
                if self.newlines is None:
                    self.newlines = []
                if newline not in self.newlines:
                    self.newlines.append(newline)
                self._offset += len(newline)
                return line + '\n'
            chunk = match.group('chunk')
            if limit >= 0:
                chunk = chunk[: limit - len(line)]
            self._offset += len(chunk)
            line += chunk
        return line
    def peek(self, n=1):
        """Returns buffered bytes without advancing the position."""
        if n > len(self._readbuffer) - self._offset:
            # Pull more data into the buffer, then rewind our offset.
            chunk = self.read(n)
            self._offset -= len(chunk)
        # Return up to 512 bytes to reduce allocation overhead for tight loops.
        return self._readbuffer[self._offset: self._offset + 512]
    def readable(self):
        # This stream is read-only.
        return True
    def read(self, n=-1):
        """Read and return up to n bytes.

        If the argument is omitted, None, or negative, data is read and
        returned until EOF is reached.
        """
        buf = ''
        if n is None:
            n = -1
        while True:
            if n < 0:
                data = self.read1(n)
            elif n > len(buf):
                data = self.read1(n - len(buf))
            else:
                return buf
            if len(data) == 0:
                # EOF reached.
                return buf
            buf += data
    def _update_crc(self, newdata, eof):
        # Update the CRC using the given data.
        if self._expected_crc is None:
            # No need to compute the CRC if we don't have a reference value
            return
        self._running_crc = crc32(newdata, self._running_crc) & 0xffffffff
        # Check the CRC if we're at the end of the file
        if eof and self._running_crc != self._expected_crc:
            raise BadZipfile("Bad CRC-32 for file %r" % self.name)
    def read1(self, n):
        """Read up to n bytes with at most one read() system call."""
        # Simplify algorithm (branching) by transforming negative n to large n.
        if n < 0 or n is None:
            n = self.MAX_N
        # Bytes available in read buffer.
        len_readbuffer = len(self._readbuffer) - self._offset
        # Read from file.
        if self._compress_left > 0 and n > len_readbuffer + len(self._unconsumed):
            nbytes = n - len_readbuffer - len(self._unconsumed)
            nbytes = max(nbytes, self.MIN_READ_SIZE)
            nbytes = min(nbytes, self._compress_left)
            data = self._fileobj.read(nbytes)
            self._compress_left -= len(data)
            if data and self._decrypter is not None:
                # Decrypt byte-by-byte (the decrypter is stateful).
                data = ''.join(map(self._decrypter, data))
            if self._compress_type == ZIP_STORED:
                self._update_crc(data, eof=(self._compress_left==0))
                self._readbuffer = self._readbuffer[self._offset:] + data
                self._offset = 0
            else:
                # Prepare deflated bytes for decompression.
                self._unconsumed += data
        # Handle unconsumed data.
        if (len(self._unconsumed) > 0 and n > len_readbuffer and
            self._compress_type == ZIP_DEFLATED):
            data = self._decompressor.decompress(
                self._unconsumed,
                max(n - len_readbuffer, self.MIN_READ_SIZE)
            )
            # Bytes the decompressor did not consume are kept for next call.
            self._unconsumed = self._decompressor.unconsumed_tail
            eof = len(self._unconsumed) == 0 and self._compress_left == 0
            if eof:
                data += self._decompressor.flush()
            self._update_crc(data, eof=eof)
            self._readbuffer = self._readbuffer[self._offset:] + data
            self._offset = 0
        # Read from buffer.
        data = self._readbuffer[self._offset: self._offset + n]
        self._offset += len(data)
        return data
    def close(self):
        try :
            # Only close the underlying file if we opened it ourselves.
            if self._close_fileobj:
                self._fileobj.close()
        finally:
            super(ZipExtFile, self).close()
class ZipFile(object):
    """ Class with methods to open, read, write, close, list zip files.

    z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=False)

    file: Either the path to the file, or a file-like object.
          If it is a path, the file will be opened and closed by ZipFile.
    mode: The mode can be either read "r", write "w" or append "a".
    compression:  ZIP_STORED (no compression) or ZIP_DEFLATED (requires zlib).
    allowZip64: if True ZipFile will create files with ZIP64 extensions when
                needed, otherwise it will raise an exception when this would
                be necessary.

    """
    fp = None                   # Set here since __del__ checks it
    def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=False):
        """Open the ZIP file with mode read "r", write "w" or append "a".

        Raises RuntimeError for an invalid mode/compression and BadZipfile
        if mode "r" is given and the file is not a valid archive.
        """
        if mode not in ("r", "w", "a"):
            raise RuntimeError('ZipFile() requires mode "r", "w", or "a"')
        if compression == ZIP_STORED:
            pass
        elif compression == ZIP_DEFLATED:
            if not zlib:
                raise RuntimeError,\
                      "Compression requires the (missing) zlib module"
        else:
            raise RuntimeError, "That compression method is not supported"
        self._allowZip64 = allowZip64
        self._didModify = False
        self.debug = 0  # Level of printing: 0 through 3
        self.NameToInfo = {}    # Find file info given name
        self.filelist = []      # List of ZipInfo instances for archive
        self.compression = compression  # Method of compression
        # Strip any 'b' from the mode; key drives the open/read/write logic.
        self.mode = key = mode.replace('b', '')[0]
        self.pwd = None
        self._comment = ''
        # Check if we were passed a file-like object
        if isinstance(file, basestring):
            self._filePassed = 0
            self.filename = file
            modeDict = {'r' : 'rb', 'w': 'wb', 'a' : 'r+b'}
            try:
                self.fp = open(file, modeDict[mode])
            except IOError:
                if mode == 'a':
                    # Appending to a nonexistent file: fall back to create.
                    mode = key = 'w'
                    self.fp = open(file, modeDict[mode])
                else:
                    raise
        else:
            self._filePassed = 1
            self.fp = file
            self.filename = getattr(file, 'name', None)
        try:
            if key == 'r':
                self._RealGetContents()
            elif key == 'w':
                # set the modified flag so central directory gets written
                # even if no files are added to the archive
                self._didModify = True
            elif key == 'a':
                try:
                    # See if file is a zip file
                    self._RealGetContents()
                    # seek to start of directory and overwrite
                    self.fp.seek(self.start_dir, 0)
                except BadZipfile:
                    # file is not a zip file, just append
                    self.fp.seek(0, 2)
                    # set the modified flag so central directory gets written
                    # even if no files are added to the archive
                    self._didModify = True
            else:
                raise RuntimeError('Mode must be "r", "w" or "a"')
        except:
            # Any failure here: close the file we opened, then re-raise.
            fp = self.fp
            self.fp = None
            if not self._filePassed:
                fp.close()
            raise
    def __enter__(self):
        # Support "with ZipFile(...) as zf:" usage.
        return self
    def __exit__(self, type, value, traceback):
        # Close the archive on leaving the "with" block, even after an error.
        self.close()
    def _RealGetContents(self):
        """Read in the table of contents for the ZIP file.

        Populates self.filelist and self.NameToInfo from the central
        directory; raises BadZipfile if no valid directory is found.
        """
        fp = self.fp
        try:
            endrec = _EndRecData(fp)
        except IOError:
            raise BadZipfile("File is not a zip file")
        if not endrec:
            raise BadZipfile, "File is not a zip file"
        if self.debug > 1:
            print endrec
        size_cd = endrec[_ECD_SIZE]             # bytes in central directory
        offset_cd = endrec[_ECD_OFFSET]         # offset of central directory
        self._comment = endrec[_ECD_COMMENT]    # archive comment
        # "concat" is zero, unless zip was concatenated to another file
        concat = endrec[_ECD_LOCATION] - size_cd - offset_cd
        if endrec[_ECD_SIGNATURE] == stringEndArchive64:
            # If Zip64 extension structures are present, account for them
            concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator)
        if self.debug > 2:
            inferred = concat + offset_cd
            print "given, inferred, offset", offset_cd, inferred, concat
        # self.start_dir:  Position of start of central directory
        self.start_dir = offset_cd + concat
        fp.seek(self.start_dir, 0)
        # Read the whole central directory into memory and parse from there.
        data = fp.read(size_cd)
        fp = cStringIO.StringIO(data)
        total = 0
        while total < size_cd:
            centdir = fp.read(sizeCentralDir)
            if len(centdir) != sizeCentralDir:
                raise BadZipfile("Truncated central directory")
            centdir = struct.unpack(structCentralDir, centdir)
            if centdir[_CD_SIGNATURE] != stringCentralDir:
                raise BadZipfile("Bad magic number for central directory")
            if self.debug > 2:
                print centdir
            filename = fp.read(centdir[_CD_FILENAME_LENGTH])
            # Create ZipInfo instance to store file information
            x = ZipInfo(filename)
            x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH])
            x.comment = fp.read(centdir[_CD_COMMENT_LENGTH])
            x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET]
            (x.create_version, x.create_system, x.extract_version, x.reserved,
                x.flag_bits, x.compress_type, t, d,
                x.CRC, x.compress_size, x.file_size) = centdir[1:12]
            x.volume, x.internal_attr, x.external_attr = centdir[15:18]
            # Convert date/time code to (year, month, day, hour, min, sec)
            x._raw_time = t
            x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F,
                                     t>>11, (t>>5)&0x3F, (t&0x1F) * 2 )
            x._decodeExtra()
            x.header_offset = x.header_offset + concat
            x.filename = x._decodeFilename()
            self.filelist.append(x)
            self.NameToInfo[x.filename] = x
            # update total bytes read from central directory
            total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH]
                     + centdir[_CD_EXTRA_FIELD_LENGTH]
                     + centdir[_CD_COMMENT_LENGTH])
        if self.debug > 2:
            print "total", total
def namelist(self):
"""Return a list of file names in the archive."""
l = []
for data in self.filelist:
l.append(data.filename)
return l
    def infolist(self):
        """Return a list of class ZipInfo instances for files in the
        archive.  Note: this is the live internal list, not a copy."""
        return self.filelist
    def printdir(self):
        """Print a table of contents for the zip file to stdout."""
        print "%-46s %19s %12s" % ("File Name", "Modified    ", "Size")
        for zinfo in self.filelist:
            date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6]
            print "%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size)
    def testzip(self):
        """Read all the files and check the CRC.

        Returns the name of the first bad member, or None if all are OK.
        """
        chunk_size = 2 ** 20
        for zinfo in self.filelist:
            try:
                # Read by chunks, to avoid an OverflowError or a
                # MemoryError with very large embedded files.
                with self.open(zinfo.filename, "r") as f:
                    # Read and discard file data; ZipExtFile checks the
                    # CRC-32 as it goes and raises BadZipfile on mismatch.
                    while f.read(chunk_size):     # Check CRC-32
                        pass
            except BadZipfile:
                return zinfo.filename
    def getinfo(self, name):
        """Return the instance of ZipInfo given 'name'.

        Raises KeyError if no member with that exact name exists.
        """
        info = self.NameToInfo.get(name)
        if info is None:
            raise KeyError(
                'There is no item named %r in the archive' % name)
        return info
    def setpassword(self, pwd):
        """Set default password for encrypted files.

        Used by read()/open()/extract() when no per-call pwd is given.
        """
        self.pwd = pwd
    @property
    def comment(self):
        """The comment text associated with the ZIP file."""
        return self._comment
    @comment.setter
    def comment(self, comment):
        # check for valid comment length; the on-disk field is 16 bits,
        # so anything longer is silently truncated (with a debug notice).
        if len(comment) >= ZIP_MAX_COMMENT:
            if self.debug:
                print('Archive comment is too long; truncating to %d bytes'
                        % ZIP_MAX_COMMENT)
            comment = comment[:ZIP_MAX_COMMENT]
        self._comment = comment
        # Mark modified so the new comment is written out on close().
        self._didModify = True
    def read(self, name, pwd=None):
        """Return file bytes (as a string) for name.

        name may be a member name or ZipInfo; pwd overrides the default
        password for encrypted members.
        """
        return self.open(name, "r", pwd).read()
    def open(self, name, mode="r", pwd=None):
        """Return file-like object (ZipExtFile) for 'name'.

        name may be a member name or a ZipInfo instance; mode is "r", "U"
        or "rU"; pwd overrides the default password for encrypted members.
        Raises BadZipfile for corrupt headers and RuntimeError for a
        missing/bad password.
        """
        if mode not in ("r", "U", "rU"):
            raise RuntimeError, 'open() requires mode "r", "U", or "rU"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to read ZIP archive that was already closed"
        # Only open a new file for instances where we were not
        # given a file object in the constructor
        if self._filePassed:
            zef_file = self.fp
            should_close = False
        else:
            zef_file = open(self.filename, 'rb')
            should_close = True
        try:
            # Make sure we have an info object
            if isinstance(name, ZipInfo):
                # 'name' is already an info object
                zinfo = name
            else:
                # Get info object for name
                zinfo = self.getinfo(name)
            zef_file.seek(zinfo.header_offset, 0)
            # Skip the file header:
            fheader = zef_file.read(sizeFileHeader)
            if len(fheader) != sizeFileHeader:
                raise BadZipfile("Truncated file header")
            fheader = struct.unpack(structFileHeader, fheader)
            if fheader[_FH_SIGNATURE] != stringFileHeader:
                raise BadZipfile("Bad magic number for file header")
            fname = zef_file.read(fheader[_FH_FILENAME_LENGTH])
            if fheader[_FH_EXTRA_FIELD_LENGTH]:
                zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH])
            # Cross-check local header name against the central directory.
            if fname != zinfo.orig_filename:
                raise BadZipfile, \
                        'File name in directory "%s" and header "%s" differ.' % (
                            zinfo.orig_filename, fname)
            # check for encrypted flag & handle password
            is_encrypted = zinfo.flag_bits & 0x1
            zd = None
            if is_encrypted:
                if not pwd:
                    pwd = self.pwd
                if not pwd:
                    raise RuntimeError, "File %s is encrypted, " \
                          "password required for extraction" % name
                zd = _ZipDecrypter(pwd)
                # The first 12 bytes in the cypher stream is an encryption header
                #  used to strengthen the algorithm. The first 11 bytes are
                #  completely random, while the 12th contains the MSB of the CRC,
                #  or the MSB of the file time depending on the header type
                #  and is used to check the correctness of the password.
                bytes = zef_file.read(12)
                h = map(zd, bytes[0:12])
                if zinfo.flag_bits & 0x8:
                    # compare against the file type from extended local headers
                    check_byte = (zinfo._raw_time >> 8) & 0xff
                else:
                    # compare against the CRC otherwise
                    check_byte = (zinfo.CRC >> 24) & 0xff
                if ord(h[11]) != check_byte:
                    raise RuntimeError("Bad password for file", name)
            return ZipExtFile(zef_file, mode, zinfo, zd,
                    close_fileobj=should_close)
        except:
            # On any failure, close the file we opened before re-raising.
            if should_close:
                zef_file.close()
            raise
def extract(self, member, path=None, pwd=None):
"""Extract a member from the archive to the current working directory,
using its full name. Its file information is extracted as accurately
as possible. `member' may be a filename or a ZipInfo object. You can
specify a different directory using `path'.
"""
if not isinstance(member, ZipInfo):
member = self.getinfo(member)
if path is None:
path = os.getcwd()
return self._extract_member(member, path, pwd)
def extractall(self, path=None, members=None, pwd=None):
"""Extract all members from the archive to the current working
directory. `path' specifies a different directory to extract to.
`members' is optional and must be a subset of the list returned
by namelist().
"""
if members is None:
members = self.namelist()
for zipinfo in members:
self.extract(zipinfo, path, pwd)
    def _extract_member(self, member, targetpath, pwd):
        """Extract the ZipInfo object 'member' to a physical
           file on the path targetpath.

           Sanitizes the archive name (absolute paths, drive letters,
           '..' components, Windows-illegal characters) before joining
           it onto targetpath.  Returns the created path.
        """
        # build the destination pathname, replacing
        # forward slashes to platform specific separators.
        arcname = member.filename.replace('/', os.path.sep)
        if os.path.altsep:
            arcname = arcname.replace(os.path.altsep, os.path.sep)
        # interpret absolute pathname as relative, remove drive letter or
        # UNC path, redundant separators, "." and ".." components.
        arcname = os.path.splitdrive(arcname)[1]
        arcname = os.path.sep.join(x for x in arcname.split(os.path.sep)
                    if x not in ('', os.path.curdir, os.path.pardir))
        if os.path.sep == '\\':
            # filter illegal characters on Windows
            illegal = ':<>|"?*'
            if isinstance(arcname, unicode):
                table = {ord(c): ord('_') for c in illegal}
            else:
                table = string.maketrans(illegal, '_' * len(illegal))
            arcname = arcname.translate(table)
            # remove trailing dots
            arcname = (x.rstrip('.') for x in arcname.split(os.path.sep))
            arcname = os.path.sep.join(x for x in arcname if x)
        targetpath = os.path.join(targetpath, arcname)
        targetpath = os.path.normpath(targetpath)
        # Create all upper directories if necessary.
        upperdirs = os.path.dirname(targetpath)
        if upperdirs and not os.path.exists(upperdirs):
            os.makedirs(upperdirs)
        if member.filename[-1] == '/':
            # Directory entry: just make the directory, no data to copy.
            if not os.path.isdir(targetpath):
                os.mkdir(targetpath)
            return targetpath
        with self.open(member, pwd=pwd) as source, \
             file(targetpath, "wb") as target:
            shutil.copyfileobj(source, target)
        return targetpath
    def _writecheck(self, zinfo):
        """Check for errors before writing a file to the archive.

        Raises RuntimeError for mode/state/compression problems and
        LargeZipFile when ZIP64 would be required but is disabled;
        duplicate names only produce a debug warning.
        """
        if zinfo.filename in self.NameToInfo:
            if self.debug:      # Warning for duplicate names
                print "Duplicate name:", zinfo.filename
        if self.mode not in ("w", "a"):
            raise RuntimeError, 'write() requires mode "w" or "a"'
        if not self.fp:
            raise RuntimeError, \
                  "Attempt to write ZIP archive that was already closed"
        if zinfo.compress_type == ZIP_DEFLATED and not zlib:
            raise RuntimeError, \
                  "Compression requires the (missing) zlib module"
        if zinfo.compress_type not in (ZIP_STORED, ZIP_DEFLATED):
            raise RuntimeError, \
                  "That compression method is not supported"
        if zinfo.file_size > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Filesize would require ZIP64 extensions")
        if zinfo.header_offset > ZIP64_LIMIT:
            if not self._allowZip64:
                raise LargeZipFile("Zipfile size would require ZIP64 extensions")
def write(self, filename, arcname=None, compress_type=None):
    """Put the bytes from filename into the archive under the name
    arcname (defaults to filename with drive letter / leading
    separators stripped)."""
    if not self.fp:
        raise RuntimeError(
              "Attempt to write to ZIP archive that was already closed")

    st = os.stat(filename)
    isdir = stat.S_ISDIR(st.st_mode)
    mtime = time.localtime(st.st_mtime)
    date_time = mtime[0:6]
    # Create ZipInfo instance to store file information
    if arcname is None:
        arcname = filename
    arcname = os.path.normpath(os.path.splitdrive(arcname)[1])
    # Archive names are stored relative: strip leading path separators.
    while arcname[0] in (os.sep, os.altsep):
        arcname = arcname[1:]
    if isdir:
        arcname += '/'
    zinfo = ZipInfo(arcname, date_time)
    zinfo.external_attr = (st[0] & 0xFFFF) << 16L      # Unix attributes
    if compress_type is None:
        zinfo.compress_type = self.compression
    else:
        zinfo.compress_type = compress_type

    zinfo.file_size = st.st_size
    zinfo.flag_bits = 0x00
    zinfo.header_offset = self.fp.tell()    # Start of header bytes

    self._writecheck(zinfo)
    self._didModify = True

    if isdir:
        # Directory entry: header only, zero sizes / CRC, no payload.
        zinfo.file_size = 0
        zinfo.compress_size = 0
        zinfo.CRC = 0
        self.filelist.append(zinfo)
        self.NameToInfo[zinfo.filename] = zinfo
        self.fp.write(zinfo.FileHeader(False))
        return

    with open(filename, "rb") as fp:
        # Must overwrite CRC and sizes with correct data later
        zinfo.CRC = CRC = 0
        zinfo.compress_size = compress_size = 0
        # Compressed size can be larger than uncompressed size
        zip64 = self._allowZip64 and \
                zinfo.file_size * 1.05 > ZIP64_LIMIT
        self.fp.write(zinfo.FileHeader(zip64))
        if zinfo.compress_type == ZIP_DEFLATED:
            # -15: raw deflate stream, no zlib header/trailer.
            cmpr = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
                 zlib.DEFLATED, -15)
        else:
            cmpr = None
        file_size = 0
        # Stream the file in 8 KiB chunks, updating CRC/sizes as we go.
        while 1:
            buf = fp.read(1024 * 8)
            if not buf:
                break
            file_size = file_size + len(buf)
            CRC = crc32(buf, CRC) & 0xffffffff
            if cmpr:
                buf = cmpr.compress(buf)
                compress_size = compress_size + len(buf)
            self.fp.write(buf)
    if cmpr:
        buf = cmpr.flush()
        compress_size = compress_size + len(buf)
        self.fp.write(buf)
        zinfo.compress_size = compress_size
    else:
        zinfo.compress_size = file_size
    zinfo.CRC = CRC
    zinfo.file_size = file_size
    if not zip64 and self._allowZip64:
        if file_size > ZIP64_LIMIT:
            raise RuntimeError('File size has increased during compressing')
        if compress_size > ZIP64_LIMIT:
            raise RuntimeError('Compressed size larger than uncompressed size')
    # Seek backwards and write file header (which will now include
    # correct CRC and file sizes)
    position = self.fp.tell()       # Preserve current position in file
    self.fp.seek(zinfo.header_offset, 0)
    self.fp.write(zinfo.FileHeader(zip64))
    self.fp.seek(position, 0)
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
def writestr(self, zinfo_or_arcname, bytes, compress_type=None):
    """Write a file into the archive.  The contents is the string
    'bytes'.  'zinfo_or_arcname' is either a ZipInfo instance or
    the name of the file in the archive."""
    if not isinstance(zinfo_or_arcname, ZipInfo):
        # Bare name given: synthesize a ZipInfo stamped "now".
        zinfo = ZipInfo(filename=zinfo_or_arcname,
                        date_time=time.localtime(time.time())[:6])

        zinfo.compress_type = self.compression
        # Default Unix permissions 0600 in the upper 16 bits.
        zinfo.external_attr = 0600 << 16
    else:
        zinfo = zinfo_or_arcname

    if not self.fp:
        raise RuntimeError(
              "Attempt to write to ZIP archive that was already closed")

    if compress_type is not None:
        zinfo.compress_type = compress_type

    zinfo.file_size = len(bytes)            # Uncompressed size
    zinfo.header_offset = self.fp.tell()    # Start of header bytes
    self._writecheck(zinfo)
    self._didModify = True
    zinfo.CRC = crc32(bytes) & 0xffffffff       # CRC-32 checksum
    if zinfo.compress_type == ZIP_DEFLATED:
        # -15: raw deflate stream, no zlib header/trailer.
        co = zlib.compressobj(zlib.Z_DEFAULT_COMPRESSION,
             zlib.DEFLATED, -15)
        bytes = co.compress(bytes) + co.flush()
        zinfo.compress_size = len(bytes)    # Compressed size
    else:
        zinfo.compress_size = zinfo.file_size
    zip64 = zinfo.file_size > ZIP64_LIMIT or \
            zinfo.compress_size > ZIP64_LIMIT
    if zip64 and not self._allowZip64:
        raise LargeZipFile("Filesize would require ZIP64 extensions")
    self.fp.write(zinfo.FileHeader(zip64))
    self.fp.write(bytes)
    if zinfo.flag_bits & 0x08:
        # Write CRC and file sizes after the file data
        fmt = '<LQQ' if zip64 else '<LLL'
        self.fp.write(struct.pack(fmt, zinfo.CRC, zinfo.compress_size,
              zinfo.file_size))
    self.fp.flush()
    self.filelist.append(zinfo)
    self.NameToInfo[zinfo.filename] = zinfo
def __del__(self):
    """Call the "close()" method in case the user forgot."""
    # Safety net only; callers should close() explicitly or use a
    # with-statement so the ending records are written deterministically.
    self.close()
def close(self):
    """Close the file, and for mode "w" and "a" write the ending
    records (central directory and end-of-archive record)."""
    if self.fp is None:
        return

    try:
        if self.mode in ("w", "a") and self._didModify: # write ending records
            count = 0
            pos1 = self.fp.tell()
            for zinfo in self.filelist:         # write central directory
                count = count + 1
                dt = zinfo.date_time
                # Pack the timestamp into MS-DOS date/time fields.
                dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2]
                dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2)
                extra = []
                if zinfo.file_size > ZIP64_LIMIT \
                        or zinfo.compress_size > ZIP64_LIMIT:
                    # Oversized values go into the ZIP64 extra field;
                    # the fixed fields hold the 0xffffffff sentinel.
                    extra.append(zinfo.file_size)
                    extra.append(zinfo.compress_size)
                    file_size = 0xffffffff
                    compress_size = 0xffffffff
                else:
                    file_size = zinfo.file_size
                    compress_size = zinfo.compress_size

                if zinfo.header_offset > ZIP64_LIMIT:
                    extra.append(zinfo.header_offset)
                    header_offset = 0xffffffffL
                else:
                    header_offset = zinfo.header_offset

                extra_data = zinfo.extra
                if extra:
                    # Append a ZIP64 field to the extra's
                    extra_data = struct.pack(
                            '<HH' + 'Q'*len(extra),
                            1, 8*len(extra), *extra) + extra_data

                    # ZIP64 entries require at least version 4.5.
                    extract_version = max(45, zinfo.extract_version)
                    create_version = max(45, zinfo.create_version)
                else:
                    extract_version = zinfo.extract_version
                    create_version = zinfo.create_version

                try:
                    filename, flag_bits = zinfo._encodeFilenameFlags()
                    centdir = struct.pack(structCentralDir,
                        stringCentralDir, create_version,
                        zinfo.create_system, extract_version, zinfo.reserved,
                        flag_bits, zinfo.compress_type, dostime, dosdate,
                        zinfo.CRC, compress_size, file_size,
                        len(filename), len(extra_data), len(zinfo.comment),
                        0, zinfo.internal_attr, zinfo.external_attr,
                        header_offset)
                except DeprecationWarning:
                    # Dump the offending values for debugging, then re-raise.
                    print >>sys.stderr, (structCentralDir,
                        stringCentralDir, create_version,
                        zinfo.create_system, extract_version, zinfo.reserved,
                        zinfo.flag_bits, zinfo.compress_type, dostime, dosdate,
                        zinfo.CRC, compress_size, file_size,
                        len(zinfo.filename), len(extra_data), len(zinfo.comment),
                        0, zinfo.internal_attr, zinfo.external_attr,
                        header_offset)
                    raise
                self.fp.write(centdir)
                self.fp.write(filename)
                self.fp.write(extra_data)
                self.fp.write(zinfo.comment)

            pos2 = self.fp.tell()
            # Write end-of-zip-archive record
            centDirCount = count
            centDirSize = pos2 - pos1
            centDirOffset = pos1
            if (centDirCount >= ZIP_FILECOUNT_LIMIT or
                centDirOffset > ZIP64_LIMIT or
                centDirSize > ZIP64_LIMIT):
                # Need to write the ZIP64 end-of-archive records
                zip64endrec = struct.pack(
                        structEndArchive64, stringEndArchive64,
                        44, 45, 45, 0, 0, centDirCount, centDirCount,
                        centDirSize, centDirOffset)
                self.fp.write(zip64endrec)

                zip64locrec = struct.pack(
                        structEndArchive64Locator,
                        stringEndArchive64Locator, 0, pos2, 1)
                self.fp.write(zip64locrec)
                # Clamp the classic record's fields to their field widths.
                centDirCount = min(centDirCount, 0xFFFF)
                centDirSize = min(centDirSize, 0xFFFFFFFF)
                centDirOffset = min(centDirOffset, 0xFFFFFFFF)

            endrec = struct.pack(structEndArchive, stringEndArchive,
                                 0, 0, centDirCount, centDirCount,
                                 centDirSize, centDirOffset, len(self._comment))
            self.fp.write(endrec)
            self.fp.write(self._comment)
            self.fp.flush()
    finally:
        # Always release the file object, even if writing records failed.
        fp = self.fp
        self.fp = None
        if not self._filePassed:
            fp.close()
class PyZipFile(ZipFile):
    """Class to create ZIP archives with Python library files and packages."""

    def writepy(self, pathname, basename = ""):
        """Add all files from "pathname" to the ZIP archive.

        If pathname is a package directory, search the directory and
        all package subdirectories recursively for all *.py and enter
        the modules into the archive.  If pathname is a plain
        directory, listdir *.py and enter all modules.  Else, pathname
        must be a Python *.py file and the module will be put into the
        archive.  Added modules are always module.pyo or module.pyc.
        This method will compile the module.py into module.pyc if
        necessary.
        """
        dir, name = os.path.split(pathname)
        if os.path.isdir(pathname):
            initname = os.path.join(pathname, "__init__.py")
            if os.path.isfile(initname):
                # This is a package directory, add it
                if basename:
                    basename = "%s/%s" % (basename, name)
                else:
                    basename = name
                if self.debug:
                    print "Adding package in", pathname, "as", basename
                fname, arcname = self._get_codename(initname[0:-3], basename)
                if self.debug:
                    print "Adding", arcname
                self.write(fname, arcname)
                dirlist = os.listdir(pathname)
                # __init__.py was already written above.
                dirlist.remove("__init__.py")
                # Add all *.py files and package subdirectories
                for filename in dirlist:
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if os.path.isdir(path):
                        if os.path.isfile(os.path.join(path, "__init__.py")):
                            # This is a package directory, add it
                            self.writepy(path, basename)  # Recursive call
                    elif ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                         basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
            else:
                # This is NOT a package directory, add its files at top level
                if self.debug:
                    print "Adding files from directory", pathname
                for filename in os.listdir(pathname):
                    path = os.path.join(pathname, filename)
                    root, ext = os.path.splitext(filename)
                    if ext == ".py":
                        fname, arcname = self._get_codename(path[0:-3],
                                         basename)
                        if self.debug:
                            print "Adding", arcname
                        self.write(fname, arcname)
        else:
            if pathname[-3:] != ".py":
                raise RuntimeError, \
                      'Files added with writepy() must end with ".py"'
            fname, arcname = self._get_codename(pathname[0:-3], basename)
            if self.debug:
                print "Adding file", arcname
            self.write(fname, arcname)

    def _get_codename(self, pathname, basename):
        """Return (filename, archivename) for the path.

        Given a module name path, return the correct file path and
        archive name, compiling if necessary.  For example, given
        /python/lib/string, return (/python/lib/string.pyc, string).
        """
        file_py = pathname + ".py"
        file_pyc = pathname + ".pyc"
        file_pyo = pathname + ".pyo"
        # Prefer an up-to-date .pyo; otherwise (re)compile to .pyc if stale.
        if os.path.isfile(file_pyo) and \
                os.stat(file_pyo).st_mtime >= os.stat(file_py).st_mtime:
            fname = file_pyo    # Use .pyo file
        elif not os.path.isfile(file_pyc) or \
             os.stat(file_pyc).st_mtime < os.stat(file_py).st_mtime:
            import py_compile
            if self.debug:
                print "Compiling", file_py
            try:
                py_compile.compile(file_py, file_pyc, None, True)
            except py_compile.PyCompileError,err:
                # Report but continue; the (possibly stale) .pyc is used.
                print err.msg
            fname = file_pyc
        else:
            fname = file_pyc
        archivename = os.path.split(fname)[1]
        if basename:
            archivename = "%s/%s" % (basename, archivename)
        return (fname, archivename)
def main(args = None):
    """Tiny command-line front end: list, test, extract or create a zip."""
    import textwrap
    USAGE=textwrap.dedent("""\
        Usage:
            zipfile.py -l zipfile.zip        # Show listing of a zipfile
            zipfile.py -t zipfile.zip        # Test if a zipfile is valid
            zipfile.py -e zipfile.zip target # Extract zipfile into target dir
            zipfile.py -c zipfile.zip src ... # Create zipfile from sources
        """)
    if args is None:
        args = sys.argv[1:]

    if not args or args[0] not in ('-l', '-c', '-e', '-t'):
        print USAGE
        sys.exit(1)

    if args[0] == '-l':
        if len(args) != 2:
            print USAGE
            sys.exit(1)
        with ZipFile(args[1], 'r') as zf:
            zf.printdir()

    elif args[0] == '-t':
        if len(args) != 2:
            print USAGE
            sys.exit(1)
        with ZipFile(args[1], 'r') as zf:
            badfile = zf.testzip()
        if badfile:
            print("The following enclosed file is corrupted: {!r}".format(badfile))
        print "Done testing"

    elif args[0] == '-e':
        if len(args) != 3:
            print USAGE
            sys.exit(1)

        with ZipFile(args[1], 'r') as zf:
            out = args[2]
            for path in zf.namelist():
                # Normalize "./foo" members so they land under the target dir.
                if path.startswith('./'):
                    tgt = os.path.join(out, path[2:])
                else:
                    tgt = os.path.join(out, path)

                tgtdir = os.path.dirname(tgt)
                if not os.path.exists(tgtdir):
                    os.makedirs(tgtdir)
                with open(tgt, 'wb') as fp:
                    fp.write(zf.read(path))

    elif args[0] == '-c':
        if len(args) < 3:
            print USAGE
            sys.exit(1)

        def addToZip(zf, path, zippath):
            # Recursively add a file or directory tree to the archive.
            if os.path.isfile(path):
                zf.write(path, zippath, ZIP_DEFLATED)
            elif os.path.isdir(path):
                for nm in os.listdir(path):
                    addToZip(zf,
                            os.path.join(path, nm), os.path.join(zippath, nm))
            # else: ignore

        with ZipFile(args[1], 'w', allowZip64=True) as zf:
            for src in args[2:]:
                addToZip(zf, src, os.path.basename(src))

if __name__ == "__main__":
    main()
| [
"tberk@gmx.at"
] | tberk@gmx.at |
daf1da1a639961efd5aa81fb41791833c8b2eb8d | 6b5a3e8de615791442bb0a5055edbbed3b7a858a | /py_stealth/config.py | 43f18a2baf9f5526462a56c3667cb8dadedbad06 | [] | no_license | cesarsl/ultimate-uo-trainer | 950b9a8aedcc3e7e8ed818477a726145c6cb2910 | 25f68f4c1f3a68eb80060b3e912791287ee50b09 | refs/heads/master | 2020-11-28T23:44:37.246663 | 2020-03-24T15:33:37 | 2020-03-24T15:33:37 | 229,952,033 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 174 | py |
STEALTH_CODEC = 'UTF_16LE'
SCRIPT_CODEC = 'UTF8' # your files encoding. py2 only
HOST = 'localhost'
SOCK_TIMEOUT = 10
MSG_TIMEOUT = 10
DEBUG = False
ERROR_FILTER = False
| [
"cesarsl@gmail.com"
] | cesarsl@gmail.com |
4d2f3d3d5ec4f52d5551c3eedac05787f4596acb | 3df0b8fc28854e3e5a4b048eecc74ee65704444b | /Tool/util.py | efddd38b982f036059d1c3f947fb3ae7bcff3471 | [] | no_license | Bright-Sheep/Hack4TreeSite | 7c711c34adbfaad32a7c5a63955d63f672c5d9d2 | ca795475da09fcd2e5135268a1049438f1574f69 | refs/heads/master | 2023-06-03T08:34:52.431941 | 2021-06-17T13:39:51 | 2021-06-17T13:39:51 | 376,833,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 740 | py | import json
from shapely.geometry import Point
from shapely.geometry.polygon import Polygon
from shapely.geometry.multipolygon import MultiPolygon
def coordinates_in_city(lat, lon, city="Marseille"):
    """Return True when the (lat, lon) position lies inside *city*.

    The city limits are read from a GeoJSON file whose geometries are
    stored as (lon, lat) pairs, hence the coordinate swap below.
    """
    location = Point(lon, lat)
    with open('Tool/limit_marseille.json') as json_file:
        geo_data = json.load(json_file)
    # Keep only the feature(s) whose name matches the requested city.
    matching = [feature for feature in geo_data['features']
                if feature['properties']['nom'] == city]
    polygon_coords = matching[0]["geometry"]['coordinates']
    # Each entry holds its exterior ring first; build one Polygon per entry.
    shapes = [Polygon(rings[0]) for rings in polygon_coords]
    return MultiPolygon(shapes).contains(location)
"agpr95@yahoo.com"
] | agpr95@yahoo.com |
4feca85e222a9f2924ecd5bab182e3cbc059a4d3 | 6a278cce5af0cdb4ac07d6e980f9247f199f110b | /gui_VMWARE/startstop.py | 3f08affd6985e50860a7f2ba7460b3330d67afe1 | [] | no_license | donatiri/gui_biowolf | 5d65c7e1a1194e636537c1ac29d841133fb7369b | 0691bff43f30ceeefa1a728a60297a64e1066b5c | refs/heads/main | 2023-07-21T14:53:31.167894 | 2021-09-02T09:22:55 | 2021-09-02T09:22:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,777 | py | from PyQt4.QtCore import *
from PyQt4 import QtGui
import struct
import time
import datetime
import tables
from tables import *
from enum import Enum
from queue import Queue
import libusb1
import usb1
import ctypes
from ctypes import byref, create_string_buffer, c_int, sizeof, POINTER, \
cast, c_uint8, c_uint16, c_ubyte, string_at, c_void_p, cdll, addressof, \
c_char
import os
#length of the data packet
datalen = 64
def cp2130_libusb_write(handle, value): # value should 1 byte to write
    """Send a CP2130 SPI "Write" command carrying one data byte.

    Returns True on success, False if the bulk transfer failed.
    """
    buf = c_ubyte * 9
    # 8-byte command header (command 0x01 = Write, length 1, little-endian)
    # followed by the single payload byte.
    write_command_buf = buf(
        0x00, 0x00,
        0x01,
        0x00,
        0x01, 0x00, 0x00, 0x00, value)
    # populate command buffer with value to write
    #write_command_buf[8] = value
    bytesWritten = c_int()
    usbTimeout = 500
    # Bulk OUT endpoint 0x02 carries CP2130 commands.
    error_code = libusb1.libusb_bulk_transfer(handle, 0x02, write_command_buf, sizeof(write_command_buf), byref(bytesWritten), usbTimeout)
    if error_code:
        print('Error in bulk transfer (write command)! Error # {}'.format(error_code))
        return False
    return True
def cp2130_libusb_read(handle):
    """Issue a CP2130 SPI "Read" command and return `datalen` bytes.

    Returns the ctypes buffer of received bytes, or False on any
    transfer error or short read/write.
    """
    buf = c_ubyte * 8
    # Read command header: command 0x00 = Read, length = datalen (LE).
    read_command_buf = buf(
        0x00, 0x00,
        0x00,
        0x00,
        datalen, 0x00, 0x00, 0x00)
    bytesWritten = c_int()
    buf = c_ubyte * datalen
    read_input_buf = buf()
    bytesRead = c_int()
    usbTimeout = 500
    # Send the command on bulk OUT endpoint 0x02 ...
    error_code = libusb1.libusb_bulk_transfer(handle, 0x02, read_command_buf, sizeof(read_command_buf), byref(bytesWritten), usbTimeout)
    if error_code:
        print('Error in bulk transfer (read command). Error # {}'.format(error_code))
        return False
    if bytesWritten.value != sizeof(read_command_buf):
        print('Error in bulk transfer write size')
        print(bytesWritten.value)
        return False
    # ... then collect the data from bulk IN endpoint 0x81.
    error_code = libusb1.libusb_bulk_transfer(handle, 0x81, read_input_buf, sizeof(read_input_buf), byref(bytesRead), usbTimeout)
    if error_code:
        print('Error in bulk transfer (read buffer). Error # {}'.format(error_code))
        return False
    if bytesRead.value != sizeof(read_input_buf):
        print('Error in bulk transfer - returned {} out of {} bytes'.format(bytesRead.value, sizeof(read_input_buf)))
        return False
    return read_input_buf
def set_gpio_chip_select(handle, number, mode):
    """Configure a CP2130 GPIO pin as SPI chip select via control transfer.

    Returns True on success, False if the transfer came back short.
    """
    buf = c_ubyte * 2
    # GPIO (0-10), MODE 0x00: Specified chip select is disabled 0x01:
    # Specified chip select is enabled during SPI transfers 0x02: Specified
    # chip select is enabled during SPI transfers; allother chip selects are
    # disabled
    control_buf_out = buf(number, mode)
    usbTimeout = 5000
    # Vendor request 0x25 (Set_GPIO_Chip_Select), host-to-device.
    error_code = libusb1.libusb_control_transfer(
        handle, 0x40, 0x25, 0x0000, 0x0000, control_buf_out, sizeof(control_buf_out), usbTimeout)
    if error_code != sizeof(control_buf_out):
        print('Error in bulk transfer')
        return False
    return True
# --- libusb setup: find and open the CP2130 bridge -----------------------
context = libusb1.libusb_context_p()
deviceList = libusb1.libusb_device_p_p()
deviceCount = 0
deviceDescriptor = libusb1.libusb_device_descriptor()
device = libusb1.libusb_device_p()
cp2130Handle = libusb1.libusb_device_handle_p()
kernelAttached = 0
dev_list = []

if libusb1.libusb_init(byref(context)) != 0:
    print('Could not initialize libusb!')

deviceCount = libusb1.libusb_get_device_list(context, byref(deviceList))
if deviceCount <= 0:
    print('No devices found!')

# Scan the bus for the CP2130 (VID 0x10C4 / PID 0x87A0); keep the last match.
for i in range(0, deviceCount):
    if libusb1.libusb_get_device_descriptor(deviceList[i], byref(deviceDescriptor)) == 0:
        if (deviceDescriptor.idVendor == 0x10C4) and (deviceDescriptor.idProduct == 0x87A0):
            dev_list.append(deviceList[i])
            device = deviceList[i]
            print('entro')

if libusb1.libusb_open(device, byref(cp2130Handle)) != 0:
    print('Could not open device!')
print(deviceCount)
'''
if libusb1.libusb_kernel_driver_active(cp2130Handle, 0) != 0:
    libusb1.libusb_detach_kernel_driver(cp2130Handle, 0)
    self.kernelAttached = 1
    print('return')
'''
if libusb1.libusb_claim_interface(cp2130Handle, 0) != 0:
    print('Could not claim interface!')
    print('return2')
print("Connected to CP2130")

# SET GPIO1 as CS
gpio_cs = 0
set_gpio_chip_select(cp2130Handle, gpio_cs, 1)

#USER CODE BEGIN
#command definition
en_data = 0xEF
ack_en = 0x55
dis_data = 0x22

# Interactive loop: translate typed menu choices into SPI commands.
while 1:
    command_str = input("Please enter a command: 1) enable data 2) acknowledge enable 3) disable data 4) read:\n")
    command_num = int(command_str)
    #enable data
    if command_num == 1:
        print(f'You entered command: {command_num}')
        cp2130_libusb_write(cp2130Handle, en_data)
        '''
        read_bytes=cp2130_libusb_read(cp2130Handle)
        for i in range(datalen):
            byte = read_bytes[i]
            bytevalue = ctypes.c_ubyte(byte).value
            print(bytevalue)
        '''
    # acknowledge enable
    elif command_num == 2:
        print(f'You entered command: {command_num}')
        cp2130_libusb_write(cp2130Handle, ack_en)
        read_bytes=cp2130_libusb_read(cp2130Handle)
        for i in range(datalen):
            byte = read_bytes[i]
            bytevalue = ctypes.c_ubyte(byte).value
            print(bytevalue)
    #disable data
    elif command_num == 3:
        print(f'You entered command: {command_num}')
        cp2130_libusb_write(cp2130Handle, dis_data)
    #read
    elif command_num == 4:
        print(f'You entered command: {command_num}')
        read_bytes=cp2130_libusb_read(cp2130Handle)
        for i in range(datalen):
            byte = read_bytes[i]
            bytevalue = ctypes.c_ubyte(byte).value
            print(bytevalue)
    else:
        print(f'wrong command')
#USER CODE END

# NOTE(review): the loop above has no break, so this cleanup only runs
# if the loop is left via an exception (e.g. KeyboardInterrupt or a
# non-numeric input making int() raise ValueError).
released = libusb1.libusb_release_interface(cp2130Handle, 0)
if released:
    print('Not released successfully')
| [
"donatiriccardo94@gmail.com"
] | donatiriccardo94@gmail.com |
1b7eed8f744ea11802a5df7be117bebf014e3e99 | 7f08abad82eebf29fbe5d8ab2761645d71089c53 | /web-apps/helloworld/helloworld.py | a9ea7fa6f32b34ec9c06cb9ce0574003ec084e99 | [] | no_license | leenamurgai/cs253-web-app-eng | 6f1ad5d7c64fbce84e2555d2af4d7c0830cff476 | d219923e6af93f42a557c6850f705b574a31fc89 | refs/heads/master | 2020-03-25T00:53:29.691420 | 2018-08-31T23:04:13 | 2018-08-31T23:04:13 | 143,210,829 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,706 | py | import webapp2
from valid_date import valid_day
from valid_date import valid_month
from valid_date import valid_year
from html_escape import escape_html
form="""
<form method="post">
What is your birthday?
<br>
<label> Day <input type="text" name="day" value="%(day)s"> </label>
<label> Month <input type="text" name="month" value="%(month)s"> </label>
<label> Year <input type="text" name="year" value="%(year)s"> </label>
<div style="color: red">%(error)s</div>
<br>
<br>
<input type="submit">
</form>
"""
class MainPage(webapp2.RequestHandler):
    """Renders the birthday form and validates submitted dates."""

    def write_form(self, error="", day="", month="", year=""):
        # Re-render the form; user input is HTML-escaped before being
        # echoed back, preventing injection into the template.
        self.response.out.write(form % {"error": error,
                                        "day": escape_html(day),
                                        "month": escape_html(month),
                                        "year": escape_html(year)})

    def get(self):
        # Initial page load: empty form, no error message.
        self.write_form()

    def post(self):
        user_day = self.request.get('day')
        user_month = self.request.get('month')
        user_year = self.request.get('year')

        # The valid_* helpers return a falsy value on invalid input
        # (see the `if not (...)` test below).
        day = valid_day(user_day)
        month = valid_month(user_month)
        year = valid_year(user_year)

        if not (day and month and year):
            # Redisplay the form with the original input and an error.
            self.write_form("That doesn't look like a valid date.",
                            user_day, user_month, user_year)
        else:
            self.redirect("/thanks")
else:
self.redirect("/thanks")
class ThanksHandler(webapp2.RequestHandler):
def get(self):
self.response.out.write("Thanks, that's a valid date!")
app = webapp2.WSGIApplication([('/', MainPage),
('/thanks', ThanksHandler)], debug=True)
| [
"code@leenamurgai.co.uk"
] | code@leenamurgai.co.uk |
97f031e8f649387e73049f799030bcf50f8486b3 | c3c4fc370de0d2c4938a1ef2afb50b0432790b60 | /fiend/urls.py | 16177af9d9ef32150c48faf8ac9ed093a63e9c67 | [] | no_license | mikezentz/fiend | 210a47b812220186c83ab9e21058f7c08626ef43 | d58ed107fc31d7fef65fc84a9621357e3224404e | refs/heads/master | 2022-12-11T15:06:44.325473 | 2018-12-07T01:59:08 | 2018-12-07T01:59:08 | 156,812,316 | 0 | 0 | null | 2021-06-10T21:18:49 | 2018-11-09T04:56:54 | JavaScript | UTF-8 | Python | false | false | 1,014 | py | """fiend URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# URL routing table: admin site plus the two project apps.  Note that
# both 'dashboard/' and the site root are served by the redditsearch app.
urlpatterns = [
    path('admin/', admin.site.urls),
    path('dashboard/', include('redditsearch.urls')),
    path('accounts/', include('accounts.urls')),
    path('', include('redditsearch.urls'))
]

# urlpatterns += staticfiles_urlpatterns()
| [
"mike@pop-os.localdomain"
] | mike@pop-os.localdomain |
105a9117038b30a098e30547683df67d15c627b8 | dd07d32524bd1cc021537cbc0472a39f68a3858d | /handlers/chat_list.py | 88693c2b1cf71f67b171420cc7cbe20e3366e9d3 | [] | no_license | maxnoodles/websockt_chat_robot | 2e23e5c7b371980f6a51d849a76b5f24dec21529 | 35d52b3ebacd8c8b8775ec842bf60b46067494e8 | refs/heads/master | 2022-11-13T02:38:36.433764 | 2020-06-18T03:51:03 | 2020-06-18T03:51:03 | 256,470,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,744 | py | # -*- coding:utf-8 -*-
#
# Author: chenjiaxin
# Date: 2020-04-13
from copy import deepcopy
from const import CONST
from orm import mongo_op
from util.chat_util import join_key
from handlers.comm import MongoHandlerMixin, BaseHandler
class ChatListHandler(BaseHandler, MongoHandlerMixin):
    """Lists a seller's buyers, each annotated with the latest chat message."""

    def get(self):
        seller_id = self.get_argument(CONST.SELLER_ID, default='')
        keyword = self.get_argument(CONST.KEYWORD, default='')
        filter_dict = {
            CONST.SELLER_ID: seller_id
        }
        if keyword:
            # Case-insensitive substring match on buyer name or email.
            filter_dict['$or'] = [
                {CONST.NAME: {'$regex': keyword, '$options': 'i'}},
                {CONST.EMAIL: {'$regex': keyword, '$options': 'i'}}
            ]
        projection = {
            CONST.ID: 0,
        }
        cursor = mongo_op.find(self.buyer_coll, filter_dict, projection=projection)
        count = mongo_op.count_documents(self.buyer_coll, filter_dict)
        buyer_list = []
        for buyer in cursor:
            buyer_temp = deepcopy(buyer)
            # Guard against None having been stored in the database.
            buyer_id = buyer.get(CONST.BUYER_ID, '')
            chat_room_key = join_key(seller_id, buyer_id)
            filter_dict = {
                CONST.CHAT_ROOM_KEY: chat_room_key
            }
            # Most recent message in the room (sorted by _id descending).
            last_msg = mongo_op.find_one(
                self.chat_msg_record_coll,
                filter_dict,
                sort=[(CONST.ID, -1)]
            ) or {}
            buyer_temp[CONST.LAST_MSG] = last_msg.get(CONST.MSG_CONTENT)
            buyer_temp[CONST.LAST_TIME] = last_msg.get(CONST.SEND_TIME)
            buyer_list.append(buyer_temp)
        self.success(**{CONST.BUYER_LIST: buyer_list, CONST.SELLER_ID: seller_id, CONST.COUNT: count})
| [
"chenjiaxin@touchdata.io"
] | chenjiaxin@touchdata.io |
4c625a2df47fccd5e8dcf32fa957a588f590cb3a | 4cc368e591ee18067609f5a493f9570ea67dc0ad | /web_analytics_project/mainapp/models.py | 830be3a3fcf6b88bb5bbe556d26b9e4030d67a00 | [] | no_license | 253Youssef/WebAnalytics-Project | 716384886ce069c74b2230a8bf2932c7b6085851 | c6177eaf57eb8b0436e1564db1dc9eb113213906 | refs/heads/master | 2020-05-18T18:36:39.222466 | 2019-05-17T23:03:15 | 2019-05-17T23:03:15 | 184,590,471 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from django.urls import reverse
# Create your models here.
class Post(models.Model):
    """A user-authored post: title, body, timestamp and author."""
    title = models.CharField(max_length=100)
    content = models.TextField()
    # Defaults to creation time; timezone.now is passed uncalled so it
    # is evaluated per-save, not at class definition.
    date_posted = models.DateTimeField(default=timezone.now)
    # Deleting the user deletes their posts.
    author = models.ForeignKey(User, on_delete=models.CASCADE)

    def __str__(self):
        return self.title

    def get_absolute_url(self):
        # Redirect target used by class-based create/update views.
        return reverse('mainapp:post-detail', kwargs={'pk': self.pk})
"253youssefayman@gmail.com"
] | 253youssefayman@gmail.com |
1113dcdfb3bc80d5d421dc9b0dbc0fdd9b13f093 | 8c6cf0541b861714707f956e987438690568bc2c | /valid_password.py | 1a79af933f1e76ac88be7f5ec7551dc281b9f1b5 | [] | no_license | windellevega/stackleague | 8ed3029171ab95aefe410fb5ab0bf6e1aa205b9a | 54f7a020b74065bdcd6d314d358ddb488f66d07e | refs/heads/master | 2023-06-21T01:05:37.309207 | 2021-07-23T01:49:47 | 2021-07-23T01:49:47 | 350,924,917 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 927 | py | import math
import itertools
import re
import operator
import collections
def validate(password):
    """Return True when *password* satisfies every strength rule.

    Rules: at least 8 characters, at least one lowercase and one
    uppercase letter, no spaces or underscores anywhere, and at least
    one special character from !@#$%^&*().
    """
    if len(password) < 8:
        return False
    has_lower = any(ch.islower() for ch in password)
    has_upper = any(ch.isupper() for ch in password)
    has_forbidden = any(ch in ' _' for ch in password)
    has_special = any(ch in '!@#$%^&*()' for ch in password)
    return has_lower and has_upper and has_special and not has_forbidden
######################### END OF SOLUTION #########################
import unittest
class MyTestCase(unittest.TestCase):
    """Unit tests for validate()."""

    def test___password(self):
        # Satisfies every rule: length, both cases, special character.
        self.assertEqual(validate("Asfdasdf!234"), True)

    def test___white_spaces(self):
        # Spaces are forbidden anywhere in the password.
        self.assertEqual(validate("Asfdasdf !234"), False)

    def test___no_upper_case(self):
        # At least one uppercase letter is required.
        self.assertEqual(validate("sfdasdf!234"), False)

if __name__ == '__main__':
    unittest.main()
"windellevega@gmail.com"
] | windellevega@gmail.com |
39488c26270cabe7fb0720f02e7f86e06baa8868 | db5264994305e8c926f89cb456f33bd3a4d64f76 | /Sklep zielarski/orders/migrations/0001_initial.py | c21bd1168a45aeac59c66f7e35c2afffd875dd47 | [] | no_license | marcinpelszyk/Django | 7842e20d5e8b213c4cd42c421c1db9ab7d5f01d5 | aff2b9bd20e978a22a4a98994bf8424892d3c82f | refs/heads/main | 2023-05-01T19:20:37.267010 | 2021-05-18T17:51:53 | 2021-05-18T17:51:53 | 356,532,628 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,201 | py | # Generated by Django 3.1.7 on 2021-05-08 19:15
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('store', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Order',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('full_name', models.CharField(max_length=50)),
('address1', models.CharField(max_length=250)),
('address2', models.CharField(max_length=250)),
('city', models.CharField(max_length=100)),
('phone', models.CharField(max_length=100)),
('post_code', models.CharField(max_length=20)),
('created', models.DateTimeField(auto_now_add=True)),
('updated', models.DateTimeField(auto_now=True)),
('total_paid', models.DecimalField(decimal_places=2, max_digits=5)),
('order_key', models.CharField(max_length=200)),
('billing_status', models.BooleanField(default=False)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_user', to=settings.AUTH_USER_MODEL)),
],
options={
'ordering': ('-created',),
},
),
migrations.CreateModel(
name='OrderItem',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=5)),
('quantity', models.PositiveIntegerField(default=1)),
('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='items', to='orders.order')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order_items', to='store.product')),
],
),
]
| [
"marcin.pelszyk90@gmail.com"
] | marcin.pelszyk90@gmail.com |
5368e10c586e4215b5c8e3f625e26107c9cdac63 | 5b97095801c59098da69a97662f644a54e7e7be6 | /CS50-web-programming-python-django/projects/commerce/auctions/migrations/0024_auto_20201129_2144.py | 77a1a3fb12864ebc63f80691a862ddd8e02b63ca | [] | no_license | markmisener/harvardx-courses | 40f570c0c0557f8e2bcd72e32f34a2790d84d918 | 77e2dd63fda68aa09fc5ceb89d9d0cde11ae0885 | refs/heads/main | 2023-01-29T00:09:58.105746 | 2020-12-13T05:22:25 | 2020-12-13T05:22:25 | 315,108,443 | 0 | 0 | null | 2020-12-13T05:19:59 | 2020-11-22T18:46:56 | Python | UTF-8 | Python | false | false | 818 | py | # Generated by Django 3.1.3 on 2020-11-29 21:44
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: adds Listing.winner, relaxes Listing.user."""

    dependencies = [
        ('auctions', '0023_listing_active'),
    ]

    operations = [
        # New nullable FK recording which user won the auction.
        migrations.AddField(
            model_name='listing',
            name='winner',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='winning_user', to=settings.AUTH_USER_MODEL),
        ),
        # Existing poster FK made optional (blank/null allowed).
        migrations.AlterField(
            model_name='listing',
            name='user',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='poster', to=settings.AUTH_USER_MODEL),
        ),
    ]
| [
"markmisener@gmail.com"
] | markmisener@gmail.com |
e12c18b70fd84928cbc1573546c00e0a3f3c9f0f | d3c9301df20c32ac0a985008a0bbb64b7ffdda54 | /pyredis/scen/pf.py | d369ed20b7c95c5206eb9afdd4f3c0fdcec3eb49 | [] | no_license | tianyuzhiyou/wan-pyredis | 3f66657ec4baee95fedb15d6a03384b8dc1f14ad | 3c175a03601f7a55efe3aabedd252f59838a6d77 | refs/heads/master | 2023-06-04T22:17:07.778354 | 2021-06-23T07:03:48 | 2021-06-23T07:03:48 | 379,200,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,776 | py | # -*- coding: utf-8 -*-
import hashlib
class PfCache(object):
    """Thin wrapper around Redis HyperLogLog commands.

    HyperLogLog is a probabilistic structure, so cardinalities are
    approximate.  That is acceptable when exact counts are not needed,
    e.g. the number of distinct viewers of an article.
    """

    def __init__(self, cache=None):
        # ``cache`` is expected to behave like a redis client.
        self._client = cache

    def _get_signature(self, *args):
        """Return the md5 hex digest of the concatenated arguments.

        With no arguments an empty string is returned.
        """
        if not args:
            return ""
        digest = hashlib.md5()
        digest.update("".join(args).encode("utf-8"))
        return digest.hexdigest()

    def add(self, key, *args, **kwargs):
        """Add one signature per argument group under ``key``.

        Each positional argument is a sequence whose items are joined
        and hashed into a single element.  Optional keyword ``ex`` sets
        a TTL on the key, in seconds.  Returns True when the structure
        changed (i.e. at least one element was new).
        """
        if not args:
            return False
        ttl = kwargs.pop("ex", None)
        signatures = [self._get_signature(*group) for group in args]
        changed = self._client.pfadd(key, *signatures)
        if ttl:
            self._client.expire(key, int(ttl))
        return bool(changed)

    def count(self, key, *args):
        """Approximate cardinality of the union of ``key`` and ``args``."""
        all_keys = [key, *args]
        return self._client.execute_command('PFCOUNT', *all_keys)

    def merge(self, to, *sources):
        """Merge ``sources`` into ``to``, forming their union."""
        return self._client.pfmerge(to, *sources)
"626004181@qq.com"
] | 626004181@qq.com |
0414b9b01748784c314fd0dfef81da76993e9938 | 32054ac29f0c3a6321f82736d5cea1e5dd142a0b | /104.maximum-depth-of-binary-tree.py | 5ac0c204a64e4ec50be567c67c2eb1688401b151 | [] | no_license | yliu8976/FufuLeetCode | af70f8179518449aa2414816f8592f1d970f15ed | f8b1de54f3af911f920ff0e86a66ee3a2263cc16 | refs/heads/master | 2023-04-05T06:23:57.827043 | 2021-04-11T11:20:15 | 2021-04-11T11:20:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,304 | py | #
# @lc app=leetcode id=104 lang=python3
#
# [104] Maximum Depth of Binary Tree
#
# https://leetcode.com/problems/maximum-depth-of-binary-tree/description/
#
# algorithms
# Easy (67.50%)
# Likes: 3354
# Dislikes: 87
# Total Accepted: 1M
# Total Submissions: 1.5M
# Testcase Example: '[3,9,20,null,null,15,7]'
#
# Given the root of a binary tree, return its maximum depth.
#
# A binary tree's maximum depth is the number of nodes along the longest path
# from the root node down to the farthest leaf node.
#
#
# Example 1:
#
#
# Input: root = [3,9,20,null,null,15,7]
# Output: 3
#
#
# Example 2:
#
#
# Input: root = [1,null,2]
# Output: 2
#
#
# Example 3:
#
#
# Input: root = []
# Output: 0
#
#
# Example 4:
#
#
# Input: root = [0]
# Output: 1
#
#
#
# Constraints:
#
#
# The number of nodes in the tree is in the range [0, 10^4].
# -100 <= Node.val <= 100
#
#
# @lc code=start
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def maxDepth(self, root: TreeNode) -> int:
        """Return the number of nodes on the longest root-to-leaf path."""
        # An empty tree has depth zero.
        if not root:
            return 0
        left_depth = self.maxDepth(root.left)
        right_depth = self.maxDepth(root.right)
        # One for the current node plus the deeper subtree.
        return 1 + max(left_depth, right_depth)
# @lc code=end
| [
"frankxu@umich.edu"
] | frankxu@umich.edu |
165ca782092804494e27969dff1db2a7617c8440 | 7b45fdc06f2bfa6538271275de7b0595f068d0b6 | /user/usermanager.py | 7457daa5b9402e12e9c66c83ba7f2983b9712efd | [] | no_license | yu0428/pingtai_twice | ce6875d3fada0cd903c32243fe01ece60fad750a | bc1b942c0766e3fd8742a1754f3d07eceb279351 | refs/heads/master | 2021-01-09T21:50:41.791633 | 2015-06-21T04:48:29 | 2015-06-21T04:48:29 | 36,345,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 560 | py | #!/usr/bin/env python
__author__ = 'bing'
class User:
    """Simple container for a user's credentials."""

    def __init__(self, name, passwd):
        # Note: the parameter is `passwd` but the stored attribute is `password`.
        self.name = name
        self.password = passwd
class UserManager:
    """Authenticates user information and creates new users in the database."""

    def __init__(self):
        # Holds the most recent error message, if any.
        self.error = ""

    def authenticate(self, user):
        # TODO: check the credentials against the database.
        return True

    def create_new_user(self, user):
        # TODO: insert the user record into the database.
        return True
| [
"cbyrr1@gmail.com"
] | cbyrr1@gmail.com |
4cbd1bf398bacd6d203b1f3e374298c7a5351948 | 6dbdbefe7f3879b5d2ee437771c7e58b8cdfdfcc | /asset_allocation_v1/shell/db/asset_is_investor_criteria.py | ef443fdb8780ccf19beb347bf95e4b323e8aae9c | [] | no_license | fagan2888/recommend-model | 367346e96d7f8d5917a99271a36806ae10f30d14 | 5b0ae6cad65b565f5d3938c5cd185c42124d684f | refs/heads/master | 2022-11-11T09:47:13.675877 | 2019-07-30T04:23:49 | 2019-07-30T04:24:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,715 | py | #coding=utf8
from sqlalchemy import MetaData, Table, select, func, literal_column
# import string
# from datetime import datetime, timedelta
import pandas as pd
# import os
# import sys
import logging
import database
from dateutil.parser import parse
logger = logging.getLogger(__name__)
#
# ra_markowitz
#
# def load(gids, xtypes=None):
# db = database.connection('asset')
# metadata = MetaData(bind=db)
# t1 = Table('ra_markowitz', metadata, autoload=True)
# columns = [
# t1.c.globalid,
# t1.c.ra_type,
# t1.c.ra_pool,
# t1.c.ra_reshape,
# t1.c.ra_name,
# ]
# s = select(columns)
# if gids is not None:
# s = s.where(t1.c.globalid.in_(gids))
# if xtypes is not None:
# s = s.where(t1.c.ra_type.in_(xtypes))
# df = pd.read_sql(s, db)
# return df
def save(gid, criteria_id, df):
    """Persist investor-criteria rows for (`gid`, `criteria_id`).

    `df` is expected to be indexed by (is_investor_id, is_criteria_id,
    is_date) with an `is_value` column -- TODO confirm against callers.
    Existing rows are loaded so `database.batch` can reconcile new vs. old;
    presumably it writes the difference -- verify `database.batch`.
    """
    # Round the value column to 6 decimal places before comparing/writing.
    fmt_columns = ['is_value']
    fmt_precision = 6
    if not df.empty:
        df = database.number_format(df, fmt_columns, fmt_precision)
    #
    # Save the timing results to the database.
    #
    db = database.connection('asset')
    t2 = Table('is_investor_criteria', MetaData(bind=db), autoload=True)
    columns = [literal_column(c) for c in (df.index.names + list(df.columns))]
    s = select(columns, (t2.c.is_investor_id == gid)).where(t2.c.is_criteria_id == criteria_id)
    # Load the rows currently stored for this investor/criteria pair.
    df_old = pd.read_sql(s, db, index_col=['is_investor_id', 'is_criteria_id', 'is_date'], parse_dates=['is_date'])
    if not df_old.empty:
        # Apply the same rounding so old and new values compare equal.
        df_old = database.number_format(df_old, fmt_columns, fmt_precision)
    # Update the database with the difference between df and df_old.
    # print df_new.head()
    # print df_old.head()
    database.batch(db, t2, df, df_old, timestamp=False)
| [
"liulikun@gmail.com"
] | liulikun@gmail.com |
167b432f42540e3d03e42f06fed6a9f210ad66f4 | 16cabaf09583547a6034dc2d99086ed852f06c87 | /text to emoji gui.pyw | 66670cf96c27b4f91c5397882415210791b85005 | [] | no_license | NicolasCaceda/DiscordLetterText | be837b965726c2b63bbfcc369df2fd213bc3f548 | 0b0d65bb99c16fad15f5419ae0c5ea6f2b042c36 | refs/heads/master | 2020-03-26T15:15:08.770039 | 2018-08-16T19:53:38 | 2018-08-16T19:53:38 | 145,031,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,775 | pyw | import tkinter
class ConvertDiscordEmojiGUI:
    """Tkinter app that converts typed text into Discord emoji markup."""

    def __init__(self):
        # Main window and the five stacked frames that hold the widgets.
        self.main_window = tkinter.Tk()
        self.main_window.title("Discord Emoji Text")
        self.top_frame = tkinter.Frame(self.main_window)            # input label
        self.middle_first_frame = tkinter.Frame(self.main_window)   # input entry
        self.middle_second_frame = tkinter.Frame(self.main_window)  # output label
        self.middle_third_frame = tkinter.Frame(self.main_window)   # output text
        self.bottom_frame = tkinter.Frame(self.main_window)         # buttons/counter

        # Widgets.
        self.input_label = tkinter.Label(self.top_frame, text="Input")
        self.input_entry = tkinter.Entry(self.middle_first_frame, width=50)
        self.output_label = tkinter.Label(self.middle_second_frame, text="Output")
        self.output_text = tkinter.Text(self.middle_third_frame, width=50, height=20, state="disabled")
        self.copy_button = tkinter.Button(self.bottom_frame, text="Copy", command=self.copy)
        self.convert_button = tkinter.Button(self.bottom_frame, text="Convert", command=self.convert_string)
        self.char_label = tkinter.Label(self.bottom_frame, text="Total Characters: ")
        self.char_counter_var = tkinter.StringVar()
        self.char_counter_label = tkinter.Label(self.bottom_frame, textvariable=self.char_counter_var)

        # Layout: frames top to bottom, then the widgets inside each frame.
        self.top_frame.pack()
        self.middle_first_frame.pack()
        self.middle_second_frame.pack()
        self.middle_third_frame.pack()
        self.bottom_frame.pack()
        self.input_label.pack(side="top")
        self.input_entry.pack(side="top", expand=True)
        self.output_label.pack(side="top")
        self.output_text.pack(side="top", expand=True)
        self.convert_button.pack(side="left")
        self.copy_button.pack(side="left")
        self.char_label.pack(side="left")
        self.char_counter_label.pack(side="right")

        # Blocks here until the window is closed.
        tkinter.mainloop()

    def copy(self):
        """Copy the converted text (as read from the widget) to the clipboard."""
        converted = str(self.output_text.get("1.0", "end"))
        self.output_text.clipboard_clear()
        self.output_text.clipboard_append(converted)

    @staticmethod
    def num_switch(number):
        """Map a digit character to its Discord emoji code (None if unknown)."""
        return {
            "0": ":zero: ",
            "1": ":one: ",
            "2": ":two: ",
            "3": ":three: ",
            "4": ":four: ",
            "5": ":five: ",
            "6": ":six: ",
            "7": ":seven: ",
            "8": ":eight: ",
            "9": ":nine: ",
        }.get(number)

    @staticmethod
    def char_switch(char):
        """Map a punctuation character to its emoji code, else return it unchanged."""
        return {
            "!": ":exclamation: ",
            "?": ":question: ",
            "#": ":hash: ",
            "*": ":asterisk: ",
            "+": ":heavy_plus_sign: ",
            "-": ":heavy_minus_sign: ",
            "$": ":heavy_dollar_sign: ",
            "©": ":copyright: ",
            "®": ":registered: ",
            "™": ":tm: ",
        }.get(char, char)

    def convert_string(self):
        """Translate the input entry into emoji markup and display it."""
        self.output_text.configure(state="normal")
        self.output_text.delete("1.0", "end")
        source = str(self.input_entry.get()).lower()
        pieces = []
        for ch in source:
            if ch == " ":
                pieces.append(" ")
            elif ch.isdigit():
                pieces.append(self.num_switch(ch))
            elif not ch.isdigit() and not ch.isalpha():
                pieces.append(self.char_switch(ch))
            else:
                pieces.append(":regional_indicator_" + ch + ": ")
        # Drop the final character (the trailing separator), as before.
        converted = "".join(pieces)[:-1]
        self.char_counter_var.set(len(converted))
        self.output_text.insert('end', converted)
        self.output_text.configure(state="disabled")
| [
"nicolas.caceda@gmail.com"
] | nicolas.caceda@gmail.com |
636817d0c3b6740f1462067612714d62481e32b0 | b8b2206a952e01526c403d8fd61821ec109fdee1 | /nlp_sarcasm.py | 6ac481b8fa2d5be27a1a7eb2a0d8aad3b546608b | [] | no_license | jenny-chou/tensorflow_practices | 477ae44ccf817faeda88dcd271945385467e7498 | 4359d113b146ca141082c4fd49621a677e919523 | refs/heads/main | 2023-04-20T00:10:30.003404 | 2021-05-03T14:46:00 | 2021-05-03T14:46:00 | 363,962,695 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,697 | py | import numpy as np
import tensorflow as tf
# import matplotlib.pyplot as plt
import json
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Embedding, GlobalAveragePooling1D
# practice with Kaggle data
# initialize parameters
# (hyperparameters for tokenisation, padding and training)
training_size = 20000
vocab_size = 10000
oov_token = "<OOV>"
max_length = 100
padding_type = 'post'
trunc_type = 'post'
embedding_dim = 16
num_epochs = 5
batch_size = 10
# read raw data from json
with open("Sarcasm_Headlines_Dataset.json", 'r') as file:
    datastore = json.load(file)
# extract json
sentences, labels, urls = [], [], []
for item in datastore:
    sentences.append(item['headline'])
    labels.append(item['is_sarcastic'])
    urls.append(item['article_link'])
# split train and test set
training_sentences = sentences[0:training_size]
testing_sentences = sentences[training_size:]
training_labels = labels[0:training_size]
testing_labels = labels[training_size:]
print(training_sentences[0])
# neural net only sees training data. So tokenizer should fit to only training data
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_token)
tokenizer.fit_on_texts(training_sentences)
# word_index is dict of all the unique words and its index value
# length of dict is the number of unique tokens/words
word_index = tokenizer.word_index
print(len(word_index))
# convert training set to numerical sequence
training_sequences = tokenizer.texts_to_sequences(training_sentences)
# transform training sequence to same length by padding or truncating
training_padded = pad_sequences(training_sequences, padding=padding_type, maxlen=max_length, truncating=trunc_type)
print("training_sequences[0]:", training_sequences[0])
print("training_padded[0]:", training_padded[0])
print("training_padded.shape", training_padded.shape)
# convert and transform testing set
testing_sequences = tokenizer.texts_to_sequences(testing_sentences)
testing_padded = pad_sequences(testing_sequences, padding=padding_type, maxlen=max_length, truncating=trunc_type)
# convert to array to get it to work with TensorFlow 2.x
training_padded = np.array(training_padded)
training_labels = np.array(training_labels)
testing_padded = np.array(testing_padded)
testing_labels = np.array(testing_labels)
# create and compile embedded model
# (embedding + average pooling + small dense head; sigmoid output for the
# binary sarcastic / not-sarcastic label)
model = Sequential([
    Embedding(vocab_size, embedding_dim, input_length=max_length),
    GlobalAveragePooling1D(),
    Dense(24, activation='relu'),
    Dense(1, activation='sigmoid')])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
"""
model.summary()
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
embedding (Embedding)        (None, 100, 16)           160000
_________________________________________________________________
global_average_pooling1d (Gl (None, 16)                0
_________________________________________________________________
dense (Dense)                (None, 24)                408
_________________________________________________________________
dense_1 (Dense)              (None, 1)                 25
=================================================================
Total params: 160,433
Trainable params: 160,433
Non-trainable params: 0
"""
# fit model with training set and validate with testing set
history = model.fit(training_padded, training_labels, batch_size=batch_size, epochs=num_epochs,
                    validation_data=(testing_padded, testing_labels))
print(history.history['val_accuracy'])
print(history.history['accuracy'])
# # plot accuracy and loss
# def plot_graphs(history, string):
#     plt.plot(history.history[string])
#     plt.plot(history.history['val'+string])
#     plt.xlabel("epochs")
#     plt.ylabel(string)
#     plt.legand([string, 'val_'+string])
#     plt.show()
#
# plot_graphs(history, "accuracy")
# plot_graphs(history, "loss")
# # predict new sentences if they're sarcastic
# seed = [
#     "granny starting to fear spiders in the garden might be real",
#     "the weather today is bright and sunny"
# ]
# seed_seq = tokenizer.texts_to_sequences(seed)
# seed_pad = pad_sequences(seed_seq, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# print(model.predict(seed_pad))
# # use bidirectional LSTM layers to make prediction
# binary classification problem: Is this sentence sarcastic? or not sarcastic?
# Deeper variant: stacked bidirectional LSTMs instead of average pooling.
embedding_dim = 64
model2 = Sequential([
    Embedding(vocab_size, embedding_dim),
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64, return_sequences=True)),
    # wrap LSTM in Bidirectional:
    # Looks at sentence forward & backward
    # Learn best parameter in each directions and merge them
    tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(64)),
    Dense(64, activation='relu'),
    Dense(1, activation='sigmoid')
])
model2.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
"""
model2.summary()
Model: "sequential_1"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
embedding_1 (Embedding)      (None, None, 64)          640000
_________________________________________________________________
bidirectional (Bidirectional (None, None, 128)         66048
_________________________________________________________________
bidirectional_1 (Bidirection (None, 128)               98816
_________________________________________________________________
dense_2 (Dense)              (None, 64)                8256
_________________________________________________________________
dense_3 (Dense)              (None, 1)                 65
=================================================================
Total params: 813,185
Trainable params: 813,185
Non-trainable params: 0
"""
# model2.fit(training_padded, training_labels, batch_size=batch_size, epochs=num_epochs,
#            validation_data=(testing_padded, testing_labels))
#
# # predict new sentences if they're sarcastic
# seed = [
#     "granny starting to fear spiders in the garden might be real",
#     "the weather today is bright and sunny"
# ]
# seed_seq = tokenizer.texts_to_sequences(seed)
# seed_pad = pad_sequences(seed_seq, maxlen=max_length, padding=padding_type, truncating=trunc_type)
# print(model2.predict(seed_pad))
"""
[[5.829891e-01]
 [6.624551e-05]]
"""
"jennychou0823@gmail.com"
] | jennychou0823@gmail.com |
f3c87a426b410738daa15cf0e744653471d9a380 | b105fa4092a08d7263b7e67760c23734c227e740 | /41 lesson/urls.py | 7561cc882b8db2f0ae59155f2123f48ace68792b | [] | no_license | SerhijZhenzhera/zhzhs_python | 98276777839d7d5557216bdcaaf171dfd0e132d1 | 3401bfbe69a6eadf5d9e2cea26ae57e321312142 | refs/heads/master | 2023-04-17T12:29:52.764419 | 2021-04-27T02:32:13 | 2021-04-27T02:32:13 | 323,181,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,941 | py | from django.urls import path
from .views import edit, dashboard, register
from django.urls import reverse_lazy
from django.contrib.auth.views import (LoginView, LogoutView, PasswordResetDoneView, PasswordResetView,
PasswordResetCompleteView, PasswordResetConfirmView,
PasswordChangeView, PasswordChangeDoneView,
PasswordResetDoneView)
# URL namespace used for reversing, e.g. reverse('authapp:login').
app_name = 'authapp'
urlpatterns = [
    path('register/', register, name='register'),
    path('edit/', edit, name='edit'),
    path('dashboard/', dashboard, name='dashboard'),
    # Login is served at the app root with a custom template.
    path('', LoginView.as_view(template_name='registration/login.html'), name='login'),
    path('logout/', LogoutView.as_view(template_name='authapp/logged_out.html'), name='logout'),
    path('password_change/', PasswordChangeView.as_view(
        template_name='authapp/password_change_form.html'), name='password_change'),
    # NOTE(review): 'dond' looks like a typo for 'done'; this is a public
    # URL, so confirm nothing links to it before renaming.
    path('password_change/dond/', PasswordChangeDoneView.as_view(template_name='authapp/password_change_done.html'),
         name='password_change_done'),
    # Password-reset flow: request email -> done page -> confirm link -> complete.
    path('password_reset/', PasswordResetView.as_view(
        template_name='authapp/password_reset_form.html',
        email_template_name='authapp/password_reset_email.html',
        success_url=reverse_lazy('authapp:password_reset_done')), name='password_reset'),
    path('password_reset/done/', PasswordResetDoneView.as_view(
        template_name='authapp/password_reset_done.html'), name='password_reset_done'),
    path('reset/<uidb64>/<token>/', PasswordResetConfirmView.as_view(
        template_name='authapp/password_reset_confirm.html',
        success_url=reverse_lazy('authapp:login')), name='password_reset_confirm'),
    path('reset/done/', PasswordResetCompleteView.as_view(
        template_name='authapp/password_reset_complete.html'), name='password_reset_complete'),
]
| [
"noreply@github.com"
] | noreply@github.com |
c82d3a37fc944204f5db277b2c98979ab8efef44 | 76d4430567b68151df1855f45ea4408f9bebe025 | /test/functional/test_framework/coverage.py | 7f4c1c66546c66896b4314f57e91dcf935f48336 | [
"MIT"
] | permissive | MicroBitcoinOrg/MicroBitcoin | f761b2ff04bdcb650d7c0ddbef431ef95cd69541 | db7911968445606bf8899903322d5d818d393d88 | refs/heads/master | 2022-12-27T10:04:21.040945 | 2022-12-18T05:05:17 | 2022-12-18T05:05:17 | 132,959,214 | 21 | 33 | MIT | 2020-06-12T04:38:45 | 2018-05-10T22:07:51 | C++ | UTF-8 | Python | false | false | 3,386 | py | #!/usr/bin/env python3
# Copyright (c) 2015-2018 The MicroBitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Utilities for doing coverage analysis on the RPC interface.
Provides a way to track which RPC commands are exercised during
testing.
"""
import os
REFERENCE_FILENAME = 'rpc_interface.txt'
class AuthServiceProxyWrapper():
    """Proxy that records every RPC method invoked through it.

    Wraps an AuthServiceProxy instance and appends each called service
    name to an optional coverage log file.
    """
    def __init__(self, auth_service_proxy_instance, coverage_logfile=None):
        """
        Kwargs:
            auth_service_proxy_instance (AuthServiceProxy): the instance
                being wrapped.
            coverage_logfile (str): if specified, write each service_name
                out to a file when called.
        """
        self.auth_service_proxy_instance = auth_service_proxy_instance
        self.coverage_logfile = coverage_logfile

    def __getattr__(self, name):
        attribute = getattr(self.auth_service_proxy_instance, name)
        if isinstance(attribute, type(self.auth_service_proxy_instance)):
            # Keep wrapping nested proxies so their calls are logged too.
            return AuthServiceProxyWrapper(attribute, self.coverage_logfile)
        # The proxy returned an unwrapped value; pass it through unchanged.
        return attribute

    def __call__(self, *args, **kwargs):
        """Invoke the wrapped proxy, then record which RPC method ran."""
        result = self.auth_service_proxy_instance.__call__(*args, **kwargs)
        self._log_call()
        return result

    def _log_call(self):
        method_name = self.auth_service_proxy_instance._service_name
        if self.coverage_logfile:
            with open(self.coverage_logfile, 'a+', encoding='utf8') as log:
                log.write("%s\n" % method_name)

    def __truediv__(self, relative_uri):
        wrapped = self.auth_service_proxy_instance / relative_uri
        return AuthServiceProxyWrapper(wrapped, self.coverage_logfile)

    def get_request(self, *args, **kwargs):
        self._log_call()
        return self.auth_service_proxy_instance.get_request(*args, **kwargs)
def get_filename(dirname, n_node):
    """Return a coverage-file path unique to this process ID and node index.

    The file will contain a list of RPC commands covered.
    """
    basename = "coverage.pid%s.node%s.txt" % (os.getpid(), n_node)
    return os.path.join(dirname, basename)
def write_all_rpc_commands(dirname, node):
    """
    Write out a list of all RPC functions available in `micro-cli` for
    coverage comparison. This will only happen once per coverage
    directory.

    Args:
        dirname (str): temporary test dir
        node (AuthServiceProxy): client

    Returns:
        bool. if the RPC interface file was written.
    """
    filename = os.path.join(dirname, REFERENCE_FILENAME)
    if os.path.isfile(filename):
        # Already generated for this coverage directory.
        return False
    commands = set()
    for raw_line in node.help().split('\n'):
        stripped = raw_line.strip()
        # Skip blank lines and '=== section ===' headers.
        if stripped and not stripped.startswith('='):
            commands.add("%s\n" % stripped.split()[0])
    with open(filename, 'w', encoding='utf8') as f:
        f.writelines(list(commands))
    return True
| [
"iamstenman@protonmail.com"
] | iamstenman@protonmail.com |
3018d83ac2e45b567d543161d4efa6c95141ef00 | f45cc0049cd6c3a2b25de0e9bbc80c25c113a356 | /LeetCode/石子游戏/5611. 石子游戏 VI.py | ea53be09b91192af8790730394fd8bcd26bf5197 | [] | no_license | yiming1012/MyLeetCode | 4a387d024969bfd1cdccd4f581051a6e4104891a | e43ee86c5a8cdb808da09b4b6138e10275abadb5 | refs/heads/master | 2023-06-17T06:43:13.854862 | 2021-07-15T08:54:07 | 2021-07-15T08:54:07 | 261,663,876 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,385 | py | """
5611. 石子游戏 VI
Alice 和 Bob 轮流玩一个游戏,Alice 先手。
一堆石子里总共有 n 个石子,轮到某个玩家时,他可以 移出 一个石子并得到这个石子的价值。Alice 和 Bob 对石子价值有 不一样的的评判标准 。
给你两个长度为 n 的整数数组 aliceValues 和 bobValues 。aliceValues[i] 和 bobValues[i] 分别表示 Alice 和 Bob 认为第 i 个石子的价值。
所有石子都被取完后,得分较高的人为胜者。如果两个玩家得分相同,那么为平局。两位玩家都会采用 最优策略 进行游戏。
请你推断游戏的结果,用如下的方式表示:
如果 Alice 赢,返回 1 。
如果 Bob 赢,返回 -1 。
如果游戏平局,返回 0 。
示例 1:
输入:aliceValues = [1,3], bobValues = [2,1]
输出:1
解释:
如果 Alice 拿石子 1 (下标从 0开始),那么 Alice 可以得到 3 分。
Bob 只能选择石子 0 ,得到 2 分。
Alice 获胜。
示例 2:
输入:aliceValues = [1,2], bobValues = [3,1]
输出:0
解释:
Alice 拿石子 0 , Bob 拿石子 1 ,他们得分都为 1 分。
打平。
示例 3:
输入:aliceValues = [2,4,3], bobValues = [1,6,7]
输出:-1
解释:
不管 Alice 怎么操作,Bob 都可以得到比 Alice 更高的得分。
比方说,Alice 拿石子 1 ,Bob 拿石子 2 , Alice 拿石子 0 ,Alice 会得到 6 分而 Bob 得分为 7 分。
Bob 会获胜。
提示:
n == aliceValues.length == bobValues.length
1 <= n <= 105
1 <= aliceValues[i], bobValues[i] <= 100
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/stone-game-vi
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
"""
from typing import List
"""
贪心做法:
证明:
假设只有两个石头,对于 a, b 的价值分别是 a1, a2, b1, b2
第一种方案是A取第一个,B取第二个,A与B的价值差是 c1 = a1 - b2
第二种方案是A取第二个,B取第一个,A与B的价值差是 c2 = a2 - b1
那么这两种方案对于A来说哪一种更优,就取决于两个方案的价值差的比较
记 c = c1 - c2 = (a1 - b2) - (a2 - b1) = (a1 + b1) - (a2 + b2)
如果c > 0 那么方案一更优,如果c == 0,那么两种方案价值一样,如果c < 0那么方案二更优
那么比较两个方案的优劣 == 比较 a1 + b1 与 a2 + b2 的优劣 ,
归纳一下就是比较每个下标 i 的 a[i] + b[i] 的优劣
所以贪心的策略:将两组石头的价值合并,每次取价值最大的那一组。
写法:先将两个数组的价值合并,并用下标去标记
对价值排序,A取偶数下标,B取奇数下标,最后比较A,B的价值总和
"""
class Solution:
    def stoneGameVI(self, a: List[int], b: List[int]) -> int:
        """Greedy: both players always take the stone maximising a[i] + b[i].

        Alice (moving first) collects the a-values of the even-ranked
        stones, Bob the b-values of the odd-ranked ones.  Returns 1 if
        Alice wins, -1 if Bob wins, 0 on a draw.
        """
        stones = sorted(zip(a, b), key=lambda pair: pair[0] + pair[1], reverse=True)
        alice_score = sum(alice_value for alice_value, _ in stones[0::2])
        bob_score = sum(bob_value for _, bob_value in stones[1::2])
        if alice_score > bob_score:
            return 1
        if alice_score < bob_score:
            return -1
        return 0


if __name__ == '__main__':
    aliceValues = [1, 3]
    bobValues = [2, 1]
    print(Solution().stoneGameVI(aliceValues, bobValues))
| [
"1129079384@qq.com"
] | 1129079384@qq.com |
d544cf72032322140529f7e21f0da9b534d87aa6 | 84b27451c2d347cdc381b035905b17c5509160bd | /20180614_00/20180614_03.py | 85a0f1ac59e60a26cbff1d913a80e72060d91db1 | [] | no_license | TrunkWang/PythonNote | 70f998e8916c9a36d3fef422a913d21ebadd82ec | 5ede3d9ff6282a4b22dff22f5671d63c5c4e493b | refs/heads/master | 2020-03-19T02:45:34.837267 | 2018-06-24T13:47:49 | 2018-06-24T13:47:49 | 135,658,072 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 742 | py | #!/usr/bin/env python
# -*- coding:utf-8-*-
import threading
import time
import os
def coChore():
    # Simulate a unit of work by sleeping for half a second.
    time.sleep(0.5)
class BootThread(threading.Thread):
    """Worker thread that decrements a shared ticket counter (Python 2 code).

    NOTE(review): run() reads the module-level `monitor`, not self.monitor,
    and os._exit(0) is called while the lock is still held, terminating the
    whole process as soon as the tickets run out.
    """
    def __init__(self,tid,monitor):
        self.tid = tid
        self.monitor = monitor
        threading.Thread.__init__(self)
    def run(self):
        while True:
            monitor['lock'].acquire()
            if monitor['ticks'] != 0 :
                monitor['ticks'] = monitor['ticks'] - 1
                print 'self tid ' , self.tid , 'ticks ' ,monitor['ticks']
                # Sleeps while still holding the lock -- serialises the workers.
                coChore()
            else:
                print 'self tid ' ,self.tid ,'finish'
                os._exit(0)
            monitor['lock'].release()
            coChore()
# Shared state: 100 tickets guarded by a single lock.
lock = threading.Lock()
monitor = {'ticks':100,'lock':lock}
# Start ten worker threads competing for the tickets.
for k in range(10):
    newclassthread = BootThread(k,monitor)
    newclassthread.start()
| [
"xuefan0609@sohu.com"
] | xuefan0609@sohu.com |
58c456b9e168ba17eb5cc5d6e3bc8715df702e0d | f4dd8aa4e5476ffde24e27273dd47913c7f9177a | /Dlv2_safe2/tests/parser/edbidb.2.test.py | 7ca82d647d28036317512550cf746da486a374b1 | [
"Apache-2.0"
] | permissive | dave90/Dlv_safe2 | e56071ec1b07c45defda571cb721852e2391abfb | f127f413e3f35d599554e64aaa918bc1629985bc | refs/heads/master | 2020-05-30T10:44:13.473537 | 2015-07-12T12:35:22 | 2015-07-12T12:35:22 | 38,256,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | input = """
g(3,4).
g(4,1).
h(X,Y):- h(Y,X).
g(X,Y):- h(X,Z), g(Z,Y).
h(3,4).
g(5,2).
"""
output = """
g(3,4).
g(4,1).
h(X,Y):- h(Y,X).
g(X,Y):- h(X,Z), g(Z,Y).
h(3,4).
g(5,2).
"""
| [
"davide@davide-All-Series"
] | davide@davide-All-Series |
8d0039781b6ab3e7a778be7dadf37916bf356057 | 55aba9dae9b9b6caa5617b5d4f68e0678cdcda93 | /dm2019summer_hw2/regularization-cross-validation/logistic_r.py | 99e29939af4350dfcb9af0ff4592e2f28f6524c9 | [] | no_license | Dearkano/Data-Mining | e13422a38f1a5def4ddd90cd5a1ebcbe62c746d8 | c574e0ab4dd734e06c137a5a4e3fa3b770b062e9 | refs/heads/master | 2020-05-23T14:03:35.652520 | 2019-06-25T19:03:14 | 2019-06-25T19:03:14 | 186,793,159 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 717 | py | import numpy as np
def sigmoid(inX):
    """Element-wise logistic sigmoid, 1 / (1 + exp(-x)).

    Bug fix: the original returned (1 + np.exp(inX)), which is neither the
    sigmoid nor its denominator; values were unbounded instead of in (0, 1).
    """
    return 1.0 / (1 + np.exp(-inX))
def logistic_r(X, y, lmbda):
    '''
    LR Logistic Regression with L2 regularisation, trained by gradient descent.

    INPUT:  X: training sample features, P-by-N matrix.
            y: training sample labels, 1-by-N row vector with +1/-1 entries.
            lmbda: L2 regularisation weight.

    OUTPUT: w: learned parameters, P-by-1 column vector.
            (NOTE: no bias row is added, so the result is P-by-1, not the
            (P+1)-by-1 the original docstring claimed.)
    '''
    P, N = X.shape
    w = np.ones((P, 1))
    r = 0.0001        # learning rate
    step = 0.001      # stop once the update norm falls below this
    max_iter = 1000   # renamed from `iter`, which shadowed the builtin
    # (removed the unused `l = np.matrix(y)`; np.matrix is deprecated)
    for i in range(max_iter):
        prev = w
        # Gradient of the logistic loss plus the 2*lmbda*w regularisation term.
        w = w - r * (np.sum(-y * X / ( 1 + np.exp(y * np.matmul(w.T, X))), axis=1).reshape(-1, 1)+ 2*lmbda*w)
        if np.linalg.norm(w - prev) < step:
            break
    return w
| [
"vaynetian@cc98.org"
] | vaynetian@cc98.org |
eb5c2010387158948bc1e2996332dbd8a800d330 | 17bdb906c2c0886d6451b55ac84633d416d5c50a | /chapter_one/test_list.py | 28308ca52a00ce387a5426c39769e05cde52ba57 | [
"MIT"
] | permissive | vyahello/unittest-bootcamp | 10053994dc834720b76df90a37d4756a6f1437c7 | af24c5c00032ab7265a0c00da5955a26d25dff33 | refs/heads/master | 2021-07-17T03:42:30.058662 | 2020-05-09T22:21:17 | 2020-05-09T22:21:17 | 139,250,120 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,026 | py | from typing import List
from unittest import TestCase
class TestListMethods(TestCase):
    """Exercise the basic mutation methods of the built-in list type."""

    def test_append(self) -> None:
        """append() adds each element to the end of the list."""
        collected = []
        for value in (1, 2, 3):
            collected.append(value)
        self.assertEqual(collected, [1, 2, 3])

    def test_extend(self) -> None:
        """extend() appends every element of the given iterable."""
        values = [1, 2, 3]
        values.extend(range(4, 6))
        self.assertEqual(values[-2:], [4, 5])

    def test_insert(self) -> None:
        """insert() places an element at the requested index."""
        values = [1, 2, 3]
        values.insert(3, 4)
        self.assertEqual(values, [1, 2, 3, 4])

    def test_pop(self) -> None:
        """pop() removes the element at the given index."""
        values = [1, 2, 3]
        values.pop(1)
        self.assertEqual(values, [1, 3])
| [
"vyahello@gmail.com"
] | vyahello@gmail.com |
5679e7a79c5e5694fc959140e9c696630b307830 | 2a6dbece45c391e6dc3d28f04c7c02b18d17b24b | /myapi/views.py | e7798b0b48cf6abbd44a3179bd4bbeac4e5ba3e6 | [] | no_license | KassadReda/Rest_API_Blender | 3b1e43b2a488541d8d8a9aa9f95a39c4e70c34ae | ee9e5216462902a5cfee98a5502b4580b3af12e6 | refs/heads/main | 2023-04-17T06:35:49.204734 | 2021-05-03T22:45:52 | 2021-05-03T22:45:52 | 364,080,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 659 | py | """
class BlenderViewSet
this class define how to display a model.
by Reda
"""
# coding=utf-8
from django.shortcuts import render
from rest_framework import viewsets
from .serializers import BlenderModelSerializer
from .models import BlenderModel
# Create your views here.
class BlenderViewSet(viewsets.ModelViewSet) :
queryset = BlenderModel.objects.all().order_by('name')
serializer_class = BlenderModelSerializer
#serialise the uploaded file
def file(self, request,pk=None) :
blenderModel= self.get_object()
file = blenderModel.file
serializer = BlenderModelSerializer(file, data=request.data) | [
"="
] | = |
6b5d998541fd2b21e9067847d776fa45634941f0 | c33746a1eb2fdff17b302a457d96a8423025d956 | /const_and_inp.py | c08c61afe753d97fab33ee0ca883b0a21121a7c4 | [] | no_license | DavitNoreyanFD/Astronomy_Task_v_2 | 46577362ed6c35e4ffa6e337d81c7131fe9b562a | 15272e66177b6bd050fab98d6a89fd8d8bab47ab | refs/heads/master | 2023-04-10T06:21:05.712296 | 2021-04-21T19:41:58 | 2021-04-21T19:41:58 | 360,260,433 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 470 | py | """
All constants and imported variables were collected in this module.
"""
import configparser
config = configparser.ConfigParser()
config.read('config.ini')
data_file = config['USER']['data_file']
fov_v = float(config['USER']['fov_v'])
fov_h = float(config['USER']['fov_h'])
ra_user = float(config['USER']['ra_user'])
dec_user = float(config['USER']['dec_user'])
n = int(config['USER']['n'])
INDEX_ID = 7
INDEX_RA = 0
INDEX_DEC = 1
INDEX_MAG = 22
INDEX_FLUX = 20
| [
"davit.noreyan.fd@gmail.com"
] | davit.noreyan.fd@gmail.com |
85116fe45968b70261db2e77ba8680b3da2d7385 | 7604e3f3f4c6dae4a46473c2a672e87ac180dff3 | /CosineTransform/GradientDescent.py | 00d1c6a092e45ab50daaf38e15221dcaf60463f8 | [
"MIT"
] | permissive | Louiii/MarianaOptimisation | 55eaa5ef5e6c1b6e020a2c80223a5a2b514866f0 | 74e2f27c68add73b19656c23acdd654b0a24fc30 | refs/heads/master | 2022-04-25T05:03:24.572907 | 2020-04-23T00:27:24 | 2020-04-23T00:27:24 | 258,024,889 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,594 | py | from LoadSmooth import Mariana
import numpy as np
from tqdm import tqdm
import matplotlib.pyplot as plt
def gradDescent(xstart, alpha=0.005):
    """Follow the gradient of the module-level Mariana surface from `xstart`.

    NOTE(review): despite the name, this *adds* alpha * grad each step
    (gradient ascent), and `xcurrent += ...` mutates `xstart` in place, so
    the caller's array is modified.  The step size is damped by
    min(1, 100/t) after 100 iterations.
    Returns (xs, gs): the visited [x, y, f(x, y)] points and their gradients.
    """
    xcurrent = xstart
    gradcurrent = m.grad_true(xcurrent)
    xs, gs = [np.array([xcurrent[0], xcurrent[1], m.f_true(xcurrent)])], [gradcurrent]
    for t in range(1, 200):
        xcurrent += alpha * gradcurrent * min(1, 100/t)
        gradcurrent = m.grad_true(xcurrent)
        xs.append( np.array([xcurrent[0], xcurrent[1], m.f_true(xcurrent)]) )
        gs.append( gradcurrent.copy() )
    return xs, gs
def plotpath(xs, gs):
    """Overlay one descent path and its gradient arrows on the Mariana image.

    `xs` holds [x, y] points in [0, 1] coordinates (scaled by 1000 to image
    pixels); `gs` holds the matching gradients.
    """
    implot = plt.imshow(m.mariana)
    x, y = list(zip(*xs))
    x, y = np.array(x)*1000, np.array(y)*1000
    # Negate and shrink the gradients so the quiver arrows point downhill.
    gs = -np.array(gs)*0.001
    xlims = np.array([min(x), max(x)])
    ylims = np.array([min(y), max(y)])
    # Expand the shorter axis so the viewport stays square around the path.
    dx, dy = xlims[1]-xlims[0], ylims[1]-ylims[0]
    if dx>dy:
        s = np.sum(ylims)/2
        ylims = np.array([s-dx/2, s+dx/2])
    else:
        s = np.sum(xlims)/2
        xlims = np.array([s-dy/2, s+dy/2])
    # Add a 5-pixel margin on every side.
    xlims[0] -= 5
    xlims[1] += 5
    ylims[0] -= 5
    ylims[1] += 5
    plt.quiver(x, y, gs[:, 0], gs[:, 1])
    plt.scatter(x, y, s=1, c='r')
    plt.xlim(xlims)
    plt.ylim(ylims)
    plt.show()
    plt.clf()
# Load the smoothed Mariana surface used by gradDescent/plotpath above.
m = Mariana()
# xinit = np.random.uniform(0, 1, 2)
# xs, gs = gradDescent(xinit)
# Run a descent from every point on a 7x7 grid over [0.1, 0.9]^2.
paths = []
n = 7
for xi in np.linspace(0.1, 0.9, n):
    for yi in np.linspace(0.1, 0.9, n):
        xs, gs = gradDescent(np.array([xi, yi]), alpha=0.001)
        paths.append(xs)
        # plotpath(np.array(xs)[:,:2], gs)
# Export each path as a text file consumed by the Unity visualisation.
for i, p in enumerate(paths):
    mat = np.matrix(np.array(p))
    with open('forUnity/GradDescs/G'+str(i)+'.txt','wb') as f:
        for line in mat:
            np.savetxt(f, line, fmt='%.12f')
| [
"43211625+Louiii@users.noreply.github.com"
] | 43211625+Louiii@users.noreply.github.com |
4effb3beb42cb3fb7e5bbec90de7dd762dc6990a | 794d86860a924a1571dd6b04539d9459743580c9 | /check_normality.py | c6de4dde9b0a2604190a7614287a4c6e53e21b9e | [] | no_license | lixinf/DFSS_with_python | bc9e99454ecde00704362eb7f98f1c2a4f9d6589 | 55382033ded25e17f9cc75da68bcc5ad7891d84a | refs/heads/master | 2022-11-26T03:01:17.705658 | 2020-08-01T04:16:55 | 2020-08-01T04:16:55 | 284,179,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py |
"""
This code is used to identify the normality of data.
Code will return false if Data is empty or less than 2 number
Otherwise, code will return p_value for normality. p>0.05 means data is normal distribution
- For small sample-numbers (<300), you should use the "normaltest"
- the Kolmogorov-Smirnov(Kolmogorov-Smirnov) test should only be used for large sample numbers (>300)
"""
import numpy as np
import scipy.stats as stats
def check_normality(data):
    """Return a normality-test p-value for *data*, or False when len(data) < 2.

    For small samples (n <= 300) D'Agostino-Pearson's normaltest is used; for
    larger samples the Kolmogorov-Smirnov test on the standardized sample is
    used instead.  A p-value > 0.05 means the data is plausibly normal.
    """
    n = len(data)
    if n <= 1:
        # Too few samples to say anything about the distribution.
        return False
    # Work on an ndarray so the arithmetic below also accepts plain lists
    # (the original `data - np.mean(data)` raised TypeError for list input).
    arr = np.asarray(data, dtype=float)
    if n <= 300:
        _, p_value = stats.normaltest(arr, nan_policy='omit')
    else:
        # kstest against 'norm' expects a standardized (mean 0, sd 1) sample.
        _, p_value = stats.kstest((arr - np.mean(arr)) / np.std(arr, ddof=1), 'norm')
    return p_value
# Bug fix: the original compared against 'main', which __name__ never equals;
# the correct direct-execution sentinel is '__main__'.
if __name__ == '__main__':
    pass
"noreply@github.com"
] | noreply@github.com |
71986ad624234b73af60e4a768b4a74d431e3313 | 3fadc3b9353d1745dd3af9fc7fe7b2c4064a51e0 | /manage.py | be7718735b01234d68671099ff2cf955c2fe208b | [] | no_license | Ashish2831/Class-Based-CRUD-Project | 16a36ad9c4d615d0e6e517696c12312134953319 | c6445cfe298197e4050cf5f31ac332ff383b1237 | refs/heads/main | 2023-04-21T03:33:19.282851 | 2021-05-08T11:10:00 | 2021-05-08T11:10:00 | 365,490,100 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 680 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Run administrative tasks for the Class_Based_CRUD_Project site."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'Class_Based_CRUD_Project.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Chain the original error but point at the two usual causes:
        # Django missing from PYTHONPATH, or an inactive virtualenv.
        hint = (
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        )
        raise ImportError(hint) from exc
    execute_from_command_line(sys.argv)
# Standard Django manage.py entry point — only runs when executed directly.
if __name__ == '__main__':
    main()
| [
"ashishpatil28032001@gmail.com"
] | ashishpatil28032001@gmail.com |
8c6b0a368630b73f5ec1414d732f2d73c7cf66e5 | 67a361a2a58d18776abc49814e362eddaa1997bf | /mongo.py | f47d50f51ba014ac66b2bbec40347b41ca6945d8 | [] | no_license | johnnyferns14/milestone-project-3 | b62d1038a9e3e4ce580f0e943b7a78f1ead56e64 | ebed6ac2650b4e89c05ec6c00fbc360b8b3b0eab | refs/heads/master | 2023-04-08T08:07:37.054734 | 2021-03-28T00:32:34 | 2021-03-28T00:32:34 | 347,879,830 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 614 | py | import os
import pymongo
# Connection string comes from the environment so credentials stay out of
# source control.
MONGO_URI = os.environ.get("MONGO_URI")
DATABASE = "myRecipe"  # database name
COLLECTION = "recipies"  # recipe documents (spelling kept to match the live DB)
USERS = "members"  # registered user documents
def mongo_connect(url):
    """Return a pymongo MongoClient for *url*, or None when connecting fails."""
    try:
        conn = pymongo.MongoClient(url)
        print("Mongo is connected")
        return conn
    except pymongo.errors.ConnectionFailure as e:
        # Bug fix: the original wrote `print("...: %s") % e`, which applies
        # `%` to print()'s return value (None) and raises TypeError instead
        # of reporting the failure.  Format the message, then print it.
        print("Could not connect: %s" % e)
# Module-level smoke test: connect and dump both collections to stdout.
# NOTE(review): mongo_connect returns None on failure, which would make the
# subscripts below raise TypeError — consider guarding.  TODO confirm intent.
conn = mongo_connect(MONGO_URI)
recipes = conn[DATABASE][COLLECTION]
users = conn[DATABASE][USERS]
posts = recipes.find()
records = users.find()
for post in posts:
    print(post)
for record in records:
    print(record)
| [
"johnnyferns@gmail.com"
] | johnnyferns@gmail.com |
d7e35795109593422c728043090178b3c899e3ec | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/65/usersdata/159/31031/submittedfiles/investimento.py | 5b61ed3d81f3cbef16ed56e9d8cb401d0f95499a | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 385 | py | # -*- coding: utf-8 -*-
from __future__ import division
# Read the initial investment and the per-period interest rate.
a = float(input('Valor do investimento'))
x = float(input(''))
# Compound the investment over 10 periods, printing the balance after each.
# Bug fix: the original unrolled code printed the 4th balance twice
# (`print('%.2f' %d)` after computing `e`) and never printed the 5th; the
# loop below produces the intended sequence with the same '%.2f' format.
valor = a
for _ in range(9):
    valor = valor + (x * valor)
    print('%.2f' % valor)
# Final (10th) period: keep the name `l`, because the statement that follows
# this block prints it.
l = valor + (x * valor)
print('%.2f' %l) | [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
7a2d804bfeae7d288dc2c166ea105a91da40ca97 | 3cd19164c17d9793ea506369454b8bacd5cebfa9 | /Backend/clubmg_bak_20190726/clubserver/urls.py | 48019cb3399c4c52f203ca02e80992ee2532ec11 | [] | no_license | Myxg/BadmintonClubSystem | 337a17728122ab929d37e7f2732850beb49d8be0 | 1c51236098ab3770cadd925212f9d3978ed83c2a | refs/heads/master | 2022-12-27T10:13:55.129630 | 2020-09-16T10:21:36 | 2020-09-16T10:21:36 | 295,998,099 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,500 | py | #coding: utf-8
from django.conf.urls import include, url
from rest_framework_jwt.views import obtain_jwt_token
from . import views
# URL routing for the club-management API.  All views live in clubserver.views;
# authentication is JWT-based (rest_framework_jwt).
urlpatterns = [
    # Authentication
    url(r'^token-auth$', obtain_jwt_token),
    # User / group administration
    url(r'^useradd$', views.UserAdd.as_view()),
    url(r'^user$', views.UserView.as_view()),
    url(r'^user/password$', views.UpdatePassword.as_view()),
    url(r'^user/email$', views.UpdateEmail.as_view()),
    url(r'^user/photo$', views.UpdatePhoto.as_view()),
    url(r'^user/(?P<user_id>[0-9]+)$', views.EditUserView.as_view()),
    url(r'^users$', views.UsersView.as_view()),
    url(r'^group/(?P<pk_id>[0-9]+)$', views.GroupView.as_view()),
    url(r'^groups$', views.GroupsView.as_view()),
    url(r'^operations$', views.OperationsView.as_view()),
    url(r'^permissions$', views.PermissionsView.as_view()),
    # Athlete records and related data (companies, events, fitness, rankings)
    url(r'^athlete/(?P<pk_id>[0-9]+)$', views.AthleteView.as_view()),
    url(r'^athletes$', views.AthletesView.as_view()),
    url(r'^athlete/company/(?P<pk_id>[0-9]+)$', views.AthleteCompanyView.as_view()),
    url(r'^athlete/companys$', views.AthleteCompanysView.as_view()),
    url(r'^athlete/sportevent/(?P<pk_id>[0-9]+)$', views.SportEventExpView.as_view()),
    url(r'^athlete/group/(?P<pk_id>[0-9]+)$', views.AthleteGroupView.as_view()),
    url(r'^athlete/groups$', views.AthleteGroupsView.as_view()),
    url(r'^athlete/fitness/items$', views.FitnessItemsView.as_view()),
    url(r'^athlete/fitness/datas$', views.FitnessDatasView.as_view()),
    url(r'^athlete/fitness/data/(?P<pk_id>[0-9]+)$', views.FitnessDataView.as_view()),
    url(r'^athlete/worldrankinglist$', views.WorldRankingListView.as_view()),
    url(r'^athlete/worldranking/(?P<pk_id>[0-9]+)$', views.WorldRankingView.as_view()),
    url(r'^athlete/olympicrankinglist$', views.OlympicRankingListView.as_view()),
    url(r'^athlete/olympicranking/(?P<pk_id>[0-9]+)$', views.OlympicRankingView.as_view()),
    url(r'^athlete/overview/(?P<pk_id>[0-9]+)$', views.AthleteOverViewView.as_view()),
    url(r'^athlete/linkdocs/(?P<pk_id>[0-9]+)$', views.AthleteDocLinkView.as_view()),
    url(r'^athlete/matchs/(?P<pk_id>[0-9]+)$', views.AthleteMatchVideosSearchView.as_view()),
    # Match videos and match info
    url(r'^video/(?P<pk_id>[0-9]+)$', views.MatchVideoView.as_view()),
    url(r'^videos$', views.MatchVideosSearchView.as_view()),
    url(r'^matchinfo/(?P<pk_id>[0-9]+)$', views.MatchInfoView.as_view()),
    url(r'^matchinfos$', views.MatchInfosView.as_view()),
    url(r'^matchlist$', views.MatchListView.as_view()),
    url(r'^matchlevel2list$', views.MatchLevel2NameView.as_view()),
    # Mark (annotation) data: display, sync and retry endpoints
    url(r'^markdata/matchinfos$', views.MarkMatchInfosView.as_view()),
    url(r'^markdata/show/(?P<name>(hits|scores|serverecord|playgroundrecord))/(?P<match_id>[0-9]{4}_[0-9]{4}_[0-9]{4})$', views.MarkDataShow.as_view()),
    url(r'^markdata/sync/(?P<name>(hits|scores|serverecord|playgroundrecord))/(?P<match_id>[0-9]{4}_[0-9]{4}_[0-9]{4})$', views.MarkDataSync.as_view()),
    url(r'^markdata/sync/(?P<name>(hits|scores))/(?P<match_id>[0-9]{4}_[0-9]{4}_[0-9]{4})/retry$', views.MarkDataSyncRetry.as_view()),
    # Documents and history (history reuses DocsView)
    url(r'^docs/(?P<module_id>[a-zA-Z0-9_]+)(/)?$', views.DocsView.as_view()),
    url(r'^docs/link/(?P<module_id>[a-zA-Z0-9_]+)(/)?$', views.DocLinkView.as_view()),
    url(r'^history/(?P<module_id>[a-zA-Z0-9_]+)(/)?$', views.DocsView.as_view()),
    url(r'^companylist$', views.CompanysListView.as_view()),
    # test url
    url(r'^sn/(?P<type_id>[a-z]+)$', views.NewSN.as_view()),
    url(r'^test$', views.Test.as_view()),
]
| [
"15234407153@163.com"
] | 15234407153@163.com |
922e2af7bfc749605040905b8f8e66a656738ba6 | 0c89de726d02869a823d925d57e57676534e86b2 | /producers/models/weather.py | 58b95d05dc39a6f23e9b930f4e306cbcae7f6b64 | [] | no_license | aditya232/optimzing-chicago-public-transport | d4bba25211207fff33c55ec0bde50ab6a2fff43c | ea130877eb66415b28d92110a7c3fb82219eedaa | refs/heads/master | 2023-05-25T21:37:30.461224 | 2020-04-21T20:07:24 | 2020-04-21T20:07:24 | 257,704,771 | 0 | 0 | null | 2023-08-14T22:09:48 | 2020-04-21T20:12:22 | Python | UTF-8 | Python | false | false | 2,978 | py | """Methods pertaining to weather data"""
import json
import logging
import random
import requests
from enum import IntEnum
from models.producer import Producer
from pathlib import Path
logger = logging.getLogger(__name__)
class Weather(Producer):
    """Defines a simulated weather model"""
    # Closed set of possible conditions; IntEnum starting at 0.
    status = IntEnum(
        "status", "sunny partly_cloudy cloudy windy precipitation", start=0
    )
    # Confluent REST Proxy endpoint used instead of a native Kafka producer.
    rest_proxy_url = "http://localhost:8082"
    # Avro schemas are loaded lazily once and cached on the class (see __init__).
    key_schema = None
    value_schema = None
    # 0-based month indices that select the seasonal temperature baseline/bias.
    winter_months = set((0, 1, 2, 3, 10, 11))
    summer_months = set((6, 7, 8))
    def __init__(self, month):
        """Create the producer and seed the temperature from the start month."""
        #
        #
        # TODO: Complete the below by deciding on a topic name, number of partitions, and number of
        # replicas
        #
        #
        super().__init__(
            "org.chicago.cta.weather.v2",
            key_schema=Weather.key_schema,
            value_schema=Weather.value_schema,
            num_partitions=1,
            num_replicas=3
        )
        # Default: sunny and 70F, adjusted below for winter/summer starts.
        self.status = Weather.status.sunny
        self.temp = 70.0
        if month in Weather.winter_months:
            self.temp = 40.0
        elif month in Weather.summer_months:
            self.temp = 85.0
        # Load and cache the Avro schemas shipped next to this module; the
        # cache is class-wide, so only the first instance pays the file I/O.
        if Weather.key_schema is None:
            with open(f"{Path(__file__).parents[0]}/schemas/weather_key.json") as f:
                Weather.key_schema = json.load(f)
        if Weather.value_schema is None:
            with open(f"{Path(__file__).parents[0]}/schemas/weather_value.json") as f:
                Weather.value_schema = json.load(f)
    def _set_weather(self, month):
        """Advance temp/status one step with a season-biased random walk."""
        # Bias the triangular step towards colder (winter) or warmer (summer).
        mode = 0.0
        if month in Weather.winter_months:
            mode = -1.0
        elif month in Weather.summer_months:
            mode = 1.0
        # Random step clamped to [-20, 100] before being added to self.temp.
        self.temp += min(max(-20.0, random.triangular(-10.0, 10.0, mode)), 100.0)
        self.status = random.choice(list(Weather.status))
    def run(self, month):
        """Advance the simulation one step and POST the reading via REST proxy."""
        self._set_weather(month)
        # Avro-over-REST payload: schemas plus a single key/value record.
        resp = requests.post(
            f"{Weather.rest_proxy_url}/topics/{self.topic_name}",
            headers={"Content-Type": "application/vnd.kafka.avro.v2+json"},
            data=json.dumps(
                {"key_schema" : json.dumps(Weather.key_schema),
                "value_schema" : json.dumps(Weather.value_schema),
                "records": [
                    {
                        "key": {"timestamp": self.time_millis()},
                        "value": {"temperature": self.temp, "status": self.status.name}
                    }
                ]
                }
            ),
        )
        # NOTE(review): this log line calls self.time_millis() a second time,
        # so the logged timestamp can differ from the one actually sent.
        logger.info("sending weather : {}".format(json.dumps({"key": {"timestamp": self.time_millis()},"value": {"temperature": self.temp, "status": self.status.name}})))
        resp.raise_for_status()
        logger.debug(
            "sent weather data to kafka, temp: %s, status: %s",
            self.temp,
            self.status.name,
        )
| [
"wingify@WINDELLAP-223.local"
] | wingify@WINDELLAP-223.local |
1a1c18677bf3255b70a8c250813450f293c27f9f | 38e477b325ec85256caee1eac021e821ad9d31e9 | /src/sigmoid_tanh.py | 65a8823db8451f7c27892f0e680d865cf1032529 | [
"Apache-2.0"
] | permissive | wojtekwalczak/ml_snippets | 04cfbb448bc2202ef364a67e4037aaa4b339f1c2 | caf140b36a0d586d26b5a10f128827db7ea0c0e1 | refs/heads/master | 2022-01-29T13:36:52.985536 | 2022-01-16T10:50:09 | 2022-01-16T10:50:09 | 135,038,134 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,134 | py | #!/usr/bin/env python3
"""Draws `W = tanh(W_hat) * sigmoid(M_hat)`, where `*` is element-wise multiplication.
The formula is taken from the following paper: https://arxiv.org/pdf/1808.00508.pdf
The question is: why on earth the surface resulting from element-wise multiplication
of tanh and sigmoid makes sense when one would like to learn `W` to be (approximately)
one of {-1, 0, 1}.
The answer is: this kind of surface has stationary points near -1 and 1 (like `tanh`),
but also kind of mezzanine floor close to 0.
"""
import numpy as np
import matplotlib.pyplot as plt
# This import is necessary for 3d projection
# from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import Axes3D
def sigmoid(x):
    """Logistic function: smoothly maps any real input into (0, 1)."""
    z = np.exp(-x)
    return 1/(1+z)
def count_W(W_hat, M_hat):
    """See page 3 of: https://arxiv.org/pdf/1808.00508.pdf"""
    # Element-wise product: tanh saturates towards {-1, 1} while the
    # sigmoid gate saturates towards {0, 1}, giving plateaus near -1, 0, 1.
    gate = np.tanh(W_hat)
    scale = sigmoid(M_hat)
    return gate * scale
# Evaluate W over a 100x100 grid on [-10, 10]^2 and plot it as a 3D surface.
xs = np.linspace(-10, 10, 100)
ys = np.linspace(-10, 10, 100)
X, Y = np.meshgrid(xs, ys)
W = count_W(X.ravel(), Y.ravel())
W = W.reshape(X.shape)
fig = plt.figure()
# NOTE(review): gca(projection='3d') was removed in matplotlib 3.6; newer
# versions need fig.add_subplot(projection='3d') instead.
ax = fig.gca(projection='3d')
ax.plot_surface(X=X, Y=Y, Z=W)
plt.show() | [
"wojciech.walczak@tcl.com"
] | wojciech.walczak@tcl.com |
86047464007e688dadd3b3c27012b467b686a566 | bb150497a05203a718fb3630941231be9e3b6a32 | /framework/api/optimizer/reader.py | ebbceb27d15008ded7a2c8cd080b7547fb67cd48 | [] | no_license | PaddlePaddle/PaddleTest | 4fb3dec677f0f13f7f1003fd30df748bf0b5940d | bd3790ce72a2a26611b5eda3901651b5a809348f | refs/heads/develop | 2023-09-06T04:23:39.181903 | 2023-09-04T11:17:50 | 2023-09-04T11:17:50 | 383,138,186 | 42 | 312 | null | 2023-09-13T11:13:35 | 2021-07-05T12:44:59 | Python | UTF-8 | Python | false | false | 222 | py | #!/bin/env python
# -*- coding: utf-8 -*-
# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python
"""
reader
"""
import numpy as np
# Random fixture data for the optimizer tests: a (1, 1, 10) sequence batch
# and a (1, 3, 16, 16) image batch, both uniform in [0, 1).
reader = np.random.random(size=[1, 1, 10])
reader_img = np.random.random(size=[1, 3, 16, 16])
| [
"noreply@github.com"
] | noreply@github.com |
5dba47e35bc312535682975ede83decc02cb821c | a2d709f4665e01b3f7e209850747f461b890746b | /nf_real_fade_out_Det.py | 8f5b5c3325454056783e11e53d06feb746cab60e | [] | no_license | tphwong/notch_filter | 6bb8f1cd4cebb7256fce7824fe37e98bb575b9d2 | af5b91e806565f2bf72241c465c836b5ee961c5d | refs/heads/master | 2021-04-15T12:58:59.184645 | 2018-08-08T14:46:12 | 2018-08-08T14:46:12 | 126,387,714 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 725 | py | import serial
import time
import math
import nf
import nf_header
# specify which COM port to open
# Open the serial link to the notch-filter hardware on COM32.
com = nf.comOpen('COM32')
speed = 31.3 # assume vehicle speed is 70 mi/h, converted to m/s; 70mi/h === 31.3m/s
freq = 105100000 # frequency at which HD data is transmitted
wavelength = 299800000 / freq # by v = f * lambda
distance = 28913.2 # start distance is the distance from Mobis to 105.1MHz radio tower in meters
# NOTE(review): the comment below says 91.7MHz but `freq` above is 105.1MHz —
# confirm which tower this height value belongs to.
height = 349 # antenna height for 91.7MHz in meters
duration = 3600 # duration of test case
maxDist = 4120 * math.sqrt(height) # end distance is distance right when AA signal is completely cut off (considering Eath's curvature)
nf.real_fade_out_Det(com, speed, wavelength, distance, duration, maxDist) | [
"tphwong@umich.edu"
] | tphwong@umich.edu |
ddebfadd0c2daa17807f8177153eeb52ef49dbd9 | df2499ddc1e44aa36cf82d652a3d4501efdeea14 | /burnerbotserver/admin.py | 3ae9dd6521daad321efcb966d5637ac8d4d5ae87 | [] | no_license | johngrantuk/burnerbotserver | 5e232c85df58076818c4d5353440bfa0b82545c2 | 3a102d46073a6ed25ca8cca8a4cea465f6d65cdc | refs/heads/master | 2020-05-17T00:19:01.545866 | 2019-04-25T09:04:06 | 2019-04-25T09:04:06 | 183,393,293 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | from django.contrib import admin
from .models import UserDetail
# Expose the UserDetail model in the Django admin site.
admin.site.register(UserDetail)
| [
"johngrantuk@googlemail.com"
] | johngrantuk@googlemail.com |
8fae93e73af0f748fedd37ca6fe18c5c6b00fb09 | 6b6fe6cb7e182d078779e801fcb57c379d6e4bdd | /venv/bin/bandit | ac30727966bba95cfd3e82d068c89da9b5d43ab5 | [] | no_license | lm-bsl/fsnd_p1 | 76c3c591bb0c87b8b6b3a5215f38c0b9e8a7d3d9 | efd97b57d5b23a4fe8719b8fe757b8471d7ecad3 | refs/heads/master | 2021-10-25T12:27:12.689337 | 2019-11-11T22:03:35 | 2019-11-11T22:03:35 | 221,076,732 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 261 | #!/Users/max/PycharmProjects/FSND_project_1/venv/bin/python3.7
# -*- coding: utf-8 -*-
import re
import sys
from bandit.cli.main import main
# Console-script shim generated by setuptools: normalize argv[0] (strip the
# '-script.py'/'.exe' suffix added on Windows) and exit with main()'s status.
if __name__ == '__main__':
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"maxime.letellier@ml3w.com"
] | maxime.letellier@ml3w.com | |
882dd051b7a1fff21dee017e84de337271d6bcb6 | 39329ae5773c9b4c1f9c91eec393507f5e8ae1c0 | /server/.history/server_20200307213734.py | 40e4aa62a922652973d3ff4b8b8636ddb900856f | [] | no_license | dobreandrei1/legal | 083267aae7faa10775e5a634679869fce0ac3136 | dd05fad8df599f9fc34f56628ebd8861f7a004b4 | refs/heads/master | 2021-09-08T20:16:29.926214 | 2020-03-08T09:24:04 | 2020-03-08T09:24:04 | 245,785,262 | 0 | 0 | null | 2021-09-03T00:42:33 | 2020-03-08T09:22:37 | Python | UTF-8 | Python | false | false | 1,712 | py | from pathlib import Path
from flask import Flask, render_template, request, send_file, send_from_directory, safe_join, abort, current_app
# from werkzeug import secure_filename
import pandas as pd
import os
import time
import json
from flask_cors import CORS
from haikunator import Haikunator
import unidecode
import PyPDF2
import unidecode
haikunator = Haikunator()
app = Flask(__name__)
CORS(app)
applicationVersion = 0
@app.route('/upload')
def upload_file():
return render_template('upload.html')
@app.route('/api/titles', methods = ['GET', 'POST'])
def get_titles():
if request.method == 'POST':
f = request.files['file']
filename = request.form['filename']
# TODO: maybe check if file alreay exists and not save multipletime
# - get list of all files
# - if filename variable is a substr of any file name in folder: compare their contents
# - if match don`t save file again but use that one
name = filename + '.pdf'
if Path(name).exists():
name = filename + '.pdf'
f.save(name)
pdfFileObject = open('clauze.pdf', 'rb')
pdfReader = PyPDF2.PdfFileReader(pdfFileObject)
pages = pdfReader.numPages
clauzeDoc = ''
for page in pages:
clauzeDoc += pdfReader.getPage(page).extractText()
pdfFileObject1 = open(name, 'rb')
pdfReader1 = PyPDF2.PdfFileReader(pdfFileObject1)
pages1 = pdfReader1.numPages
contractDoc = ''
for page in pages1:
contractDoc += pdfReader1.getPage(page).extractText()
return 1
if __name__ == '__main__':
app.run(debug = False, host='0.0.0.0')
| [
"dobreandrei1@yahoo.com"
] | dobreandrei1@yahoo.com |
4062480923890be48ce91948af01567a73be9bed | 6573a45c4389688666821621c26a5a95a0765e4d | /archived_envs/20190625_100746/bin/google-oauthlib-tool | ed107688f59c3ccd9c6f360932ed99b926c0aff3 | [] | no_license | ilhmndn/Warehouse-Management-System-With-Frappe | 66a41be2286dbdb556ab51a4788fc42987d6ed2e | bd9864c5a04a6e2f2f625a8755fba3df4b6409be | refs/heads/master | 2022-10-23T11:13:57.810948 | 2019-07-02T05:18:19 | 2019-07-02T05:18:19 | 194,467,571 | 2 | 2 | null | 2022-10-15T16:16:10 | 2019-06-30T02:40:05 | Python | UTF-8 | Python | false | false | 264 | #!/home/ilhmndn/frappe-training/env/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from google_auth_oauthlib.tool.__main__ import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"ilhmndn@localhost.localdomain"
] | ilhmndn@localhost.localdomain | |
19c37356466ad944b8cb042d417054ce008b1f64 | 17bdf40c2bbdf3dd09bf0fa683d471f4e07159fd | /asymmetric_jwt_auth/apps.py | be5200a58c04ac73f83aa2863dfef64592b567c1 | [
"ISC"
] | permissive | chiranthsiddappa/asymmetric_jwt_auth | c8c9f0a11b36994b72c87f2d834189df94ef6fee | a95d28ba61e38395da483243a6f536bf25a41e74 | refs/heads/master | 2020-12-25T17:56:18.972703 | 2016-05-24T05:16:06 | 2016-05-24T05:16:06 | 59,540,392 | 0 | 0 | null | 2016-05-24T04:30:18 | 2016-05-24T04:30:18 | null | UTF-8 | Python | false | false | 151 | py | from django.apps import AppConfig
class JWTAuthConfig(AppConfig):
    """Django app configuration for the asymmetric JWT auth package."""
    # Dotted app label Django uses to locate the package.
    name = 'asymmetric_jwt_auth'
    # Human-readable name shown in the Django admin.
    verbose_name = "Asymmetric Key Authentication"
| [
"crgwbr@gmail.com"
] | crgwbr@gmail.com |
ec9c7d2a3db1de8e236b53724f1cbf8c028163a0 | 8638bf84783732f87d88b02ad0a41f68684fbe1f | /plotsprecht.py | 4bb13dda0e82bafc31cdb209b2b5a4505d07baeb | [] | no_license | JLammering/V-351 | fa7f43d025e44426e143b12e4517a4bd734fe559 | 2532300f9ae01a3acc68afc2061efacf349b12e4 | refs/heads/master | 2021-01-10T16:30:20.834091 | 2016-01-13T12:43:44 | 2016-01-13T12:45:33 | 48,593,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | import matplotlib.pyplot as plt
import numpy as np
#from itertools import chain
#Messdaten einladen
# k, U = np.genfromtxt('datenb_1.txt', unpack = True)
# #plt.plot(f, U, 'k.', label = r'Messdaten')
#
# N=9
#
# ind = np.arange(N)
# width = 0.3
# ax = plt.subplots()
#
#
# rects = ax.bar(ind, U, width, color ='r')
# ax.set_ylabel(r'$U\:/\:\si{\milli\volt}$')
# ax.set_xlabel(r'$\text{Frequenz}$')
# ax.set_xticks(ind)
# ax.set_xticklabels((r'$\nu$', r'$3\nu$', r'$5\nu$', r'$7\nu$', r'$9\nu$', r'$11\nu$',
# r'$13\nu$', r'$15\nu$', r'$17\nu$'))
#
# ax.legend((rects[0]),('Linienspektrum'))
#
# autolabel(rects)
#
#
# plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
# plt.savefig('build/plotsprecht.pdf')
# Load the measured line spectrum: k = frequency index, U = amplitude (mV).
k, U = np.genfromtxt('datenb_1.txt', unpack = True)
#plt.plot(k, U, 'k.', label = r'Messdaten')
#k_1 = list (chain.from_iterable((x, x)for x in k))
#U_1 = list (chain.from_iterable((0, x)for x in U))
#plt.plot(k_1, U_1, 'r-', label = r'Linien')
# Draw one vertical line per harmonic (a discrete line spectrum).
for a in zip(k,U):
    keins, Ueins = a
    plt.plot((keins, keins), (0,Ueins),label = r'Amplituden')
plt.xlabel(r'$\text{Frequenz}$')
plt.ylabel(r'$U\:/\:\si{\milli\volt}$')
plt.tight_layout(pad=0, h_pad=1.08, w_pad=1.08)
plt.savefig('build/plotsprecht.pdf')
| [
"jlammering@me.com"
] | jlammering@me.com |
54fde6cf27909a8351ad62b64d290dbcb4045d4a | 648e5ea6722db2f29806e24f11cf169257dfc1c7 | /doorsadmin/migrations/0076_auto__add_field_doorway_priority__add_field_snippetsset_priority__add_.py | 20830d5176fda335cd25b5743aa423cf2137cfb5 | [] | no_license | cash2one/doorscenter | 30d4f65e3fb57c417df3f09d7feab721d8425faa | d2771bf04aa187dda6d468883a5a167237589369 | refs/heads/master | 2021-05-27T15:38:56.219907 | 2012-06-20T05:38:15 | 2012-06-20T05:38:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,100 | py | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'Doorway.priority'
db.add_column('doorsadmin_doorway', 'priority', self.gf('django.db.models.fields.CharField')(default='std', max_length=20), keep_default=False)
# Adding field 'SnippetsSet.priority'
db.add_column('doorsadmin_snippetsset', 'priority', self.gf('django.db.models.fields.CharField')(default='std', max_length=20), keep_default=False)
# Adding field 'SpamTask.priority'
db.add_column('doorsadmin_spamtask', 'priority', self.gf('django.db.models.fields.CharField')(default='std', max_length=20), keep_default=False)
# Adding field 'XrumerBaseR.priority'
db.add_column('doorsadmin_xrumerbaser', 'priority', self.gf('django.db.models.fields.CharField')(default='std', max_length=20), keep_default=False)
def backwards(self, orm):
# Deleting field 'Doorway.priority'
db.delete_column('doorsadmin_doorway', 'priority')
# Deleting field 'SnippetsSet.priority'
db.delete_column('doorsadmin_snippetsset', 'priority')
# Deleting field 'SpamTask.priority'
db.delete_column('doorsadmin_spamtask', 'priority')
# Deleting field 'XrumerBaseR.priority'
db.delete_column('doorsadmin_xrumerbaser', 'priority')
    # Frozen "South ORM" snapshot of every model's fields at the time of
    # this migration; auto-generated by `schemamigration` — do not edit
    # by hand.
    models = {
'doorsadmin.agent': {
'Meta': {'object_name': 'Agent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'currentTask': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateLastPing': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '3', 'null': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'doorsadmin.domain': {
'Meta': {'object_name': 'Domain'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateExpires': ('django.db.models.fields.DateField', [], {'default': 'datetime.date(2012, 6, 7)', 'null': 'True', 'blank': 'True'}),
'dateRegistered': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Host']", 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ipAddress': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.IPAddress']", 'null': 'True', 'blank': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'linkedDomains': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'linkedDomains_rel_+'", 'null': 'True', 'to': "orm['doorsadmin.Domain']"}),
'maxDoorsCount': ('django.db.models.fields.IntegerField', [], {'default': '25'}),
'maxLinkedDomains': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '200'}),
'nameServer1': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'nameServer2': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'net': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Net']", 'null': 'True', 'blank': 'True'}),
'netLevel': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'niche': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Niche']", 'null': 'True', 'blank': 'True'}),
'registrator': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'})
},
'doorsadmin.doorgenprofile': {
'Meta': {'object_name': 'DoorgenProfile'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'settings': ('django.db.models.fields.TextField', [], {'default': "''"}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'})
},
'doorsadmin.doorway': {
'Meta': {'object_name': 'Doorway'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Agent']", 'null': 'True', 'blank': 'True'}),
'analyticsId': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'cyclikId': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'domain': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Domain']", 'null': 'True', 'blank': 'True'}),
'domainFolder': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'doorgenProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.DoorgenProfile']", 'null': 'True'}),
'doorwaySchedule': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.DoorwaySchedule']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywordsList': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'keywordsSet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.KeywordsSet']", 'null': 'True', 'blank': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'netLinksList': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'niche': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Niche']", 'null': 'True'}),
'pagesCount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'piwikId': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'std'", 'max_length': '20'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'runTime': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'spamLinksCount': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'stateManaged': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Template']", 'null': 'True', 'blank': 'True'})
},
'doorsadmin.doorwayschedule': {
'Meta': {'object_name': 'DoorwaySchedule'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateEnd': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'dateStart': ('django.db.models.fields.DateField', [], {'default': 'datetime.date.today', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'doorgenProfile': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.DoorgenProfile']", 'null': 'True'}),
'doorsPerDay': ('django.db.models.fields.IntegerField', [], {'default': '1', 'null': 'True'}),
'doorsToday': ('django.db.models.fields.IntegerField', [], {'default': '0', 'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywordsSet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.KeywordsSet']", 'null': 'True', 'blank': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'lastRun': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'maxPagesCount': ('django.db.models.fields.IntegerField', [], {'default': '900', 'null': 'True'}),
'maxSpamLinksPercent': ('django.db.models.fields.FloatField', [], {'default': '5'}),
'minPagesCount': ('django.db.models.fields.IntegerField', [], {'default': '500', 'null': 'True'}),
'minSpamLinksPercent': ('django.db.models.fields.FloatField', [], {'default': '4'}),
'net': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Net']", 'null': 'True'}),
'niche': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Niche']", 'null': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Template']", 'null': 'True', 'blank': 'True'})
},
'doorsadmin.event': {
'Meta': {'object_name': 'Event'},
'date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'text': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'info'", 'max_length': '50', 'blank': 'True'})
},
'doorsadmin.host': {
'Meta': {'object_name': 'Host'},
'company': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'controlPanelType': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '50', 'blank': 'True'}),
'controlPanelUrl': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'costPerMonth': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'diskSpace': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'ftpLogin': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'ftpPassword': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'ftpPort': ('django.db.models.fields.IntegerField', [], {'default': '21', 'blank': 'True'}),
'hostName': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'rootDocumentTemplate': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'traffic': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'shared'", 'max_length': '50', 'blank': 'True'})
},
'doorsadmin.ipaddress': {
'Meta': {'object_name': 'IPAddress'},
'address': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Host']", 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'})
},
'doorsadmin.keywordsset': {
'Meta': {'object_name': 'KeywordsSet'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'encoding': ('django.db.models.fields.CharField', [], {'default': "'cp1251'", 'max_length': '50'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keywordsCount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'localFolder': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'niche': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Niche']", 'null': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'})
},
'doorsadmin.net': {
'Meta': {'object_name': 'Net'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'analyticsId': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'cyclikId': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'piwikId': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'settings': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'})
},
'doorsadmin.niche': {
'Meta': {'object_name': 'Niche'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'analyticsId': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'cyclikId': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'piwikId': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'stopwordsList': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'tdsSchemes': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'doorsadmin.snippetsset': {
'Meta': {'object_name': 'SnippetsSet'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'agent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Agent']", 'null': 'True', 'blank': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateLastParsed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interval': ('django.db.models.fields.IntegerField', [], {'default': '100', 'null': 'True'}),
'keywordsCount': ('django.db.models.fields.IntegerField', [], {'default': '500', 'null': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'localFile': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'niche': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Niche']", 'null': 'True'}),
'phrasesCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'std'", 'max_length': '20'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'runTime': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'stateManaged': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'})
},
'doorsadmin.spamlink': {
'Meta': {'object_name': 'SpamLink'},
'anchor': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000'}),
'doorway': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Doorway']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'spamTask': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.SpamTask']", 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1000'})
},
'doorsadmin.spamtask': {
'Meta': {'object_name': 'SpamTask'},
'agent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Agent']", 'null': 'True', 'blank': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'failsCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'halfSuccessCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'std'", 'max_length': '20'}),
'profilesCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'runTime': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'snippetsSet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.SnippetsSet']", 'null': 'True', 'blank': 'True'}),
'stateManaged': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'successCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'xrumerBaseR': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.XrumerBaseR']", 'null': 'True'})
},
'doorsadmin.template': {
'Meta': {'object_name': 'Template'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'localFolder': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'niche': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Niche']", 'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'type': ('django.db.models.fields.CharField', [], {'default': "'none'", 'max_length': '50', 'blank': 'True'})
},
'doorsadmin.xrumerbaser': {
'Meta': {'object_name': 'XrumerBaseR'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'agent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Agent']", 'null': 'True', 'blank': 'True'}),
'baseNumber': ('django.db.models.fields.IntegerField', [], {'default': '42', 'unique': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'emailAddress': ('django.db.models.fields.CharField', [], {'default': "'niiokr2012@gmail.com'", 'max_length': '200'}),
'emailLogin': ('django.db.models.fields.CharField', [], {'default': "'niiokr2012@gmail.com'", 'max_length': '200'}),
'emailPassword': ('django.db.models.fields.CharField', [], {'default': "'kernel32'", 'max_length': '200'}),
'emailPopServer': ('django.db.models.fields.CharField', [], {'default': "'pop.gmail.com'", 'max_length': '200'}),
'failsCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'halfSuccessCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'linksCount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'nextSpamTaskDomainsCount': ('django.db.models.fields.IntegerField', [], {'default': '4'}),
'niche': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.Niche']", 'null': 'True'}),
'nickName': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'priority': ('django.db.models.fields.CharField', [], {'default': "'std'", 'max_length': '20'}),
'profilesCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'realName': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'runTime': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'snippetsSet': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.SnippetsSet']", 'null': 'True', 'blank': 'True'}),
'spamTaskDomainLinksMax': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'spamTaskDomainLinksMin': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'spamTaskDomainsMax': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'spamTaskDomainsMin': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'stateManaged': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'}),
'successCount': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'xrumerBaseRaw': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['doorsadmin.XrumerBaseRaw']", 'null': 'True'})
},
'doorsadmin.xrumerbaseraw': {
'Meta': {'object_name': 'XrumerBaseRaw'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'baseNumber': ('django.db.models.fields.IntegerField', [], {'default': '42', 'unique': 'True'}),
'dateAdded': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'dateChanged': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'lastError': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'linksCount': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'remarks': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'stateSimple': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '50'})
}
}
complete_apps = ['doorsadmin']
| [
"alex@altstone.com"
] | alex@altstone.com |
2a46dd981ecb739e9e46f9c71da3e33492974bb5 | a075ee186532121edbe5febd9978d9c98fb7d888 | /app/test/test_service.py | 2d99be24b353940df520ed27d15ce723d6f85acc | [] | no_license | HoonJin/fastapi-sample | ef68c77b4129bf6d855ef23d19089aba4e81f3ee | 0b535711e47bf0c0b2d2b2e984859223d0bfb502 | refs/heads/master | 2022-12-24T00:27:41.655204 | 2020-09-29T10:10:52 | 2020-09-29T10:10:52 | 249,173,393 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 870 | py | from config.exceptions import NotFoundException
from database import db
from .test_dao import TestDao
class TestService:
    """Service layer for Test entities: paginated listing and deletion."""

    @staticmethod
    async def get_all_pagination(page: int, per_page: int) -> dict:
        """Return one page of rows plus pagination metadata.

        Args:
            page: 1-based page number.
            per_page: number of rows per page (must be positive).

        Returns:
            dict with keys 'content' (the rows for this page),
            'total_cnt' (total row count) and 'total_page' (page count).
        """
        total_cnt = await TestDao.get_all_count()
        # Ceiling division in pure integers. The previous
        # int(total_cnt / per_page) went through a float and could lose
        # precision for very large counts.
        total_page = -(-total_cnt // per_page)
        offset = (page - 1) * per_page
        result = await TestDao.get_all_with_offset_and_limit(offset, per_page)
        return {
            'content': result,
            'total_cnt': total_cnt,
            'total_page': total_page
        }

    @staticmethod
    async def delete(t_id: int) -> None:
        """Delete the row with id *t_id*.

        Raises:
            NotFoundException: if no row with that id exists.
        """
        row = await TestDao.find_by_id(t_id)
        if row is None:
            raise NotFoundException
        # Run the removal inside a transaction so it is atomic.
        async with db.transaction():
            await TestDao.delete_by_id(t_id)
| [
"bwjhj1030@gmail.com"
] | bwjhj1030@gmail.com |
c869b985f6efcc06cb7b2c9d71bff7a64242bbeb | 856c2ce82997975f747f04b823819e6c65624477 | /blog/urls.py | f69c1a3a567d53eeb5703fa1058d4968a4795da9 | [] | no_license | leadmeout/simple_blog | 348f83db921d6b0a9cf46d24cdb87b160552d61f | edfef1295dbaa9a35d60e6d7fa4cfb0c9a267da0 | refs/heads/master | 2023-01-04T10:30:15.605104 | 2020-10-28T20:14:49 | 2020-10-28T20:14:49 | 308,122,152 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 797 | py | """blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
# Project-level URL routing.
urlpatterns = [
    path('admin/', admin.site.urls),  # built-in Django admin interface
    path('', include('blog_posts.urls')),  # everything else goes to the blog_posts app
]
| [
"markhorvat@protonmail.com"
] | markhorvat@protonmail.com |
34d1e03b2cc53375a6a2534a8570f72711e0d8ba | d3b4c1ad4fb521b7d44ec66ab88a6ca075d20533 | /q16.py | 395bd5fae27c8b3b749f6e4c8de9b99a2264c2ab | [] | no_license | Ayesha116/piaic.assignment | 06b57c25f38a0836da5f26c654682c1ca7e4d665 | 5ef29d394cea55c1d467f45d6b07d611378b419f | refs/heads/master | 2020-06-16T07:24:13.153377 | 2019-07-06T19:42:43 | 2019-07-06T19:42:43 | 195,512,041 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 373 | py | #Write a Python program to compute the distance between the points (x1, y1) and (x2, y2).
import math

# Read the two points' coordinates. float() accepts every input the old
# int() parsing did, and additionally allows decimal coordinates.
a = float(input("enter co-ordinate of x1: " ))
b = float(input("enter co-ordinate of x2: " ))
c = float(input("enter co-ordinate of y1: " ))
d = float(input("enter co-ordinate of y2: " ))
# Euclidean distance between (x1, y1) and (x2, y2); math.hypot computes
# sqrt((x2-x1)**2 + (y2-y1)**2) without intermediate overflow.
distance = math.hypot(b - a, d - c)
print("distance between two points is", distance)
"ayeshajawed1168@gmail.com"
] | ayeshajawed1168@gmail.com |
747f951d4ca6a9d9514d42fb822a390819fb5427 | 17a5d9c1b10c9d0483d69cb2201c054d96cb0c68 | /main.py | 16c4cb2fbf4b85e1e443b20e09007eeb6d1ecf23 | [] | no_license | Croustimath/Projet-ISN | 95ab9efc221e0442a54e57fbc5501c4d342edb3e | 90032cd51609bd47ffa42a3f69618cfc0d47fe6e | refs/heads/master | 2020-05-03T01:54:09.932073 | 2019-05-11T14:34:46 | 2019-05-11T14:34:46 | 178,352,379 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,456 | py | from tkinter import *
import time
from math import*
########################################################## Classes #####################################################################
class Niveau:
    """A level: parses a text map file and draws it on the global canvas."""
    def __init__(self,fichier):
        # fichier: path of the level description file.
        # liste: parsed grid (list of rows) once generer() has run; 0 before.
        self.liste = 0
        self.fichier = fichier
    def generer(self):
        """Parse the level file into self.liste.

        Builds one list per text line, one single-character sprite code per
        cell; the trailing '\\n' of each line is skipped.
        """
        # Open the level file
        with open(self.fichier, "r") as fichier:
            structure_niveau = []
            # Walk the lines of the file
            for ligne in fichier:
                ligne_niveau = []
                # Walk the sprite codes (characters) of the line
                for sprite in ligne:
                    # Ignore the end-of-line "\n"
                    if sprite != '\n':
                        # Append the sprite to the row list
                        ligne_niveau.append(sprite)
                # Append the row to the level list
                structure_niveau.append(ligne_niveau)
            # Keep the parsed structure
            self.liste = structure_niveau
    def afficher(self):
        """Draw the level from the structure produced by generer().

        Sprite codes: 'm' = wall, 'd' = start, 'a' = goal.  PhotoImage
        objects are kept as attributes so Tk does not garbage-collect them.
        """
        self.fond=PhotoImage(file="images/fond.gif")
        canvas.create_image(0,0, anchor = NW, image=self.fond)
        self.fond_menu=PhotoImage(file="images/fond_menu.gif")
        canvas.create_image(0,780,anchor = NW, image=self.fond_menu)
        self.mur = PhotoImage(file="images/mur.gif")
        self.depart = PhotoImage(file="images/depart.gif")
        self.arrivee = PhotoImage(file="images/arrivee.gif")
        self.trol = PhotoImage(file="images/trol.gif")
        # Walk the level grid and blit one tile per cell.
        num_ligne = 0
        for ligne in self.liste:
            num_case = 0
            for sprite in ligne:
                # Convert the grid cell to a pixel position.
                x = num_case * taille_sprite
                y = num_ligne * taille_sprite
                if sprite == 'm': # wall
                    canvas.create_image(x, y,anchor = NW,image=self.mur)
                elif sprite == 'd': # start
                    canvas.create_image(x, y,anchor = NW,image=self.depart)
                elif sprite == 'a': # goal
                    canvas.create_image(x, y,anchor = NW,image=self.arrivee)
                num_case += 1
            num_ligne += 1
        # Tower icon shown in the bottom menu strip.
        self.tour1_menu_image= PhotoImage(file="images/tour1_menu.gif")
        self.tour1_menu=canvas.create_image(80,800,anchor=NW,image=self.tour1_menu_image)
class Mechant:
    """An enemy: a canvas sprite with a health bar that walks down/right
    through the level grid and takes damage while inside tower hit-boxes.
    """
    def __init__(self,niveau,vitesse,vie,vitesse_tir):
        # niveau: Niveau whose .liste grid drives the path-finding.
        # vitesse: delay (ms) between two movement steps.
        # vie: starting hit points.  vitesse_tir: delay (ms) between
        # two tower-damage checks in detection().
        self.niveau=niveau
        # Enemy state: current grid cell and pixel position.
        self.case_x=0
        self.case_y=0
        self.x = 60
        self.y = 0
        self.vitesse=vitesse
        self.vie_monstre=vie          # current hit points
        self.vie_monstre_ref=vie      # initial hit points (for percentage)
        # Health-bar state: colour and rectangle coordinates.
        self.color='green'
        self.xBV=60
        self.xmaxBV=60
        self.yBV=0
        self.jaune=False              # True once the bar has turned yellow
        self.rouge=False              # True once the bar has turned red
        # Tower fire-rate used by detection().
        self.vitesse_tir=vitesse_tir
    def creation(self):
        """Create the sprite and its health bar on the canvas, register the
        enemy in the global liste_ennemi, and start the damage checks."""
        self.img_monstre=PhotoImage(file="images/monstre.gif")
        self.monstre=canvas.create_image(self.case_x,self.case_y,image=self.img_monstre,anchor=NW,tag="monstre1")
        self.barre_vie=canvas.create_rectangle(self.xBV+10,self.yBV+45,self.xmaxBV+50,self.yBV+48,fill=self.color)# health-bar creation
        liste_ennemi.append(self.monstre)
        self.detection()
    def deplacement(self):
        """One movement step; reschedules itself every self.vitesse ms.

        Moves one cell down and/or right whenever the target cell is not a
        wall ('m'); dying or reaching the goal ('a') removes the enemy.
        NOTE(review): `niveau` here is read as a global, but it is only
        assigned as a local inside jeu() — confirm it exists at run time.
        """
        droite=0   # 1 when a step to the right is allowed
        bas=0      # 1 when a step down is allowed
        self.indication()
        canvas.itemconfigure(self.barre_vie,fill=self.color)
        if len(canvas.find_withtag("monstre1"))>0:
            if self.vie_monstre<=0:
                # NOTE(review): list.remove() always returns None, so this
                # condition is always False; the removal in the test itself
                # is what actually takes effect.
                if liste_ennemi.remove(self.monstre) is not None:
                    liste_ennemi.remove(self.monstre)
                canvas.delete(self.monstre)
                canvas.delete(self.barre_vie)
            if self.case_x < spriteX_max:
                # The cell to the right is the goal: the enemy leaves.
                if niveau.liste[self.case_y][self.case_x+1] == 'a':
                    liste_ennemi.remove(self.monstre)
                    canvas.delete(self.monstre)
                    canvas.delete(self.barre_vie)
            if self.case_y < spriteY_max:
                if niveau.liste[self.case_y+1][self.case_x] != 'm':
                    bas=1
            if self.case_x < spriteX_max-1:
                if niveau.liste[self.case_y][self.case_x+1] != 'm':
                    droite=1
            if bas==1:
                # Step one cell down; move sprite and health bar.
                self.case_y += 1
                self.y = self.case_y * taille_sprite
                self.yBV=self.case_y * taille_sprite
                self.xmaxBV=self.case_x * taille_sprite
                # Shrink the bar once it has turned yellow/red.
                if self.jaune==True:
                    self.xmaxBV-=20
                if self.rouge==True:
                    self.xmaxBV-=35
                canvas.coords(self.monstre,self.x,self.y)
                canvas.coords(self.barre_vie,self.xBV+10,self.yBV+45,self.xmaxBV+50,self.yBV+48)
            if droite==1:
                # Step one cell right; move sprite and health bar.
                self.case_x += 1
                self.x = self.case_x * taille_sprite
                self.xBV=self.case_x * taille_sprite
                self.xmaxBV=self.case_x * taille_sprite
                if self.jaune==True:
                    self.xmaxBV-=20
                if self.rouge==True:
                    self.xmaxBV-=35
                canvas.coords(self.monstre,self.x,self.y)
                canvas.coords(self.barre_vie,self.xBV+10,self.yBV+45,self.xmaxBV+50,self.yBV+48)
        canvas.after(self.vitesse,self.deplacement)
    def indication (self):
        """Update the health-bar colour from the remaining-life percentage:
        <= 55% yellow, <= 15% red."""
        if ((100*self.vie_monstre)/self.vie_monstre_ref)<=55:
            self.color='yellow'
            self.jaune=True
        if ((100*self.vie_monstre)/self.vie_monstre_ref)<=15:
            self.jaune=False
            self.color='red'
            self.rouge=True
    def centre(self):
        """Cache a "centre" position.  NOTE(review): derived from grid cell
        indices minus 30, which mixes cell and pixel units — confirm."""
        self.centreX=self.case_x-30
        self.centreY=self.case_y-30
    def vie(self):
        """Apply one point of damage, but only to the front-most enemy
        (the first canvas id in liste_ennemi)."""
        if liste_ennemi[0]==self.monstre:
            print(self.vie_monstre)
            self.vie_monstre-=1
            print(liste_ennemi)
    def detection(self):
        """Periodic hit test: if this enemy overlaps a tower hit-box
        (canvas items tagged 'zoneT1'), take damage.  Reschedules itself
        every self.vitesse_tir ms."""
        if len(canvas.find_withtag("zoneT1"))>0:# only scan when hit-boxes exist, to avoid useless work
            bbox=canvas.bbox(self.monstre)
            if bbox is not None:
                xminM,yminM,xmaxM,ymaxM=canvas.bbox(self.monstre) # enemy bounding box
                hitbox=canvas.find_overlapping(xminM,yminM,xmaxM,ymaxM) # items colliding with the enemy
                for i in hitbox:
                    tag=canvas.gettags(i) # look for the 'zoneT1' hit-box tag
                    if len(tag)>0:
                        if tag ==('zoneT1',) or tag==('zoneT1', 'current') : # inside a tower zone: take damage
                            self.vie()
        canvas.after(self.vitesse_tir,self.detection)
class TourBleu:
    """Blue tower variant: colour tag and purchase cost.

    Bug fix: the constructor was declared as ``__int__`` (a typo), so it was
    never invoked — ``TourBleu(niveau)`` raised TypeError and ``color``/
    ``cost`` were never set.  Renamed to ``__init__``; body unchanged.
    """
    def __init__(self,niveau):
        self.color="bleu"   # tower colour identifier
        self.cost=10        # purchase price
class Tour:
    """Drag-and-drop tower: a menu icon that can be dragged from the bottom
    strip and dropped on a free wall cell ('m') of the level grid."""
    def __init__(self,niveau,xdepart,ydepart):
        # xdepart/ydepart: pixel position of the tower icon in the menu.
        self.xdepart=xdepart
        self.ydepart=ydepart
        self.niveau=niveau
        # Keep PhotoImage references alive (Tk would otherwise GC them).
        self.tour1_menu_image= PhotoImage(file="images/tour1_menu.gif")
        self.tour1_image=PhotoImage(file="images/tour1.gif")
        self.tour1_menu=canvas.create_image(xdepart,ydepart,anchor=NW,image=self.tour1_menu_image)
        self.list_tour=[]   # [case_x, case_y] of every tower already placed
        # Tower fire rate, copied from the module-level constant.
        self.vitesse_tir=vitesse_tir
    def clic(self,event):
        """ Left-click handler: start a drag when the menu icon is hit. """
        global DETECTION_CLIC_SUR_OBJET
        # mouse pointer position
        X = event.x
        Y = event.y
        # bounding box of the menu icon
        [xmin,ymin,xmax,ymax] = canvas.bbox(self.tour1_menu)
        if xmin<=X<=xmax and ymin<=Y<=ymax:
            DETECTION_CLIC_SUR_OBJET = True
            # Spawn the range circle and a draggable copy of the tower.
            self.cercleT1=canvas.create_oval(X-centre_cercleT1,Y-centre_cercleT1,X+centre_cercleT1,Y+centre_cercleT1)
            self.tour1=canvas.create_image(X-centreTour1,Y-centreTour1,anchor=NW,image=self.tour1_image)
        else:
            DETECTION_CLIC_SUR_OBJET = False
    def drag(self,event):
        """ Drag handler (left button held): move tower and range circle,
        clamped to the window. """
        X = event.x
        Y = event.y
        if DETECTION_CLIC_SUR_OBJET == True:
            # keep the dragged tower inside the drawing area
            if X<centreTour1:X=centreTour1
            if X>largeur-centreTour1: X=largeur-centreTour1
            if Y<centreTour1: Y=centreTour1
            if Y>hauteur-centreTour1: Y=hauteur-centreTour1
            # update the dragged items' positions (drag)
            canvas.coords(self.tour1,X-centreTour1,Y-centreTour1)
            canvas.coords(self.cercleT1,X-centre_cercleT1,Y-centre_cercleT1,X+centre_cercleT1,Y+centre_cercleT1)
    def case(self,event):
        """Track which grid cell the pointer is over (floored division),
        clamped to the board limits."""
        X=event.x
        Y=event.y
        caseX= X/taille_sprite
        caseY= Y/taille_sprite
        self.caseX_Arrondi=floor(caseX)# round down
        self.caseY_Arrondi=floor(caseY)# round down
        if self.caseX_Arrondi>spriteX_max:
            self.caseX_Arrondi=spriteX_max # do not leave the board
        if self.caseX_Arrondi<0:
            self.caseX_Arrondi=0
        if self.caseY_Arrondi>spriteY_max:
            self.caseY_Arrondi=spriteY_max # do not leave the board
    def positionnement(self,event):
        """Release handler: drop the tower on the current cell if it is an
        unoccupied wall cell ('m'); otherwise cancel the drag."""
        X=event.x
        Y=event.y
        if DETECTION_CLIC_SUR_OBJET == True:
            if Y>=(spriteY_max*taille_sprite):
                # dropped on the bottom menu strip: cancel
                canvas.delete(self.tour1)
                canvas.delete(self.cercleT1)
            if Y<(spriteY_max*taille_sprite):
                # towers may only sit on wall cells
                if niveau.liste[self.caseY_Arrondi][self.caseX_Arrondi]=="0" or niveau.liste[self.caseY_Arrondi][self.caseX_Arrondi]=="d" or niveau.liste[self.caseY_Arrondi][self.caseX_Arrondi]=="a":
                    canvas.delete(self.tour1)
                    canvas.delete(self.cercleT1)
                if niveau.liste[self.caseY_Arrondi][self.caseX_Arrondi]=="m":
                    if [self.caseX_Arrondi,self.caseY_Arrondi] in self.list_tour:
                        # cell already occupied by a tower: cancel
                        canvas.delete(self.tour1)
                        canvas.delete(self.cercleT1)
                        # NOTE(review): self.hitboxT1 is never assigned (the
                        # hit-box below is stored in a local variable), so
                        # this line would raise AttributeError — confirm.
                        canvas.delete(self.hitboxT1)
                    else:
                        # snap the tower to the cell and drop the drag circle
                        canvas.coords(self.tour1,self.caseX_Arrondi*taille_sprite,self.caseY_Arrondi*taille_sprite)
                        canvas.delete(self.cercleT1)
                        # tower centre and range hit-box, in pixels
                        self.xTour=(self.caseX_Arrondi*taille_sprite)+centreTour1
                        self.yTour=(self.caseY_Arrondi*taille_sprite)+centreTour1
                        self.xminHitboxT1=((self.caseX_Arrondi*taille_sprite)+centreTour1)-centre_cercleT1
                        self.yminHitboxT1=((self.caseY_Arrondi*taille_sprite)+centreTour1)-centre_cercleT1
                        self.xmaxHitboxT1=((self.caseX_Arrondi*taille_sprite)+centreTour1)+centre_cercleT1
                        self.ymaxHitboxT1=((self.caseY_Arrondi*taille_sprite)+centreTour1)+centre_cercleT1
                        hitboxT1=canvas.create_oval(self.xminHitboxT1,self.yminHitboxT1,self.xmaxHitboxT1,self.ymaxHitboxT1,width=0,fill='',tags="zoneT1")# invisible hit-box used by Mechant.detection
                        self.liste()
    def liste(self):
        """Record the cell just used so a second tower cannot go there."""
        self.list_case=[self.caseX_Arrondi,self.caseY_Arrondi]
        self.list_tour.append(self.list_case)
    def projectile(self):
        """Create a projectile at this tower's cell and start moving it."""
        self.xminTir=(self.caseX_Arrondi*taille_sprite)+25
        self.yminTir=(self.caseY_Arrondi*taille_sprite)+25
        self.xmaxTir=(self.caseX_Arrondi*taille_sprite)+35
        self.ymaxTir=(self.caseY_Arrondi*taille_sprite)+35
        self.tir=canvas.create_oval(self.xminTir,self.yminTir,self.xmaxTir,self.ymaxTir,fill='red')
        self.deplacement_projectile()
    def deplacement_projectile(self):
        """Shift the projectile towards (centrex, centrey).
        NOTE(review): reschedules projectile(), which re-creates a new oval
        each tick rather than moving the existing one — confirm intent."""
        if self.xminTir!=centrex or self.yminTir!=centrey:
            canvas.coords(self.tir,self.xminTir+10,self.yminTir+10,self.xmaxTir+10,self.ymaxTir+10)
            canvas.after(100,self.projectile)
    def distance(self):
        """Distance from this tower to the global `mechant`.
        NOTE(review): `^` is bitwise XOR, not exponentiation — the intended
        squares would be `**2`, and the result is discarded.  Confirm."""
        d_ennemi=sqrt((self.yTour-mechant.centreY)^2+(self.xTour-mechant.centreX)^2)
def vie(mechant):
    """Refresh the given enemy's cached centre coordinates, then apply one
    point of damage to it."""
    mechant.centre()
    mechant.vie()
def vague():
    """Spawn one wave of Nb_ennemi enemies, one every 700 ms.

    Reschedules itself via canvas.after until the wave is complete; once
    every spawned enemy has left the canvas, resets for the next wave.
    NOTE(review): reads the global `niveau`, which is only assigned as a
    local inside jeu() — confirm it is defined when the wave starts.
    """
    global a,Nb_ennemi,scenario,Vague,liste_ennemi
    if a<Nb_ennemi:
        mechant=Mechant(niveau,500,5,1000)#(niveau, movement delay ms, hit points, fire interval ms)
        mechant.creation()
        mechant.deplacement()
        Vague=True
        a+=1
        canvas.after(700,vague)
    if a==Nb_ennemi and len(canvas.find_withtag("monstre1"))==0:
        # Wave over and every enemy gone: reset the spawn counter.
        a=0
        liste_ennemi=[]
def temporaire():
    """Debug helper: spawn a single slow enemy immediately.

    Bug fix: Mechant.__init__ requires four arguments
    (niveau, vitesse, vie, vitesse_tir); the original call passed only
    three, raising TypeError.  1000 ms matches the fire interval used
    by vague().
    """
    mechant=Mechant(niveau,2000,3,1000)
    mechant.creation()
    # NOTE(review): creation() already calls detection(), so the explicit
    # detection() below schedules the damage check twice — confirm intent.
    mechant.deplacement()
    mechant.detection()
def clic_gauche(event):
    """Left click: forward to the tower-menu click handler."""
    tour.clic(event)
def drag(event):
    """Mouse drag: move the tower being placed and track the grid cell."""
    tour.drag(event)
    tour.case(event)
def relacher(event):
    """Button release: try to drop the tower on the current cell."""
    tour.positionnement(event)
# NOTE(review): `canvas` is only created later (inside the
# `if lancer_le_jeu == 1:` block) and `tour` only inside jeu(), so these
# module-level bind calls run before they exist — confirm/move them after
# the canvas is created.
canvas.bind('<B1-Motion>',drag) # left button held down (drag) event
canvas.bind('<ButtonRelease-1>',relacher)
def verif(event):
    """<Escape> handler: ask for confirmation, then quit the game."""
    # Bug fix: messagebox is a tkinter *submodule* and is not exported by
    # `from tkinter import *`, so the original call raised NameError.
    from tkinter import messagebox
    saving = messagebox.askokcancel('Quitter ?', 'Êtes-vous certain ?')
    if saving :
        global quitter
        quitter = True
        fenetre.destroy()
def clic2(event):
    """Debug handler: echo the coordinates of a left click on the canvas."""
    print(event.x, event.y)
def quitter_jeu():
    """Mark the game as quit and close the main window."""
    global quitter
    quitter = True
    fenetre.destroy()
def destroy():
    """Tear down the start-menu widgets before the game screen is built."""
    # NOTE(review): `fond` is a PhotoImage, not a widget — confirm that
    # calling destroy() on it does not raise at runtime.
    fond.destroy()
    bouton_jouer.destroy()
    bouton_quitter.destroy()
    # Bug fix: the canvas "delete everything" tag is lowercase "all";
    # "ALL" matched no items, so the menu drawings were never cleared.
    canvas.delete("all")
def fenetre_debut():
    """"Jouer" button callback: clear the menu, then build the game screen."""
    destroy()
    jeu()
def jeu():
    """Build the game screen: level, tower menu, life counter and buttons."""
    niveau = Niveau(choix)
    niveau.generer()
    niveau.afficher()
    print("2")
    # placed here so the life total resets after the window was closed
    vie_joueur = 5
    # NOTE(review): niveau/mechant/tour are locals here, but the module-level
    # handlers (clic_gauche, vague, Mechant.deplacement, ...) read them as
    # globals — confirm name resolution at runtime.
    mechant=Mechant(niveau,0,0,0)
    tour= Tour(niveau,80,800)
    canvas.bind('<Button-1>',clic_gauche)
    # Remaining-lives HUD, embedded in the canvas.
    compteur_vie = Label (canvas, text="Vies restantes : " +str(vie_joueur), font="bold", fg="red", bg="grey")
    compteur_vie.pack()
    canvas.create_window(1200,900,window=compteur_vie)
    Button(fenetre,text="LANCER",command=vague, anchor=S).pack()
    Button(fenetre, text="Menu", command=fenetre.destroy,anchor=S).pack()
    if quitter==True :
        quitter_jeu()
################################################ Variables #########################################################
choix = 'niveaux1'                 # level file to load
taille_sprite=60                   # tile size in pixels
spriteX_max=24                     # grid width, in tiles
spriteY_max=13                     # grid height, in tiles
centreTour1=30                     # half-size of a tower sprite (px)
tailleCercle1=170
largeur=1500                       # canvas width (px)
hauteur=960                        # canvas height (px)
DETECTION_CLIC_SUR_OBJET = False   # True while a tower is being dragged
vitesse_tir=500                    # tower fire interval (ms)
centre_cercleT1=150                # tower range-circle radius (px)
Nb_ennemi=3                        # enemies per wave
a=0                                # enemies spawned so far in the wave
scenario=0
Vague=True
liste_ennemi=[]                    # canvas ids of the live enemies
centrex=1400                       # projectile target x (px)
centrey=500                        # projectile target y (px)
game_over = False
quitter=False                      # set by quitter_jeu()/verif()
lancer_le_jeu=1                    # 1 = build the menu screen at startup
############################################## Principal ##########################################################
######## fenêtre menu #########
# Main window: full screen, <Escape> asks for quit confirmation.
fenetre = Tk()
fenetre.attributes("-fullscreen",1)
fenetre.bind("<Escape>", verif)
if lancer_le_jeu == 1:
    canvas=Canvas(fenetre,background="white", width=largeur, height=hauteur)
    canvas.pack(fill="both", expand=1)
    canvas.bind('<Button-1>', clic2)
    # menu images (kept in module-level names so Tk does not GC them)
    fond = PhotoImage(file="images/fond menu.gif")
    bouton1 = PhotoImage(file = "images/bouton jouer.gif")
    bouton2 = PhotoImage(file = "images/bouton quitter.gif")
    titre = PhotoImage(file = "images/titre.gif")
    # background image and title
    canvas.create_image(0,0,image=fond, anchor=NW)
    canvas.create_image(781,438, image=titre)
    #canvas.create_image(1000,200,image=bouton)
    #bouton_jouer = Button(canvas,image=bouton)
    # if the player has lost, the button becomes "replay"
    if game_over == True :
        Button(canvas, text="Rejouer ?", command=fenetre.destroy,anchor=N).pack()
    else :
        bouton_jouer = Button(canvas,image=bouton1,command=fenetre_debut)
        bouton_jouer.place(x=100, y= 600)
        bouton_quitter = Button(canvas,image=bouton2,command=quitter_jeu)
        bouton_quitter.place(x=900, y=590)
# Blocks until the window is closed.
fenetre.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
2ff3d6352d5241a08ded28a3f45e2e30c903eee7 | 1841c29ffb26901bc7830b2ce4ea712197f1b740 | /models/GraphNN/DTNN.py | e2ad09c182e6617d8bbdf55b57b5fb2b13b136e6 | [
"MIT"
] | permissive | chubbymaggie/Deep_Learning_On_Code_With_A_Graph_Vocabulary | 756bdd54b17d351d31200cc0ceacf8f639e0c678 | 29ee2fdffc5bc05582a91025697e256980e75ef2 | refs/heads/master | 2020-03-28T12:33:33.820671 | 2018-09-10T22:54:14 | 2018-09-10T22:54:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,446 | py | # Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
from collections import OrderedDict
from mxnet import gluon
from models.FITB.FITBModel import FITBModel
from models.GraphNN.MPNN import MPNN
class DTNN(MPNN):
    '''
    Deep Tensor Neural Network from https://www.nature.com/articles/ncomms13890

    Message-passing network: per-edge-type learned weightings gate the node
    hidden states, and updates are a simple additive residual.
    '''
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.hidden_size = kwargs['hidden_size']
        # Initializing model components
        with self.name_scope():
            # Dense projections applied to hidden states and to the
            # (hidden * edge-weighting) products in compute_messages().
            self.hidden_message_dense = gluon.nn.Dense(self.hidden_size, in_units=self.hidden_size)
            self.hidden_and_edge_dense = gluon.nn.Dense(self.hidden_size, in_units=self.hidden_size)
            # One learned (1, hidden_size) weighting per edge type.
            self.edge_type_weightings = OrderedDict()
            for t in self.data_encoder.all_edge_types:
                edge_type_weighting = self.params.get('edge_type_weighting_{}'.format(t), grad_req='write',
                                                      shape=(1, self.hidden_size))
                self.__setattr__('edge_type_weighting_{}'.format(t), edge_type_weighting)
                self.edge_type_weightings[t] = edge_type_weighting
            if FITBModel in self.__class__.mro():
                # Two-layer MLP used by readout() for the FITB task.
                self.readout_mlp = gluon.nn.HybridSequential()
                with self.readout_mlp.name_scope():
                    self.readout_mlp.add(gluon.nn.Dense(self.hidden_size, activation='tanh', in_units=self.hidden_size))
                    self.readout_mlp.add(gluon.nn.Dense(1, in_units=self.hidden_size))
    def compute_messages(self, F, hidden_states, edges, t):
        # Project hidden states once, then for each edge type gate them with
        # the per-type weighting, squash with tanh, and aggregate over
        # neighbors via the adjacency matrix; final message is the sum over
        # all edge types.
        hidden_states = self.hidden_message_dense(hidden_states)
        summed_msgs = []
        for key in self.edge_type_weightings.keys():
            adj_mat, edge_type_weighting = edges[key], self.edge_type_weightings[key]
            # Compute the messages passed for this edge type
            passed_msgs = F.tanh(
                self.hidden_and_edge_dense(hidden_states * edge_type_weighting.data()))  # n_vertices X hidden_size
            # Sum messages from all neighbors
            summed_msgs.append(F.dot(adj_mat, passed_msgs))
        summed_msgs = F.sum(F.stack(*summed_msgs), axis=0)
        return summed_msgs
    def update_hidden_states(self, F, hidden_states, messages, t):
        # DTNN update rule: additive residual, no gating.
        return hidden_states + messages
    def readout(self, F, hidden_states):
        # Per-node scalar score from the readout MLP (FITB task).
        return self.readout_mlp(hidden_states)
| [
"mwcvitkovic@gmail.com"
] | mwcvitkovic@gmail.com |
f3e029ef5acbe8e796a4ba75d99292456d5d7dd7 | 8832f83436809e8e918e60e5526d95add9fe8dbd | /books_app/migrations/0069_auto_20191002_1610.py | 825b2b23a78d57c127bd9697fe680eaecabd9d58 | [] | no_license | HCDigitalScholarship/booksofduchesses | e31e56eaba253b92a1362de5918b5b005cb27f3c | 3f0e27515963c92a56714c5bada3b6a68a8665df | refs/heads/master | 2022-12-09T18:41:20.019687 | 2021-10-25T14:58:18 | 2021-10-25T14:58:18 | 190,254,161 | 0 | 3 | null | 2022-12-08T05:21:54 | 2019-06-04T18:05:08 | Python | UTF-8 | Python | false | false | 849 | py | # Generated by Django 2.2.2 on 2019-10-02 16:10
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("books_app", "0068_auto_20190930_1758")]
operations = [
migrations.AddField(
model_name="text",
name="estc_link",
field=models.CharField(
blank=True, max_length=800, verbose_name="ESTC Link"
),
),
migrations.AlterField(
model_name="text",
name="ihrt_link",
field=models.CharField(blank=True, max_length=800),
),
migrations.AlterField(
model_name="text",
name="me_compendium_link",
field=models.CharField(
blank=True, max_length=200, verbose_name="ME Compendium Link"
),
),
]
| [
"apjanco@gmail.com"
] | apjanco@gmail.com |
a901c6830b2d98e5814d5c7d028e4744d7464039 | fa8344a5f37ffd5f10bef00a6f7cab6acc6382f7 | /apps/family/views.py | 5b69f137536d5f86284785fb0032fdfff6f28e77 | [] | no_license | hustels/django-crud-ajax-demo | 2fcd8d3e1c050a76ec1d7e16cfd04fbf1175d8fa | 06de79bf60918b7e270491e4d5cbc7e1422ffd23 | refs/heads/master | 2021-01-12T19:43:44.480199 | 2011-11-16T01:07:31 | 2011-11-16T01:07:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,466 | py | # -*- coding: utf-8 -*-
from django.views.generic.simple import direct_to_template
from django.http import HttpResponse
from django.db.models import Q
from family.models import People
from family.forms import PeopleForm, ImporterForm
def index(request, form=None, peoples=None, importer_form=None):
    """Render the family-tree page; missing pieces get fresh defaults."""
    if form is None:
        form = PeopleForm()
    if peoples is None:
        peoples = People.objects.all()
    if importer_form is None:
        importer_form = ImporterForm()
    context = {'form': form, 'importer_form': importer_form, 'peoples': peoples}
    return direct_to_template(request, 'family_tree.html', context)
def add(request):
    """Create a People row from the posted form, then re-render the page.

    On a valid POST the form is saved and replaced by an empty one; an
    invalid form is re-shown with its errors.
    """
    if request.POST:
        form = PeopleForm(request.POST)
        if form.is_valid():
            form.save()
            form = PeopleForm()
        return index(request, form)
    return HttpResponse('Fill the form')
def filter(request):
    """Search people whose first or last name contains ?s= (case-insensitive).

    NOTE: the name shadows the builtin ``filter``; kept because URL confs
    reference this view by name.
    """
    if request.GET:
        s = request.GET.get('s')
        peoples = People.objects.filter(Q(first_name__icontains=s) | Q(last_name__icontains=s))
        return index(request, None, peoples)
    return HttpResponse('Enter a valid string to search')
def importer(request):
    """Handle the file-import form.

    NOTE(review): a valid upload is only validated, never read or saved —
    the form is simply replaced by an empty one.  Confirm this is intended.
    """
    if request.POST:
        form = ImporterForm(request.POST, request.FILES)
        if form.is_valid():
            form = ImporterForm()
        return index(request, None, None, form)
    return HttpResponse('Select the file')
| [
"gustavo@gustavohenrique.net"
] | gustavo@gustavohenrique.net |
7725709e661cfee7b68604a65aeaac90af786093 | 96818d4bb3803a386026ec59d2e2a653c7dff324 | /NIT_Admission_System/src/NIT_Admission_System/urls.py | c1d67ff6bd8fa97fcded86db1f8fa613d434c5f4 | [
"MIT"
] | permissive | abhijithanilkumar/NAS | 4211405ad619474f4590aaddcffc5206e849ee2f | b8c766201f66696a296b5a76a968214d500254b9 | refs/heads/master | 2020-04-06T03:53:26.511287 | 2015-06-16T09:46:15 | 2015-06-16T09:46:15 | 32,319,542 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 702 | py | from django.conf.urls import patterns, include, url
from django.contrib import admin
from django.conf import settings
from django.conf.urls.static import static
import profiles.urls
import accounts.urls
from . import views
# Route table: home/about pages plus the accounts, profiles and admin apps.
urlpatterns = patterns(
    '',
    url(r'^$', views.HomePage.as_view(), name='home'),
    url(r'^about/$', views.AboutPage.as_view(), name='about'),
    url(r'^', include(accounts.urls, namespace='accounts')),
    url(r'^users/', include(profiles.urls, namespace='profiles')),
    url(r'^admin/', include(admin.site.urls)),
    )
# User-uploaded files like profile pics need to be served in development
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"abhijithanilkumar@live.com"
] | abhijithanilkumar@live.com |
7e6327db88706423541fdcd736a385bd77c5cb65 | 4cbe1145ef29aab3ef17c5ebd74dd94c2fece7c7 | /Guess_the_country_name.py | 90fcc438f7e08aaa6de061bf0db40eacbdddfe9e | [
"MIT"
] | permissive | jyothi1910/Guess_the_country_name | 2903b4d943b8d9495559c8d68756fbc9ce88e0d8 | fc154488a4f3ff6aa7e27a127963f2c5a4c42628 | refs/heads/main | 2023-06-06T09:53:28.414026 | 2021-06-21T07:00:16 | 2021-06-21T07:00:16 | 378,835,740 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 528 | py |
import random
Country_names=["India","Korea","Japan","China","USA"]
print(Country_names)
# Pick the answer at random from the list above.
randomName = random.choice(Country_names)
# The player gets three attempts (guessTaken = 1, 2, 3).
for guessTaken in range(1,4):
    guess=input("Take a guess:")
    if guess != randomName:
        print("Your guess is wrong, try another guess")
    elif guess == randomName:
        print("Well done you guess is correct, you guessed country name in ",end="")
        print(str(guessTaken)+" guess")
        break
else:
    # for/else: runs only when the loop finished without a correct guess.
    print("Nope, The correct country name is "+randomName)
| [
"noreply@github.com"
] | noreply@github.com |
da05f206093955bc97ef19a62bc0a70e69711fc6 | 5e9dacbb7a9613b7c8d8c92398bb66926a314c38 | /script.py | ecff88305875f987118660b170ce2849290d9f87 | [] | no_license | pol9111/tencent_WechatOffAcc_auto | 645b95bfd893706df4651f1e8f67ea1dc57a03de | 3aa2a9a8a78796d5b829f9bf49cc849713ed41b7 | refs/heads/master | 2020-03-24T16:24:08.783424 | 2018-07-30T04:07:14 | 2018-07-30T04:07:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,250 | py | import json
import pymongo
import re
from config import *
def response(flow):
    """mitmproxy response hook: scrape WeChat article metrics and upsert
    them into MongoDB keyed by article title.

    Relies on module-level globals so values captured from the stats
    endpoint and the article page (two separate responses) can be combined.
    """
    global like_num, title, pub_time, read_num, comment_num
    client = pymongo.MongoClient(MONGO_URL)
    db = client[MONGO_DB]
    table = db[MONGO_COLLECTION]
    # WeChat ad/stats JSON endpoint: carries read and like counts.
    url_msg = 'mp.weixin.qq.com/mp/getappmsgext?'
    if url_msg in flow.request.url:
        text_msg = flow.response.text
        data_py = json.loads(text_msg)
        content = data_py.get('appmsgstat')
        like_num = content.get('like_num')
        read_num = content.get('read_num')
        comment_num = data_py.get('comment_count')
    # Article page response: regex out the title and publish date.
    url_article = 'mp.weixin.qq.com/s?'
    if url_article in flow.request.url:
        text_arti = flow.response.text
        pub_time = re.findall(r'publish_time.*"(\d+-\d+-\d+)".*', text_arti)[0]
        title = re.findall(r'msg_title\s=\s"(.*?)";', text_arti)[0]
        data = {
            '文章标题': title,
            '发布时间': pub_time,
            '阅读数': read_num,
            '点赞数': like_num,
            '评论数': comment_num,
        }
        print(data)
        # Upsert (last arg True) keyed on the article title.
        table.update({'文章标题': title}, {'$set': data}, True)
| [
"biscuit36@163.com"
] | biscuit36@163.com |
f9da954cdcb17cee51e9d873568d288fdf2c9cdb | f6f29c2fa719c53eee73de2acd86db9e1278182e | /code_wars/calculating_with_functions.py | a3f2c7e84a244f5b3dd4d6052494c5ab40d538cb | [] | no_license | byt3-m3/python_code_practice | ca08320e1778449d30204b65f15903d5830b7975 | 40e215c4d4ab62cf7d55d2456d94550335825906 | refs/heads/master | 2023-07-24T08:29:06.624850 | 2021-09-04T02:39:32 | 2021-09-04T02:39:32 | 256,984,457 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,290 | py | '''
This time we want to write calculations using functions and get the results. Let's have a look at some examples:
seven(times(five())) # must return 35
four(plus(nine())) # must return 13
eight(minus(three())) # must return 5
six(divided_by(two())) # must return 3
Requirements:
There must be a function for each number from 0 ("zero") to 9 ("nine")
There must be a function for each of the following mathematical operations: plus, minus, times, dividedBy (divided_by in Ruby and Python)
Each calculation consists of exactly one operation and two numbers
The outermost function represents the left operand, the innermost function represents the right operand
Division should be integer division. For example, this should return 2, not 2.666666...:
'''
def _process(data, base):
num = data[0]
oper = data[1]
if oper == "*":
return base * num
if oper == "/":
return base // num
if oper == "+":
return base + num
if oper == "-":
return base - num
def _digit(value, data):
    """Shared body of the digit functions below.

    With no operation tuple the digit's own value is returned; with a
    tuple from plus/minus/times/divided_by, the operation is applied via
    _process with the digit as the left operand.  Factored out to remove
    the tenfold copy-paste of the original implementations.
    """
    if isinstance(data, tuple):
        return _process(data, value)
    return value
def zero(data=None):
    return _digit(0, data)
def one(data=None):
    return _digit(1, data)
def two(data=None):
    return _digit(2, data)
def three(data=None):
    return _digit(3, data)
def four(data=None):
    return _digit(4, data)
def five(data=None):
    return _digit(5, data)
def six(data=None):
    return _digit(6, data)
def seven(data=None):
    return _digit(7, data)
def eight(data=None):
    return _digit(8, data)
def nine(data=None):
    return _digit(9, data)
def plus(num):
    """Tag `num` as the right operand of an addition."""
    return num, "+"
def minus(num):
    """Tag `num` as the right operand of a subtraction."""
    return num, "-"
def times(num):
    """Tag `num` as the right operand of a multiplication."""
    return num, "*"
def divided_by(num):
    """Tag `num` as the right operand of an integer division."""
    return num, "/"
# Demo: 1 - 5 and 5 * 7 evaluated through the function chain.
result_1 = one(minus(five()))
result_2 = five(times(seven()))
print(result_1)
print(result_2)
| [
"cbaxtertech@gmail.com"
] | cbaxtertech@gmail.com |
d3a11b03a1299ac463f87eea644d12b3ad7ee1da | d29baf1efe018a9f30280cdfb6fabb553ea27b86 | /pruebas/pruebas aisladas/pestañas y su ejecucion.py | 80210068f93a827fdb363021c4ad02d5e4a5ead7 | [] | no_license | ColqueRicardo/v-version | 7c3bd074a5ce9e3b774d1cbd95a4059683d7b46e | 45fd067443e084f510f69053a70507956edae0a2 | refs/heads/master | 2023-01-22T22:56:03.732007 | 2020-12-07T02:41:36 | 2020-12-07T02:41:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,261 | py | from tkinter import *
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
from pynput.mouse import Button as Mouse_button, Controller
import xml.etree.ElementTree as ET
def archivoxml(ruta="C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml"):
    """Create the initial XML settings file.

    Writes a document of the form
    <base><ventana name="ventana-consultas"><ventana-hide option-hide="0"/></ventana></base>
    and echoes the tree to stdout for debugging.

    Args:
        ruta: destination path of the XML file.  Defaults to the original
            hard-coded location, so existing no-argument calls keep working.
    """
    bd = ET.Element("base")
    ventana = ET.SubElement(bd, "ventana", name="ventana-consultas")
    ventana_hide = ET.SubElement(ventana, "ventana-hide", )
    ventana_hide.set("option-hide", "0")
    ET.dump(bd)
    tree = ET.ElementTree(bd)
    tree.write(ruta)
def modificar_leer_xml(ruta="C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml"):
    """Read the settings file, print its contents and rewrite every <ventana>.

    Each <ventana> element gets its text replaced by "nuevotexto" and its
    "option-hide" attribute forced to "1"; the file is rewritten in place.

    Args:
        ruta: path of the XML file.  Defaults to the original hard-coded
            location, so existing no-argument calls keep working.
    """
    estructura_xml = ET.parse(ruta)
    # Fetch the root element:
    raiz = estructura_xml.getroot()
    print("primer for")
    for elemento_hijo in raiz:
        print(elemento_hijo)
    print("segundo for")
    for ventana in raiz.findall("ventana"):
        print(ventana.find("ventana-consultas"))
    for ventana in raiz.iter('ventana'):
        ventana.text = "nuevotexto"
        ventana.set("option-hide", "1")
        print(ventana.get("option-hide"))
    estructura_xml.write(ruta)
def opciones(event):
    """Right-click handler: pop the context menu up at the mouse position."""
    mouse= Controller()
    # Synthesize a left click via pynput so the popup can take focus.
    mouse.click(Mouse_button.left,1)
    # NOTE(review): "{0}x{0}" uses the same placeholder twice, so the second
    # format() argument (40) is ignored and the geometry is 175x175 — confirm.
    Varialbe_ubicacion = "{0}x{0}+" + str(vent.winfo_pointerx()) + "+" + str(vent.winfo_pointery())
    '''root.geometry("{0}x{0}+0+0".format(root.winfo_screenwidth(), root.winfo_screenheight()))'''
    opciones_vent.geometry(Varialbe_ubicacion.format(175, 40))
    opciones_vent.deiconify()
    '''a window has been opened: update the XML flag'''
def crear():
    """Context-menu "Crear": add a new "Consulta" tab holding a full-size
    Entry, then hide the popup.  frame[i]/consulta[i] track tab i."""
    frame.append(Frame(note))
    frame[len(frame)-1].bind("<<FocusIn>>", f)
    note.add(frame[len(frame) - 1], text="Consulta")
    consulta.append(Entry(frame[len(frame)-1]))
    consulta[len(consulta)-1].place(x=0,y=0,relheight=1,relwidth=1)
    opciones_vent.withdraw()
    ''' print("framme")
    print(frame)
    print("text")
    print(consulta)
    '''
def hola():
    """Print the index of the notebook tab under the mouse (0 when none)."""
    try:
        indice_notebook_general = note.index(tk.CURRENT)
    except TclError:
        # Notebook.index raises TclError when no tab is under the pointer.
        # The original bare `except:` also swallowed KeyboardInterrupt etc.
        indice_notebook_general = 0
    print(indice_notebook_general)
def cerrar():
    """Context-menu "Cerrar": close the clicked tab and drop its
    frame/entry bookkeeping, then hide the popup."""
    note.select(note.index(tk.CURRENT))
    note.forget(note.index(tk.CURRENT))
    # NOTE(review): after forget() the tab is gone, so note.index(tk.CURRENT)
    # below may no longer resolve to the same index — confirm behaviour.
    frame.pop(note.index(tk.CURRENT))
    consulta.pop(note.index(tk.CURRENT))
    print(len(consulta))
    print(len(frame))
    opciones_vent.withdraw()
def opciones_withdraw():
    """Toggle the popup-visibility flag stored in the XML file.

    Flag "1" means the popup is open: it gets reset to "0" and the popup is
    hidden; otherwise the flag is set to "1".  The file is rewritten.
    """
    '''a window has been opened: update the XML flag'''
    estructura_xml = ET.parse("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
    # Fetch the root element:
    raiz = estructura_xml.getroot()
    for ventana in raiz.iter('ventana'):
        if int(ventana.get("option-hide"))==1:
            ventana.set("option-hide","0")
            opciones_vent.withdraw()
        else:
            ventana.set("option-hide","1")
    estructura_xml.write("C:/Users/ricar/Desktop/pruebas v1/pruebasv1.xml")
def esconder_opciones(event):
    """Hide the context menu (focus-out / left-click handler)."""
    opciones_withdraw()
def f(event):
    """Debug: print the index of the tab that received focus."""
    print("focus")
    print(note.index(tk.CURRENT))
    ''' print(str(note.index(ttk.CURRENT())))'''
def notebook_cambio(event):
    """Debug: print the selected tab index on <<NotebookTabChanged>>."""
    print(note.index(tk.CURRENT))
    print("cambio")
'''ButtonRelease-1'''
'''ubicacion[0]--> x axis, [1]--> y axis'''
def ver(): 
    """Debug: print all notebook tabs and their configuration."""
    print(note.tabs())
    for i in range(len(note.tabs())):
        print(note.tab(i))
    ''' print(note.identify(x,y))'''
    ''' print(note.tab(0, option=identify(x,y)))
    '''
# Shared per-tab bookkeeping: frame[i] / consulta[i] belong to tab i.
frame=[]
consulta=[]
# Borderless popup used as the right-click context menu.
opciones_vent= Tk()
butons= Button(opciones_vent)
boton_crear = Button(opciones_vent, text="Crear", command=crear,cursor="hand2",relief=FLAT)
boton_crear.place(x=0,rely=0,relheight=0.2,relwidth=1)
boton_cerrar = Button(opciones_vent, text="Cerrar", command=cerrar,cursor="hand2",relief=FLAT)
boton_cerrar.place(x=0,rely=0.2,relheight=0.2,relwidth=1)
opciones_vent.overrideredirect(1)   # no window decorations
opciones_vent.geometry("200x100")
opciones_vent.withdraw()            # hidden until a right click
opciones_vent.bind("<FocusOut>",esconder_opciones)
# NOTE(review): two Tk() roots exist (opciones_vent and vent); tkinter
# normally expects one Tk root plus Toplevel windows — confirm intended.
vent= Tk()
vent.geometry("500x250")
note = ttk.Notebook(vent)
note.pack(fill="both",expand="yes")
note.bind("<3>",opciones)            # right click opens the context menu
vent.bind("<1>",esconder_opciones)   # left click elsewhere hides it
note.bind("<<NotebookTabChanged>>",notebook_cambio)
boton=Button(vent,command=ver,text="ver").pack()  # NOTE(review): pack() returns None, so `boton` is None
vent.mainloop()
| [
"ricardocolquerc@gmail.com"
] | ricardocolquerc@gmail.com |
2624fc2cc2558380db72259518fe7ca467c5f0a4 | 5fa98b709c68c1f8c8aa8914b9ec790f21c60c98 | /tests/test_updates.py | 229c23a3f617313abddd80dde8a374f78913e73a | [
"MIT"
] | permissive | Nextpertise/versionalchemy | b0085d1b383e5ff2d13fe0caa6b824f33476de0f | ba14a8f3369db568e7eb300630e96ef8c67b79a5 | refs/heads/master | 2020-05-15T12:30:19.228686 | 2019-07-05T08:19:48 | 2019-07-05T08:19:48 | 182,266,873 | 0 | 1 | MIT | 2019-04-29T13:37:12 | 2019-04-19T13:13:32 | Python | UTF-8 | Python | false | false | 5,844 | py | import os
import sqlalchemy as sa
from sqlalchemy.exc import IntegrityError
from sqlalchemy.orm import sessionmaker
import unittest
from tests.models import (
ArchiveTable,
Base,
UserTable,
)
from tests.utils import (
SQLiteTestBase,
VaTestHelpers,
)
class TestUpdate(SQLiteTestBase):
    """Versioning behaviour of single-session updates to UserTable rows."""
    def test_product_update(self):
        # An update bumps the version and archives both the old and new state.
        p = UserTable(**self.p1)
        self._add_and_test_version(p, 0)
        p.col1 = 'new'
        p.col2 = -1
        self._add_and_test_version(p, 1)
        self._verify_row(dict(self.p1, **{
            'col1': 'new',
            'col2': -1
        }), 1)
        self._verify_archive(self.p1, 0)
        self._verify_archive(dict(self.p1, **{
            'col1': 'new',
            'col2': -1,
        }), 1, log_id=p.va_id)
    def test_product_update_fails(self):
        """
        Insert a product. Construct a new ORM object with the same id as the inserted object
        and make sure the insertion fails.
        """
        # Initial product insert
        p = UserTable(**self.p1)
        self._add_and_test_version(p, 0)
        # Create a new row with the same primary key and try to insert it
        p_up = dict(
            col1='newcol',
            col2=5,
            col3=False,
            product_id=10,
        )
        p_up_row = UserTable(**p_up)
        with self.assertRaises(IntegrityError):
            self._add_and_test_version(p_up_row, 1)
    def test_update_no_changes(self):
        '''
        Add an unchanged row and make sure the version does not get bumped.
        '''
        p = UserTable(**self.p1)
        self._add_and_test_version(p, 0)
        # Re-assign the same value: no actual change, so no new version.
        p.col1 = self.p1['col1']
        self.session.add(p)
        self.session.commit()
        self._verify_archive(self.p1, 0)
        self.assertEqual(len(self.session.query(ArchiveTable).all()), 1)
    def test_multiple_product_updates(self):
        """
        Update a product multiple times and ensure each one gets
        correctly versioned.
        """
        p = UserTable(**self.p1)
        self._add_and_test_version(p, 0)
        p.col1 = 'new'
        p.col2 = -1
        self._add_and_test_version(p, 1)
        p.col1 = 'third change'
        p.col2 = 139
        p.col3 = False
        self._add_and_test_version(p, 2)
        # Live row reflects the latest state; archive holds every version.
        self._verify_row(dict(self.p1, **{
            'col1': 'third change',
            'col2': 139,
            'col3': False,
        }), 1)
        self._verify_archive(self.p1, 0)
        self._verify_archive(dict(self.p1, **{
            'col1': 'new',
            'col2': -1,
        }), 1)
        self._verify_archive(dict(self.p1, **{
            'col1': 'third change',
            'col2': 139,
            'col3': False,
        }), 2, log_id=p.va_id)
    def test_product_update_with_user(self):
        # updated_by() attributions are recorded per archived version.
        p = UserTable(**self.p1)
        p.updated_by('test_user1')
        self._add_and_test_version(p, 0)
        p.col1 = 'new'
        p.col2 = -1
        p.updated_by('test_user2')
        self._add_and_test_version(p, 1)
        self._verify_row(dict(self.p1, **{
            'col1': 'new',
            'col2': -1
        }), 1)
        self._verify_archive(self.p1, 0, user='test_user1')
        self._verify_archive(dict(self.p1, **{
            'col1': 'new',
            'col2': -1,
        }), 1, user='test_user2', log_id=p.va_id)
class TestConcurrentUpdate(unittest.TestCase, VaTestHelpers):
    """Versioning behavior when two engines/sessions touch one SQLite file."""
    # Shared on-disk database so both engines see the same data.
    DATABASE_URL = 'sqlite:///test.db'
    def __init__(self, methodName='runTest'):
        # Two independent engines simulate two concurrent clients; READ
        # UNCOMMITTED lets each session observe the other's in-flight writes.
        self.engine1 = sa.create_engine(
            self.DATABASE_URL,
            isolation_level='READ UNCOMMITTED',
            echo='debug',
            logging_name='engine1'
        )
        self.engine2 = sa.create_engine(
            self.DATABASE_URL,
            isolation_level='READ UNCOMMITTED',
            echo='debug',
            logging_name='engine2'
        )
        self.Session1 = sessionmaker(bind=self.engine1)
        self.Session2 = sessionmaker(bind=self.engine2)
        Base.metadata.create_all(self.engine1)
        # NOTE(review): register() is invoked twice with engine1 — the second
        # call looks like it was meant to bind engine2; confirm.
        UserTable.register(ArchiveTable, self.engine1)
        UserTable.register(ArchiveTable, self.engine1)
        self.p1 = dict(product_id=10, col1='foobar', col2=10, col3=True)
        super(TestConcurrentUpdate, self).__init__(methodName)
    def tearDown(self):
        """Empty both tables and dispose of engines after each test."""
        delete_cmd = 'delete from {}'
        self.engine1.execute(delete_cmd.format(UserTable.__tablename__))
        self.engine1.execute(delete_cmd.format(ArchiveTable.__tablename__))
        self.Session1.close_all()
        self.Session2.close_all()
        self.engine1.dispose()
        self.engine2.dispose()
    @classmethod
    def tearDownClass(cls):
        # Remove the shared on-disk database once the whole class is done.
        os.remove('test.db')
    def test_concurrent_product_updates(self):
        """
        Assert that if two separate sessions try to update a product row,
        one succeeds and the other fails.
        """
        p1 = UserTable(**self.p1)
        # Create two sessions
        session1 = self.Session1()
        session2 = self.Session2()
        # Add the initial row and flush it to the table
        session1.add(p1)
        session1.commit()
        # Update 1 in session1
        p1.col1 = 'changed col 1'
        session1.add(p1)
        # Update 2 in session 2
        p2 = session2.query(UserTable).all()[0]
        p2.col2 = 1245600
        session2.add(p2)
        # this flush should succeed
        session2.commit()
        session2.close()
        # this flush should fail
        # NOTE(review): despite the comment above, no exception is asserted
        # here — both commits execute and the final state is verified below.
        session1.commit()
        session1.close()
        final = dict(self.p1, **{'col1': 'changed col 1', 'col2': 1245600})
        self._verify_row(final, 2, session=session1)
        # Archive history: initial insert, session2's update, session1's update.
        history = [self.p1, dict(self.p1, **{'col2': 1245600}), final]
        for i, expected in enumerate(history):
            self._verify_archive(expected, i, session=session1)
| [
"akshay@nerdwallet.com"
] | akshay@nerdwallet.com |
8ffd467913fac03b01efbb61d71509207cf1337c | c5cee987dd9fc79778ae0804048af13f76d7567f | /perTreatment.py | 9b3b7a6b280e3e55d11461e487913dfcd2ed9ed5 | [] | no_license | LingGuguang/new-word-discover | 063dcdf0cc3d95be2306ed8405e3c8cf43d8e829 | 37640afeb8c10216fb44d05d7ed7a1171910f710 | refs/heads/master | 2020-06-01T16:25:11.812212 | 2019-06-08T06:07:35 | 2019-06-08T06:07:35 | 190,849,114 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 667 | py | from Delete import Delete
import pandas as pd
import jieba
import re
# #以下为弹幕去重
# Clean the raw danmu (bullet-chat) dump: drop duplicates, strip digits and
# punctuation (ASCII and full-width), then keep entries longer than 2 chars.
source_path = 'danmu.txt'
output_path = 'save.txt'

# Read every line, dropping the trailing newline character.
with open(source_path, 'r', encoding='utf-8') as source:
    lines = [raw[:-1] for raw in source.readlines()]

# Deduplicate via the project's Delete helper.
lines = Delete(lines).delete()

# Replace digits, whitespace and punctuation with spaces, then squeeze the
# spaces out entirely.
pattern = r"[0-9\s+\.\!\/_,$%^*()?;;:-【】+\"\']+|[+——!,;:。?、~@#¥%……&*()]+"
lines = [re.sub(pattern, " ", line) for line in lines]
lines = [line.replace(' ', '') for line in lines]

# Persist everything that still has more than two characters.
with open(output_path, 'w', encoding='utf-8') as target:
    for line in lines:
        if len(line) > 2:
            target.write(line + '\n')
| [
"1186986256@qq.com"
] | 1186986256@qq.com |
e3baf698b803e39d4869c69af482d97836496848 | 91d96fc4084a55a74f761ed7bc7d0adba533618a | /projects/pset2.0_Forkable_Difficulty_Adjusting/blockchain-visualizer/visualize.py | 352b2f7230f8cd77c28efa64538cda9744295698 | [
"MIT"
] | permissive | Averylamp/mas.s62 | 169bb76f1289a3d4569a952075bfb8e7842e1dca | 382dc036ae014785be4c464ed8c4aef533fd52ab | refs/heads/master | 2020-03-17T16:14:56.613227 | 2018-05-17T03:56:09 | 2018-05-17T03:56:09 | 133,741,785 | 4 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,143 | py | import pickle
from datetime import datetime
from datetime import timedelta
import matplotlib.pyplot as plt
import graphviz
def mine_rate_info(endpoint_block, origin_block, block_information, time_interval):
    """Walk the chain backwards from endpoint_block and histogram mining times.

    Returns (times_list, mined_buckets): times_list[i] is the start time of
    bucket i (origin time + i * time_interval seconds) and mined_buckets[i]
    the number of blocks whose timestamp falls in that bucket.
    """
    # NOTE(review): the bucket range is derived from the module-level global
    # highest_block, not from the endpoint_block argument. That keeps every
    # call's bucket count identical (the plots below share one x-axis), but
    # it means the function silently depends on module state — confirm
    # before reusing elsewhere.
    endpoint_dt = datetime.fromtimestamp(highest_block[0]['timestamp'])
    origin_dt = datetime.fromtimestamp(block_information[origin_block]['timestamp'])
    block_hash = endpoint_block
    # +5 extra buckets — presumably slack for timestamps at/past the endpoint.
    num_buckets = int((endpoint_dt - origin_dt).total_seconds() / time_interval) + 5
    mined_buckets = [0]*num_buckets
    times_list = [origin_dt + timedelta(seconds=x*time_interval) for x in range(0, num_buckets)]
    assert len(times_list) == len(mined_buckets)
    # Follow previousHash links back to the origin ('' marks the genesis),
    # counting each block into the bucket its timestamp falls in.
    while block_hash != '':
        block_info = block_information[block_hash]
        timestamp = block_information[block_hash]['timestamp']
        dt = datetime.fromtimestamp(timestamp)
        bucket_ind = int((dt - origin_dt).total_seconds() / time_interval)
        mined_buckets[bucket_ind] += 1
        block_hash = block_info['blockInformation']['previousHash']
    return times_list, mined_buckets
def aggregate_info(mined_buckets):
    """Return the running total of *mined_buckets*.

    Element i of the result is the sum of buckets 0..i, so the output has
    the same length as the input (an empty input yields an empty list).
    """
    # itertools.accumulate replaces the manual index loop (and its special
    # case for i == 0) with a single C-level cumulative sum.
    return list(accumulate(mined_buckets))
def generate_graphviz(block_information):
    """Render the block tree as a Graphviz digraph and open the viewer.

    Every block becomes an unlabeled node; an edge points from each block's
    predecessor to the block itself. Blocks whose previousHash is empty are
    attached to a synthetic "origin" node.
    """
    graph = graphviz.Digraph('G', filename='block_information.gv')
    graph.node("origin", "")
    for block_hash, info in block_information.items():
        graph.node(block_hash, "")
        parent = info['blockInformation']['previousHash']
        if parent == '':
            parent = "origin"
        graph.edge(parent, block_hash)
    graph.view()
# Load the persisted chain state produced by the server. The names
# block_information / highest_block are module globals read by the helper
# functions above, so they keep their original names.
with open("../server-python/block_information.pickle", 'rb') as handle:
    block_information = pickle.load(handle)
with open("../server-python/highest_block.pickle", 'rb') as handle:
    highest_block = pickle.load(handle)

print("Creating graphviz...")
# generate_graphviz(block_information)
print("Done.")
# exit()

# block height 0: 6c179f21e6f62b629055d8ab40f454ed02e48b68563913473b857d3638e23b28
origin_block = "6c179f21e6f62b629055d8ab40f454ed02e48b68563913473b857d3638e23b28"
forked_block = "00001d87846888b85e4b9b757b59a936b0ff33d8128518c78efaa092572efbfd" # Put the hash of another tip here to graph it as well
endpoint_block = highest_block[0]['blockHash']
print(endpoint_block)

time_interval = 0.5 # seconds

# Histogram the honest chain and the attacker fork over the same buckets.
times_list, mined_buckets = mine_rate_info(endpoint_block, origin_block, block_information, time_interval)
forked_times_list, forked_mined_buckets = mine_rate_info(forked_block, origin_block, block_information, time_interval)
aggregate_buckets = aggregate_info(mined_buckets)
forked_aggregate_buckets = aggregate_info(forked_mined_buckets)

print("Plotting data...")
# line1, = plt.plot(times_list, mined_buckets, label="blocks mined / {}s".format(time_interval))
line2, = plt.plot(times_list, aggregate_buckets, label="total blocks mined")
# line3, = plt.plot(times_list, forked_mined_buckets, label="attacker blocks mined / {}s".format(time_interval))
line4, = plt.plot(times_list, forked_aggregate_buckets, label="attacker total blocks mined")
plt.legend(handles=[line2, line4])
plt.show()
print("Done")
| [
"averylamp@gmail.com"
] | averylamp@gmail.com |
a2d6c12a2bd7956f2c562f8cfe0e2ac7678d9769 | 3003a8663135aa10f5a152a8642bc6ab270995b9 | /ggCloudSDK/google-cloud-sdk/lib/googlecloudsdk/sql/lib/instances.py | 9580cc32edf3f272d2994243b0b16c424ce6e6fb | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | bopopescu/packmybot | 1b4d199b36d196e5e769a781b520019bb4d0bdbc | 92de1e72cfc51b41447366ffc81a9dcd9a5e7870 | refs/heads/master | 2022-11-25T23:46:06.946645 | 2015-10-22T08:22:04 | 2015-10-22T08:22:04 | 282,313,675 | 0 | 0 | null | 2020-07-24T20:50:10 | 2020-07-24T20:50:10 | null | UTF-8 | Python | false | false | 7,040 | py | # Copyright 2015 Google Inc. All Rights Reserved.
"""Common utility functions for sql instances."""
from googlecloudsdk.calliope import exceptions
class _BaseInstances(object):
"""Common utility functions for sql instances."""
@classmethod
def _SetBackupConfiguration(cls, sql_messages, settings, args, original):
"""Sets the backup configuration for the instance."""
# these args are only present for the patch command
no_backup = not getattr(args, 'backup', True)
if original and (
any([args.backup_start_time, args.enable_bin_log is not None,
no_backup])):
if original.settings.backupConfiguration:
backup_config = original.settings.backupConfiguration[0]
else:
backup_config = sql_messages.BackupConfiguration(
startTime='00:00',
enabled=False),
elif not any([args.backup_start_time, args.enable_bin_log is not None,
no_backup]):
return
if not original:
backup_config = sql_messages.BackupConfiguration(
startTime='00:00',
enabled=False)
if args.backup_start_time:
backup_config.startTime = args.backup_start_time
backup_config.enabled = True
if no_backup:
if args.backup_start_time or args.enable_bin_log is not None:
raise exceptions.ToolException(
('Argument --no-backup not allowed with'
' --backup-start-time or --enable-bin-log'))
backup_config.enabled = False
if args.enable_bin_log is not None:
backup_config.binaryLogEnabled = args.enable_bin_log
cls.AddBackupConfigToSettings(settings, backup_config)
@staticmethod
def _SetDatabaseFlags(sql_messages, settings, args):
if args.database_flags:
settings.databaseFlags = []
for (name, value) in args.database_flags.items():
settings.databaseFlags.append(sql_messages.DatabaseFlags(
name=name,
value=value))
elif getattr(args, 'clear_database_flags', False):
settings.databaseFlags = []
@staticmethod
def _ConstructSettingsFromArgs(sql_messages, args):
"""Constructs instance settings from the command line arguments.
Args:
sql_messages: module, The messages module that should be used.
args: argparse.Namespace, The arguments that this command was invoked
with.
Returns:
A settings object representing the instance settings.
Raises:
ToolException: An error other than http error occured while executing the
command.
"""
settings = sql_messages.Settings(
tier=args.tier,
pricingPlan=args.pricing_plan,
replicationType=args.replication,
activationPolicy=args.activation_policy)
# these args are only present for the patch command
clear_authorized_networks = getattr(args, 'clear_authorized_networks',
False)
clear_gae_apps = getattr(args, 'clear_gae_apps', False)
if args.authorized_gae_apps:
settings.authorizedGaeApplications = args.authorized_gae_apps
elif clear_gae_apps:
settings.authorizedGaeApplications = []
if any([args.assign_ip is not None, args.require_ssl is not None,
args.authorized_networks, clear_authorized_networks]):
settings.ipConfiguration = sql_messages.IpConfiguration()
if args.assign_ip is not None:
settings.ipConfiguration.enabled = args.assign_ip
if args.authorized_networks:
settings.ipConfiguration.authorizedNetworks = args.authorized_networks
if clear_authorized_networks:
# For patch requests, this field needs to be labeled explicitly cleared.
settings.ipConfiguration.authorizedNetworks = []
if args.require_ssl is not None:
settings.ipConfiguration.requireSsl = args.require_ssl
if any([args.follow_gae_app, args.gce_zone]):
settings.locationPreference = sql_messages.LocationPreference(
followGaeApplication=args.follow_gae_app,
zone=args.gce_zone)
if getattr(args, 'enable_database_replication', None) is not None:
settings.databaseReplicationEnabled = args.enable_database_replication
return settings
@classmethod
def ConstructInstanceFromArgs(cls, sql_messages, args,
original=None, instance_ref=None):
"""Construct a Cloud SQL instance from command line args.
Args:
sql_messages: module, The messages module that should be used.
args: argparse.Namespace, The CLI arg namespace.
original: sql_messages.DatabaseInstance, The original instance, if some of
it might be used to fill fields in the new one.
instance_ref: reference to DatabaseInstance object, used to fill project
and instance information.
Returns:
sql_messages.DatabaseInstance, The constructed (and possibly partial)
database instance.
Raises:
ToolException: An error other than http error occured while executing the
command.
"""
settings = cls._ConstructSettingsFromArgs(sql_messages, args)
cls._SetBackupConfiguration(sql_messages, settings, args, original)
cls._SetDatabaseFlags(sql_messages, settings, args)
# these flags are only present for the create command
region = getattr(args, 'region', None)
database_version = getattr(args, 'database_version', None)
instance_resource = sql_messages.DatabaseInstance(
region=region,
databaseVersion=database_version,
masterInstanceName=getattr(args, 'master_instance_name', None),
settings=settings)
if hasattr(args, 'master_instance_name'):
if args.master_instance_name:
replication = 'ASYNCHRONOUS'
activation_policy = 'ALWAYS'
else:
replication = 'SYNCHRONOUS'
activation_policy = 'ON_DEMAND'
if not args.replication:
instance_resource.settings.replicationType = replication
if not args.activation_policy:
instance_resource.settings.activationPolicy = activation_policy
if instance_ref:
cls.SetProjectAndInstanceFromRef(instance_resource, instance_ref)
return instance_resource
class InstancesV1Beta3(_BaseInstances):
  """Common utility functions for sql instances V1Beta3."""

  @staticmethod
  def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
    """Copy the project and instance identifiers from *instance_ref*."""
    instance_resource.instance = instance_ref.instance
    instance_resource.project = instance_ref.project

  @staticmethod
  def AddBackupConfigToSettings(settings, backup_config):
    """V1Beta3 stores the backup configuration as a single-element list."""
    settings.backupConfiguration = [backup_config]
class InstancesV1Beta4(_BaseInstances):
  """Common utility functions for sql instances V1Beta4."""

  @staticmethod
  def SetProjectAndInstanceFromRef(instance_resource, instance_ref):
    """Copy the project and name identifiers from *instance_ref*."""
    instance_resource.name = instance_ref.instance
    instance_resource.project = instance_ref.project

  @staticmethod
  def AddBackupConfigToSettings(settings, backup_config):
    """V1Beta4 stores the backup configuration as a bare message."""
    settings.backupConfiguration = backup_config
| [
"cboussicaud@leaseplan.fr"
] | cboussicaud@leaseplan.fr |
447215391bd91ac4d5a721c47f8d0298d4eb5b3f | c001d8cff7e634bfa19d682ccdcf5261bc7bf397 | /cotizacionweb/migrations/0005_auto_20160420_1104.py | f9051f65ba22309b3fc40fa1bad989072d8ebdc8 | [] | no_license | yusnelvy/mtvmcotizacionv02 | 4053a6883519901e3652a141ef83c297c5aa0ccd | f0d94faff9c721f25018b7db12a07786508da565 | refs/heads/master | 2021-01-21T12:58:49.014716 | 2016-05-06T20:49:59 | 2016-05-06T20:49:59 | 50,135,715 | 0 | 0 | null | 2016-05-25T12:32:34 | 2016-01-21T20:48:27 | CSS | UTF-8 | Python | false | false | 2,464 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the cotizacionweb app.

    Renames two serviciomueble fields, removes obsolete columns, and adds
    new fields with one-off defaults. Auto-generated migrations should not
    normally be edited by hand once applied.
    """

    dependencies = [
        ('contenedor', '0005_contenedortipicopormueble_tipo_de_contenido'),
        ('cotizacionweb', '0004_auto_20160414_1529'),
    ]

    operations = [
        migrations.RenameField(
            model_name='serviciomueble',
            old_name='porcentaje_complejidad',
            new_name='cantidad',
        ),
        migrations.RenameField(
            model_name='serviciomueble',
            old_name='descripcion_monto_servicio',
            new_name='descripcion_cantidad',
        ),
        migrations.RemoveField(
            model_name='cotizacionhistoricofecha',
            name='fecha_actual',
        ),
        migrations.RemoveField(
            model_name='cotizacionhistoricofecha',
            name='hora_actual',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='complejidad_servicio',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='incluido',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='monto_servicio',
        ),
        migrations.RemoveField(
            model_name='serviciomueble',
            name='monto_servicio_asignado',
        ),
        migrations.AddField(
            model_name='contenedormueble',
            name='tipo_de_contenido',
            # default=1 is a one-off default chosen when the non-nullable FK
            # was added to existing rows.
            field=models.ForeignKey(to='contenedor.TipoDeContenido', default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cotizacionhistoricofecha',
            name='fecha',
            field=models.DateTimeField(default='2016-04-01 00:00:00'),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='cotizacionservicio',
            name='cantidad_servicio',
            field=models.DecimalField(max_digits=7, decimal_places=2, default=1),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='fechadecotizacion',
            name='obligatoria',
            field=models.BooleanField(default=None),
        ),
        migrations.AlterField(
            model_name='cotizacionestado',
            name='fecha_registro',
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
| [
"yusnelvy@gmail.com"
] | yusnelvy@gmail.com |
8c08e9331e2a444c62e36c3a44d664c13b4c40df | 122cef3b7e5c65f4df2b5d17b88bbb6986c16b3f | /script_1.py | 462eafb9a1a567ee22ba559cf0538723cbbe4ca1 | [
"MIT"
] | permissive | markregine/FHIR_Python_fhirclient_testing_open | 98d843c6d2c223256afbb948c256d09fa7276e84 | 44061ee1522bfc7de5fe553683d6cb778c4b5f4e | refs/heads/master | 2020-04-30T02:44:19.943085 | 2019-03-19T18:13:00 | 2019-03-19T18:13:00 | 176,568,630 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | #!/usr/bin/env python
# coding: utf-8
# In[1]:
from fhirclient import client
import fhirclient.models.patient as p
import fhirclient.models.bundle as bundle
# Connection settings for the public SMART Health IT DSTU2 sandbox.
# NOTE(review): this dict is immediately overwritten by the next cell and is
# therefore dead code — presumably kept from an earlier notebook run.
settings = {'app_id':'xxxx',
            'api_base': 'https://r2.smarthealthit.org',
            'patient_id': 'smart-1137192'}
# In[2]:
# Active settings: SITE open FHIR test server (public sandbox credentials).
settings = {'app_id': 'my-app',
            'api_base': 'https://fhir.sitenv.org/open/fhir',
            'app_secret':'my-app-secret-123',
            'launch_token': 'bXktYXBwOm15LWFwcC1zZWNyZXQtMTIz'
            }
# In[3]:
# Build the client; an open endpoint needs no OAuth handshake.
smart = client.FHIRClient(settings=settings)
# In[4]:
#smart.ready
#smart.prepare()
#smart.ready
#smart.authorize_url
# In[5]:
# Fetch patient id=1 as JSON from the server and read its birth date.
patient = p.Patient.read('?_id=1&_format=json', smart.server)
# Notebook expression cell: evaluates (and displayed) the ISO birth date.
patient.birthDate.isostring
# In[ ]:
| [
"noreply@github.com"
] | noreply@github.com |
5957173b293dfaea06fcdce0d133e9d754b9f80c | c030e3de3b6822c859f453456dc5f34845ae86da | /2.add-two-numbers.py | 511ca1d156495ca20cf1d8ba552cce7057e77629 | [] | no_license | xxli807/PythonForFun | 5ca07e5728d04e08019efe33f8bc126256ec05e6 | 5c980466146450309949388ca3ab0cedcf5f12fc | refs/heads/master | 2020-05-16T23:24:57.798569 | 2019-05-11T12:28:38 | 2019-05-11T12:28:38 | 183,364,431 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 795 | py | #
# @lc app=leetcode id=2 lang=python3
#
# [2] Add Two Numbers
#
# Definition for singly-linked list.
class ListNode:
    """Singly linked list node holding one decimal digit."""

    def __init__(self, x):
        self.val, self.next = x, None


class Solution:
    def addTwoNumbers(self, l1: ListNode, l2: ListNode) -> ListNode:
        """Add two non-negative integers stored as reversed digit lists.

        Each node carries one digit, least-significant first; the sum is
        returned in the same representation. Both inputs must be non-empty.
        """
        first_sum = l1.val + l2.val
        head = ListNode(first_sum % 10)
        carry = first_sum // 10
        tail = head
        while l1.next or l2.next:
            left = l1.next.val if l1.next else 0
            right = l2.next.val if l2.next else 0
            digit_sum = left + right + carry
            tail.next = ListNode(digit_sum % 10)
            tail = tail.next
            carry = digit_sum // 10
            if l1.next:
                l1 = l1.next
            if l2.next:
                l2 = l2.next
        if carry:
            # A final carry adds one extra most-significant digit.
            tail.next = ListNode(carry)
        return head
| [
"lxx871030@gmail.com"
] | lxx871030@gmail.com |
dd9b515004d1932557315b736706692edce7c9f4 | a7a11639a6c7fddc702b55530b7a86e77796813c | /green_rose_shop/settings.py | 43f273257b1b5a1fa5f4fca49f6f37292fd1e069 | [] | no_license | venindobhasa/green_rose_shop | 2f9f9ec7eaa25e7e9f85d7d5da8c9dcf15e195c3 | 44ae92c29fe9eebbdc5c63177292907697263a17 | refs/heads/master | 2022-11-05T04:30:14.459444 | 2020-06-19T02:50:21 | 2020-06-19T02:50:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,458 | py | """
Django settings for green_rose_shop project.
Generated by 'django-admin startproject' using Django 2.2.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is hard-coded in version control — load it from the
# environment (e.g. os.environ) before deploying.
SECRET_KEY = 'dsiim%c=7h#lup128-qn+v^6j3qv!9&3d*e%%1s0jn**-2qmzc'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
# Empty list: with DEBUG=False Django would refuse every host until real
# hostnames are added here.
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    #custom apps
    'user_login',
    'rest_framework',
    'rest_framework.authtoken',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'green_rose_shop.urls'
TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]
WSGI_APPLICATION = 'green_rose_shop.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
# Original sqlite backend kept for reference:
# DATABASES = {
#     'default': {
#         'ENGINE': 'django.db.backends.sqlite3',
#         'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
#     }
# }
# NOTE(review): 'mysql_username' / 'mysql_password' look like placeholder
# credentials — supply real values via the environment, not source control.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        'NAME': 'green_rose',
        'USER': 'mysql_username',
        'PASSWORD': 'mysql_password',
        'HOST': '127.0.0.1',
        'PORT' : '3306',
    }
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
"saunghninsi91@gmail.com"
] | saunghninsi91@gmail.com |
c856a237edfe16a2af62af7f31bb6f4ea80ef390 | c96a5c821a24cdb41095d33ee734f94f35e81fb2 | /octoprint_rgbcontrol/__init__.py | 055ce2c4304023baf7719c59ff01a1c596c927cd | [] | no_license | mikedmor/Octoprint_RGBControl | 7915c806942f53c930ed672be71bdd7a2784699b | 0711896004079cc2efcdf74775a44f2892c16d0f | refs/heads/master | 2020-03-16T23:10:44.868686 | 2018-05-12T20:31:27 | 2018-05-12T20:31:27 | 133,068,066 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,607 | py | # coding=utf-8
from __future__ import absolute_import
import octoprint.plugin
import octoprint.settings
class RGBControlPlugin(octoprint.plugin.StartupPlugin,
                       octoprint.plugin.TemplatePlugin,
                       octoprint.plugin.SettingsPlugin,
                       octoprint.plugin.AssetPlugin):
    """OctoPrint plugin exposing RGB LED controls in the web UI."""

    def get_assets(self):
        """Register the plugin's JavaScript and CSS assets (AssetPlugin)."""
        return dict(
            js=["js/jscolor/jscolor.js","js/rgbcontrol.js"],
            css=["css/rgbcontrol.css","css/iris.css"]
        )

    def get_template_configs(self):
        """Inject the plugin's generic template into the UI (TemplatePlugin)."""
        return [
            dict(type="generic", template="rgbcontrol.jinja2", custom_bindings=True)
        ]

    ##~~ Softwareupdate hook
    def get_version(self):
        """Return the plugin version injected by OctoPrint at load time."""
        return self._plugin_version

    def get_update_information(self):
        """Return the Software Update plugin configuration for this plugin.

        Bug fix: the configuration key used to be ``multicam`` — a
        copy-paste leftover from another plugin. It must match this
        plugin's identifier so update checks are attributed correctly.
        """
        return dict(
            rgbcontrol=dict(
                displayName="RGBControl",
                displayVersion=self._plugin_version,

                # version check: github repository
                # NOTE(review): capitalization differs from the hosting
                # repository name (Octoprint_RGBControl) — confirm.
                type="github_release",
                user="mikedmor",
                repo="OctoPrint_RGBControl",
                current=self._plugin_version,

                # update method: pip
                pip="https://github.com/mikedmor/OctoPrint_RGBControl/archive/{target_version}.zip"
            )
        )
__plugin_name__ = "RGBControl"
def __plugin_load__():
    """Instantiate the plugin and register its OctoPrint hooks."""
    global __plugin_implementation__, __plugin_hooks__
    __plugin_implementation__ = RGBControlPlugin()
    __plugin_hooks__ = {
        "octoprint.plugin.softwareupdate.check_config": __plugin_implementation__.get_update_information
    }
| [
"mikedmor@gmail.com"
] | mikedmor@gmail.com |
f340f6fe2ce9cef2755406e2d7327934041ad8c1 | 6fe477c7b32f0020a5fffe6affbc7546b16ab879 | /healthpoints/src/healthpoints/apps/tracks/migrations/0003_auto__add_field_activity_shard_id__add_field_activity_note_id.py | aca19183adb724bd430c79164d590c788b213d1b | [] | no_license | rootart/healthpoints | cb79cc4b8e3ceb9401eb5894518e026673f98545 | c33f8e2d0d62e66b3e967f3e464097482abebd91 | refs/heads/master | 2021-01-01T05:52:06.661165 | 2014-10-12T05:45:11 | 2014-10-12T05:45:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,555 | py | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Auto-generated South migration for the tracks app.

    Adds the nullable shard_id and note_id columns to Activity. The frozen
    ``models`` dict below is South's snapshot of the ORM at generation time
    and should not be edited by hand.
    """

    def forwards(self, orm):
        """Apply: add the two nullable CharField columns."""
        # Adding field 'Activity.shard_id'
        db.add_column(u'tracks_activity', 'shard_id',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Activity.note_id'
        db.add_column(u'tracks_activity', 'note_id',
                      self.gf('django.db.models.fields.CharField')(max_length=255, null=True, blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Reverse: drop the two columns added in forwards()."""
        # Deleting field 'Activity.shard_id'
        db.delete_column(u'tracks_activity', 'shard_id')
        # Deleting field 'Activity.note_id'
        db.delete_column(u'tracks_activity', 'note_id')
    # Frozen ORM snapshot generated by South — leave as-is.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'tracks.activity': {
            'Meta': {'object_name': 'Activity'},
            'average_speed': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'calories': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'distance': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '16', 'decimal_places': '6', 'blank': 'True'}),
            'guID': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'location_city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'location_country': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'moving_time': ('timedelta.fields.TimedeltaField', [], {'null': 'True', 'blank': 'True'}),
            'note_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'polyline': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'provider': ('django.db.models.fields.CharField', [], {'max_length': '100', 'db_index': 'True'}),
            'resource_state': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'route': ('django.contrib.gis.db.models.fields.LineStringField', [], {'null': 'True', 'blank': 'True'}),
            'shard_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'start_point': ('django.contrib.gis.db.models.fields.PointField', [], {'null': 'True', 'blank': 'True'}),
            'total_elevation_gain': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        }
    }
complete_apps = ['tracks'] | [
"dijakroot@gmail.com"
] | dijakroot@gmail.com |
762824112bf390cf4f8ff8ee2d484e6524fbca21 | c95f245a5252ec1185e13ef5d37ff599dd451fee | /telethon/network/connection/tcpfull.py | fd9fd1cf58e9bd9932053d283a5d676b226f6cd5 | [
"MIT"
] | permissive | perryyo/Telethon | 6f95ce09ad86a94c44fe697ba6d49df4914cb321 | 0046291254f9c96f8824ff7b42fa695fa3f71fc5 | refs/heads/master | 2020-04-07T17:08:15.994174 | 2019-02-11T07:13:44 | 2019-02-11T07:13:44 | 158,558,142 | 0 | 0 | MIT | 2018-11-21T14:12:22 | 2018-11-21T14:12:21 | null | UTF-8 | Python | false | false | 1,463 | py | import struct
from zlib import crc32
from .connection import Connection
from ...errors import InvalidChecksumError
class ConnectionTcpFull(Connection):
    """
    Default Telegram mode. Sends 12 additional bytes and
    needs to calculate the CRC value of the packet itself.

    Frame layout (all little-endian): total length (4 bytes),
    sequence number (4 bytes), payload, CRC32 of the preceding
    bytes (4 bytes).
    """
    def __init__(self, ip, port, *, loop, proxy=None):
        super().__init__(ip, port, loop=loop, proxy=proxy)
        # Per-connection outgoing packet counter, included in every frame.
        self._send_counter = 0
    async def connect(self, timeout=None, ssl=None):
        """Open the transport and reset the packet sequence number."""
        await super().connect(timeout=timeout, ssl=ssl)
        self._send_counter = 0  # Important or Telegram won't reply
    def _send(self, data):
        """Frame *data* as <length><seq><payload><crc32> and write it out."""
        # https://core.telegram.org/mtproto#tcp-transport
        # total length, sequence number, packet and checksum (CRC32)
        # length counts the 12 framing bytes (4 length + 4 seq + 4 crc).
        length = len(data) + 12
        data = struct.pack('<ii', length, self._send_counter) + data
        crc = struct.pack('<I', crc32(data))
        self._send_counter += 1
        self._writer.write(data + crc)
    async def _recv(self):
        """Read one framed packet, verify its CRC32, and return the payload.

        Raises InvalidChecksumError when the received CRC32 trailer does not
        match the checksum of the length/sequence/body bytes.
        """
        packet_len_seq = await self._reader.readexactly(8)  # 4 and 4
        packet_len, seq = struct.unpack('<ii', packet_len_seq)
        # Body = payload + 4-byte CRC trailer (the 8 header bytes are read).
        body = await self._reader.readexactly(packet_len - 8)
        checksum = struct.unpack('<I', body[-4:])[0]
        body = body[:-4]
        # CRC covers everything before the trailer: header + payload.
        valid_checksum = crc32(packet_len_seq + body)
        if checksum != valid_checksum:
            raise InvalidChecksumError(checksum, valid_checksum)
        return body
| [
"totufals@hotmail.com"
] | totufals@hotmail.com |
f956625b352f998eb26833b0e51195907af6cf82 | b3bac39420c4864de4a9f53a27845ba71e4f08fa | /App.py | 864555395f640864608ca50ba8d233c23cbcf07b | [] | no_license | nickbetke/Music-Mood-Recognition | bb2c6377de99b47b3e629bb02853412e63494e80 | 3811e0d3bb28ae4e06039f7d2ab463cbcc7a667f | refs/heads/master | 2021-02-10T02:55:55.878712 | 2020-10-17T14:35:53 | 2020-10-17T14:35:53 | 244,346,711 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,490 | py | import tkinter as tk # python 3
from tkinter import *
from tkinter.ttk import *
from tkinter import font as tkFont
from tkinter.filedialog import askopenfilename
from tkinter import messagebox
import numpy as np
from pydub import AudioSegment as pas
import librosa as lr
import joblib
global do
class SampleApp(tk.Tk):
    """Root window: stacks all pages in one grid cell and switches
    between them by raising the requested frame."""

    def __init__(self, *args, **kwargs):
        tk.Tk.__init__(self, *args, **kwargs)
        # Shared fonts the pages use for headings and mood text.
        self.title_font = tkFont.Font(family='Helvetica', size=20, weight="bold", slant="italic")
        self.mood_font = tkFont.Font(family='Helvetica', size=16, slant="italic")
        container = tk.Frame(self)
        container.pack(side="top", fill="both", expand=True)
        container.grid_rowconfigure(0, weight=1)
        container.grid_columnconfigure(0, weight=1)
        # Map page-class name -> instantiated frame. All frames occupy
        # the same grid cell; only the raised one is visible.
        self.frames = {}
        for F in (StartPage, PageOne):
            page_name = F.__name__
            frame = F(parent=container, controller=self)
            self.frames[page_name] = frame
            frame.grid(row=0, column=0, sticky="nsew")
        self.show_frame("StartPage")

    def show_frame(self, page_name):
        """Bring the page registered under *page_name* to the front."""
        frame = self.frames[page_name]
        frame.tkraise()
class StartPage(tk.Frame):
    """First page: lets the user browse for an audio file; the chosen
    path is published through the module-level StringVar ``do`` so that
    PageOne can read it."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent, bg="crimson")
        self.controller = controller
        label = tk.Label(self, text="Music Mood Recogniser", font=controller.title_font)
        label.pack(side="top", fill="x", pady=20)
        # for sending filename to other page
        self.filename = ""
        self.lbl = tk.Label(self, text="Select an Audio file", font=controller.mood_font, bg="bisque", fg="black")
        self.lbl.pack(side="top", fill="x", pady=200)
        # ==================================================
        # style = Style()
        # style.configure('TButton', font =
        # ('calibri', 20, 'bold'),
        # borderwidth = '4')
        # style.configure('TButton', font =
        # ('calibri', 10, 'bold', 'underline'),
        # foreground = 'red')
        # ================================================
        self.button1 = tk.Button(self, text="Browse", bg="greenyellow", command=self.clicked)
        self.button1.place(x=250, y=400)
        # "Next" stays disabled until a file has been selected.
        self.button2 = tk.Button(self, text="Next", bg="chartreuse", command=lambda: controller.show_frame("PageOne"))
        self.button2["state"] = "disabled"
        self.button2.place(x=400, y=400)

    def lop(self):
        # NOTE(review): calls predict() on the PageOne *class*, not an
        # instance, so this would fail if ever invoked — appears unused.
        PageOne.predict()

    def clicked(self):
        """Open a file dialog; on selection, publish the path via the
        global StringVar ``do`` and enable the Next button."""
        global do
        self.filename = askopenfilename(title="Select an audio file", filetypes=(("mp3 files", "*.mp3*"),
                                                                                 ("m4a files", "*.m4a*"),
                                                                                 ("All files", "*.*")))
        if self.filename:
            do.set(self.filename)
            self.button2["state"] = "normal"
            self.lbl.configure(text=self.filename)
            print(self.filename)
#Frame 2
class PageOne(tk.Frame):
    """Second page: runs feature extraction on the selected audio file
    and shows the mood predicted by four pre-trained classifiers."""

    def __init__(self, parent, controller):
        tk.Frame.__init__(self, parent, bg="crimson")
        self.controller = controller
        # global do
        # k1 = do.get()
        label = tk.Label(self, text="Music Mood Recogniser", font=controller.title_font)
        label.pack(side="top", fill="x", pady=20)
        button = tk.Button(self, text="Go Back", bg="chartreuse", command=lambda: controller.show_frame("StartPage"))
        button.pack()
        button2 = tk.Button(self, text="Recognize", bg="chartreuse", command=self.testg)
        button2.pack()
        # mood Label
        # NOTE(review): label2 is a local; predict() creates fresh labels
        # on every run instead of updating this one.
        label2 = tk.Label(self, text="mood", font=controller.title_font, bg="cornsilk", fg="black")
        label2.pack(side="top", fill="x", pady=50)
        # label2.place(x=100 ,y=250)
        # self.predict()

    def testg(self):
        """Button callback: log the selected path, then run predict()."""
        global do
        f1 = do.get()
        print("mooded")
        print(f1)
        self.predict()

    def predict(self):
        """Extract audio features from the selected file and display the
        mood predicted by each saved model.

        Pipeline: take the first 30 s of the mp3/m4a, export it as WAV,
        compute MFCC + mel + spectral-contrast + tonnetz features with
        librosa, then feed the 181-dim feature vector to four
        joblib-pickled classifiers.
        """
        global do
        ad = do.get()
        n = ad.split('/')
        i = 'cutpiece.mp3'
        i = n[-1]  # basename of the selected file (overwrites the default above)
        if 'mp3' in i:
            song = pas.from_mp3(ad)
        if 'm4a' in i:
            song = pas.from_file(ad)
        # Swap the 3-char extension for 'wav'; keep only the first 30 s.
        ii = i[:-3] + 'wav'
        op = song[:30000]
        # NOTE(review): export path and load path differ
        # ('/home/nick/1PROjectX/trash/' vs '/trash/') — this only works
        # if both resolve to the same directory; confirm on the target box.
        op.export('/home/nick/1PROjectX/trash/' + ii, format='wav')
        feat1 = np.empty((0, 181))  # 40 mfcc + 128 mel + 7 contrast + 6 tonnetz
        audio, freq = lr.load('/trash/' + ii)
        stft = np.abs(lr.stft(audio))
        mfcc = np.mean(lr.feature.mfcc(y=audio, sr=freq, n_mfcc=40).T, axis=0)
        mel = np.mean(lr.feature.melspectrogram(audio, sr=freq).T, axis=0)
        contrast = np.mean(lr.feature.spectral_contrast(S=stft, sr=freq).T, axis=0)
        tonnetz = np.mean(lr.feature.tonnetz(y=lr.effects.harmonic(audio), sr=freq).T, axis=0)
        ext_feat = np.hstack([mfcc, mel, contrast, tonnetz])
        feat1 = np.vstack([feat1, ext_feat])
        # One label per classifier; paths are hard-coded to this machine.
        filename = '/46/decison-tree-model36.sav'
        x = joblib.load(filename)
        ww = x.predict(feat1)[0]
        print(x.predict(feat1)[0])
        label3 = tk.Label(self, text="DTree: " + str(ww), font='Helvetica 12 bold', bg="lightsteelblue4", fg="white")
        label3.pack(side="top", fill="x", pady=20)
        filename = '/46/linear-svm-model36.sav'
        x = joblib.load(filename)
        ww = x.predict(feat1)[0]
        label4 = tk.Label(self, text="SVM: " + str(ww), font='Helvetica 12 bold', bg="lightsteelblue4", fg="white")
        label4.pack(side="top", fill="x", pady=20)
        filename = '/46/naive-bayes-model36.sav'
        x = joblib.load(filename)
        ww = x.predict(feat1)[0]
        label5 = tk.Label(self, text="NaiveBayes: " + str(ww), font='Helvetica 12 bold', bg="lightsteelblue4", fg="white")
        label5.pack(side="top", fill="x", pady=20)
        filename = '/46/random-forest-model36.sav'
        x = joblib.load(filename)
        ww = x.predict(feat1)[0]
        label6 = tk.Label(self, text="Randomforest: " + str(ww), font='Helvetica 12 bold', bg="lightsteelblue4", fg="white")
        label6.pack(side="top", fill="x", pady=20)
        # label2['text'] = 'mood: ' + str(ww)
# label2['text'] = 'mood: ' + str(ww)
if __name__ == "__main__":
    # Build the root window, then the shared StringVar used by the pages
    # to pass the chosen file path around. (It is only read from button
    # callbacks, so creating it after SampleApp() is safe.)
    app = SampleApp()
    app.title("Music Mood Recogniser")
    app.geometry("800x720+0+0")
    app.resizable(True, True)
    do = StringVar()
    app.mainloop()
| [
"noreply@github.com"
] | noreply@github.com |
d8740f4f41ffea793ecdef962f03b008182d8ec7 | c07ecb7b177d3296580dec755e4c5a34c01f4132 | /120_Triangle.py | 3b5add816517340c3027d281b3ef7a59950e827b | [] | no_license | bokveizen/leetcode | 78c6e00a97ab9b658752dfcbf6380918ce5611d7 | 7c7eb77e23310563dbbabae4ff9c726620a8dd6d | refs/heads/master | 2021-07-09T19:59:15.338846 | 2021-03-18T06:28:14 | 2021-03-18T06:28:14 | 231,543,847 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # https://leetcode-cn.com/problems/triangle/
class Solution:
    def minimumTotal(self, triangle: List[List[int]]) -> int:
        """Return the minimum top-to-bottom path sum in *triangle*.

        Bottom-up dynamic programming: ``best[j]`` holds the cheapest
        path sum from row ``r`` position ``j`` down to the base. Folding
        each row into the row above shrinks ``best`` by one element per
        step, ending with the answer at the apex. O(n^2) time, O(n) space.
        """
        best = list(triangle[-1])
        for row in reversed(triangle[:-1]):
            best = [value + min(best[j], best[j + 1])
                    for j, value in enumerate(row)]
        return best[0]
| [
"boqvezen97@kaist.ac.kr"
] | boqvezen97@kaist.ac.kr |
f25e7844fc3f123aff20df8ed51e37a677b10356 | 1107279f4447455a2fea8ff4341e856845cf8e57 | /testes/I2C_sniffer.py | faa21308ec2891c94910ec8d4369c332d9d2c356 | [] | no_license | rmborges/Distributed-Lighting-Control | a0d9cea26b5c538bc41d328e94bc805c97b56a9f | e59a8c5f299cbf739352d1db98787e934e9c4c16 | refs/heads/master | 2021-03-30T18:03:04.574228 | 2018-01-07T15:59:11 | 2018-01-07T15:59:11 | 113,502,261 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,913 | py | #!/usr/bin/env python
import time
import pigpio
class sniffer:
    """
    A class to passively monitor activity on an I2C bus. This should
    work for an I2C bus running at 100kbps or less. You are unlikely
    to get any usable results for a bus running any faster.
    """

    def __init__(self, pi, SCL, SDA, set_as_inputs=False):
        """
        Instantiate with the Pi and the gpios for the I2C clock
        and data lines.
        If you are monitoring one of the Raspberry Pi buses you
        must set set_as_inputs to False so that they remain in
        I2C mode.
        The pigpio daemon should have been started with a higher
        than default sample rate.
        For an I2C bus rate of 100Kbps sudo pigpiod -s 2 should work.
        A message is printed for each I2C transaction formatted with
        "[" for the START
        "XX" two hex characters for each data byte
        "+" if the data is ACKd, "-" if the data is NACKd
        "]" for the STOP
        E.g. Reading the X, Y, Z values from an ADXL345 gives:
        [A6+32+]
        [A7+01+FF+F2+FF+06+00-]
        """
        self.pi = pi
        self.gSCL = SCL
        self.gSDA = SDA
        # Edge-classification constants used by _parse().
        self.FALLING = 0
        self.RISING = 1
        self.STEADY = 2
        # Decoder state: inside a START..STOP transaction, the byte/bit
        # accumulators, the last seen line levels, and the text built so far.
        self.in_data = False
        self.byte = 0
        self.bit = 0
        self.oldSCL = 1
        self.oldSDA = 1
        self.transact = ""
        if set_as_inputs:
            self.pi.set_mode(SCL, pigpio.INPUT)
            self.pi.set_mode(SDA, pigpio.INPUT)
        # One callback per line; both feed the same parser.
        self.cbA = self.pi.callback(SCL, pigpio.EITHER_EDGE, self._cb)
        self.cbB = self.pi.callback(SDA, pigpio.EITHER_EDGE, self._cb)

    def _parse(self, SCL, SDA):
        """
        Accumulate all the data between START and STOP conditions
        into a string and output when STOP is detected.
        """
        # Classify what each line just did relative to its last level.
        if SCL != self.oldSCL:
            self.oldSCL = SCL
            if SCL:
                xSCL = self.RISING
            else:
                xSCL = self.FALLING
        else:
            xSCL = self.STEADY

        if SDA != self.oldSDA:
            self.oldSDA = SDA
            if SDA:
                xSDA = self.RISING
            else:
                xSDA = self.FALLING
        else:
            xSDA = self.STEADY

        if xSCL == self.RISING:
            # Data is sampled on the rising clock edge: 8 data bits MSB
            # first, then the 9th bit is the ACK (low) / NACK (high).
            if self.in_data:
                if self.bit < 8:
                    self.byte = (self.byte << 1) | SDA
                    self.bit += 1
                else:
                    self.transact += '{:02X}'.format(self.byte)
                    if SDA:
                        self.transact += '-'
                    else:
                        self.transact += '+'
                    self.bit = 0
                    self.byte = 0
        elif xSCL == self.STEADY:
            # SDA changing while SCL is high signals START (falling) or
            # STOP (rising).
            if xSDA == self.RISING:
                if SCL:
                    self.in_data = False
                    self.byte = 0
                    self.bit = 0
                    self.transact += ']'  # STOP
                    print(self.transact)
                    self.transact = ""

            if xSDA == self.FALLING:
                if SCL:
                    self.in_data = True
                    self.byte = 0
                    self.bit = 0
                    self.transact += '['  # START

    def _cb(self, gpio, level, tick):
        """
        Check which line has altered state (ignoring watchdogs) and
        call the parser with the new state.
        """
        SCL = self.oldSCL
        SDA = self.oldSDA
        # level 2 is a pigpio watchdog timeout, intentionally ignored.
        if gpio == self.gSCL:
            if level == 0:
                SCL = 0
            elif level == 1:
                SCL = 1
        if gpio == self.gSDA:
            if level == 0:
                SDA = 0
            elif level == 1:
                SDA = 1
        self._parse(SCL, SDA)

    def cancel(self):
        """Cancel the I2C callbacks."""
        self.cbA.cancel()
        self.cbB.cancel()
if __name__ == "__main__":
import time
import pigpio
import I2C_sniffer
pi = pigpio.pi()
s = I2C_sniffer.sniffer(pi,3,2, False) # leave gpios 1/0 in I2C mode
time.sleep(60000)
s.cancel()
pi.stop()
| [
"rafael.m.borges@tecnico.ulisboa.pt"
] | rafael.m.borges@tecnico.ulisboa.pt |
b5f7d6f9fb35ce51fcbddc2f6bdffd4aaadeda7a | 5f59fb34fe9c4bcca96d18a765063fefa4bf8e8d | /lib/eval.py | 4499eec599ad5788222d5fd94fcd30d239ca1123 | [] | no_license | chrischoy/HighDimConvNets | 674ee682aa0c3b37b2e2e0e0b1fc36469b1978d6 | bd8f03150b4d639db61109a93c37f3be0dcaec38 | refs/heads/master | 2023-08-06T22:41:06.052791 | 2021-09-15T17:23:11 | 2021-09-15T17:23:21 | 263,797,270 | 39 | 4 | null | null | null | null | UTF-8 | Python | false | false | 2,020 | py | import torch
import numpy as np
def pdist(A, B, dist_type='L2', transposed=False):
    """Pairwise distance matrix between the points in A and B.

    transposed: if True, A and B are laid out D x N; otherwise N x D
    (the default). Returns an N_A x N_B tensor of 'L2' distances
    (with a float32-eps stabiliser inside the sqrt) or raw 'SquareL2'
    squared distances. Any other dist_type raises NotImplementedError.
    """
    if 'L2' not in dist_type:
        raise NotImplementedError('Not implemented')

    # Broadcast a difference tensor, then reduce the feature axis.
    if transposed:
        delta = A.unsqueeze(2) - B.unsqueeze(1)
        sq = delta.pow(2).sum(0)
    else:
        delta = A.unsqueeze(1) - B.unsqueeze(0)
        sq = delta.pow(2).sum(2)

    if dist_type == 'SquareL2':
        return sq
    if dist_type == 'L2':
        return torch.sqrt(sq + np.finfo(np.float32).eps)
def find_nn_gpu(F0, F1, nn_max_n=-1, return_distance=False, dist_type='SquareL2', transposed=False):
    """
    For each point in F0, find its nearest neighbour in F1.

    transposed: if True, F0, F1 have D x N. False by default N x D.
    nn_max_n: if > 1, F0 is processed in chunks of this many points to
    bound the size of the intermediate distance matrix.
    Returns the index tensor (on CPU); with return_distance=True also
    returns an N x 1 tensor of the corresponding minimum distances.
    """
    # Too much memory if F0 or F1 large. Divide the F0
    if nn_max_n > 1:
        if transposed:
            N = F0.shape[1]
        else:
            N = len(F0)
        C = int(np.ceil(N / nn_max_n))
        stride = nn_max_n
        dists, inds = [], []
        # Process C full chunks of F0 against all of F1.
        for i in range(C):
            if transposed:
                dist = pdist(F0[:, i * stride:(i + 1) * stride], F1, dist_type=dist_type, transposed=transposed)
            else:
                dist = pdist(F0[i * stride:(i + 1) * stride], F1, dist_type=dist_type, transposed=transposed)
            min_dist, ind = dist.min(dim=1)
            dists.append(min_dist.detach().unsqueeze(1).cpu())
            inds.append(ind.cpu())

        # Handle any remainder not covered by the C strides above.
        if C * stride < N:
            if transposed:
                dist = pdist(F0[:, C * stride:], F1, dist_type=dist_type, transposed=transposed)
            else:
                dist = pdist(F0[C * stride:], F1, dist_type=dist_type, transposed=transposed)
            min_dist, ind = dist.min(dim=1)
            dists.append(min_dist.detach().unsqueeze(1).cpu())
            inds.append(ind.cpu())

        dists = torch.cat(dists)
        inds = torch.cat(inds)
        assert len(inds) == N
    else:
        # Small enough to do in one shot.
        dist = pdist(F0, F1, dist_type=dist_type, transposed=transposed)
        min_dist, inds = dist.min(dim=1)
        dists = min_dist.detach().unsqueeze(1).cpu()
        inds = inds.cpu()
    if return_distance:
        return inds, dists
    else:
        return inds
| [
"cchoy@nvidia.com"
] | cchoy@nvidia.com |
bf811162014e14e26b71ed53ffec58e618d594a3 | 2157782cf5875767f8d1fe0bb07243da2e87600d | /send_email/email_helper.py | 5012f4ab74d9a69b947ea3e386bf2d903abaa39f | [] | no_license | mouday/SomeCodeForPython | 9bc79e40ed9ed851ac11ff6144ea080020e01fcd | ddf6bbd8a5bd78f90437ffa718ab7f17faf3c34b | refs/heads/master | 2021-05-09T22:24:47.394175 | 2018-05-11T15:34:22 | 2018-05-11T15:34:22 | 118,750,143 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,112 | py | #email_helper.py
'''
参考:https://www.liaoxuefeng.com/wiki/0014316089557264a6b348958f449949df42a6d3a2e542c000/001432005226355aadb8d4b2f3f42f6b1d6f2c5bd8d5263000
封装成简单邮件发送模块
'''
from email import encoders
from email.header import Header
from email.mime.text import MIMEText
from email.utils import parseaddr, formataddr
import smtplib
debug = True  # module-wide switch: set False to silence debug_info()


def debug_info(text):
    """Print *text* when the module-level ``debug`` flag is enabled."""
    if debug:
        print(text)
class EmailClient(object):
    """Small wrapper around smtplib for sending plain-text mail to a
    list of recipients.

    Usage: construct with the SMTP host, call login(), add_address()
    for each recipient, then send()/send_all(). Methods report status
    with 0 (success) / -1 (failure) return codes.
    """

    def __init__(self, smtp_server):
        """Remember the SMTP server address; no connection is made yet."""
        self._smtp_server = smtp_server
        self.addrs = []  # recipient list, items are (addr, name) tuples

    def login(self, from_addr, password, from_name="admin"):
        """Connect to the SMTP server (port 25) and authenticate.

        Returns 0 on success, -1 on failure.
        """
        self._from_addr = from_addr
        self._password = password
        self._from_name = from_name
        try:
            self.server = smtplib.SMTP(self._smtp_server, 25)
            # self.server.set_debuglevel(1)
            self.server.login(self._from_addr, self._password)
        except Exception:
            # Fixed: these debug_info calls were unreachable (placed
            # after the return statements).
            debug_info("登录失败")
            return -1
        else:
            debug_info("登录成功")
            return 0

    def send(self, title, text, to_addr, to_name=None):
        """Send one UTF-8 plain-text message.

        Returns 0 on success, -1 on failure. *to_name* defaults to the
        address itself.
        """
        if to_name is None:
            to_name = to_addr
        try:
            msg = MIMEText(text, 'plain', 'utf-8')
            msg['From'] = self._format_addr('%s<%s>' % (self._from_name, self._from_addr))
            msg['To'] = self._format_addr('%s <%s>' % (to_name, to_addr))
            msg['Subject'] = Header(title, 'utf-8').encode()
            self.server.sendmail(self._from_addr, to_addr, msg.as_string())
            return 0
        except Exception as e:
            debug_info(e)
            return -1

    def add_address(self, addr, name=None):
        """Append (addr, name) to the recipient list; name defaults to addr."""
        if name is None:
            name = addr
        self.addrs.append((addr, name))

    def send_all(self, title, text):
        """Send the message to every registered recipient.

        Returns a (success_count, fail_count) tuple.
        """
        success = 0
        fail = 0
        for addr, name in self.addrs:
            ret = self.send(title, text, addr, name)
            if ret == 0:
                success += 1
            else:
                fail += 1
        return success, fail

    def __del__(self):
        """Close the SMTP session if one was ever opened."""
        # Fixed: login() may never have been called (or failed before
        # self.server was assigned), which used to raise AttributeError
        # here during garbage collection.
        server = getattr(self, 'server', None)
        if server is not None:
            try:
                server.quit()
            except Exception:
                pass  # best-effort cleanup; ignore errors at teardown

    def _format_addr(self, s):
        """Return *s* ("Name <addr>") re-encoded as an RFC 2047 header value."""
        name, addr = parseaddr(s)
        return formataddr((Header(name, 'utf-8').encode(), addr))
if __name__ == '__main__':
    email_client = EmailClient("smtp.163.com")  # SMTP server address
    email_client.login("username", "password", "name")  # authenticate
    email_client.add_address("email")  # register recipients
    email_client.add_address("email")
    email_client.add_address("email")
    # Send to everyone and report how many succeeded/failed.
    success, fail = email_client.send_all("邮件标题", "邮件内容,试试看能不能发送出去")
    print("success:", success, "fail:", fail)
| [
"1940607002@qq.com"
] | 1940607002@qq.com |
2bc647123df644c429a647698050cb197c682e88 | 5b5a49643c75aa43d5a876608383bc825ae1e147 | /tests/lists/p121_test.py | 22041a3cf5ee7085bd6f9c855959da66c5eaec06 | [] | no_license | rscai/python99 | 281d00473c0dc977f58ba7511c5bcb6f38275771 | 3fa0cb7683ec8223259410fb6ea2967e3d0e6f61 | refs/heads/master | 2020-04-12T09:08:49.500799 | 2019-10-06T07:47:17 | 2019-10-06T07:47:17 | 162,393,238 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | from python99.lists.p121 import insert_at, insert_at_mutable
def test_insert_at():
    """insert_at places the element before the 1-based position given."""
    base = [1, 2, 3, 4, 5, 6]
    expectations = {
        2: [1, 'a', 2, 3, 4, 5, 6],
        1: ['a', 1, 2, 3, 4, 5, 6],
        7: [1, 2, 3, 4, 5, 6, 'a'],
    }
    for position, expected in expectations.items():
        assert insert_at(list(base), position, 'a') == expected
def test_insert_at_mutable():
    """Exercise the mutable variant.

    Fixed: this test previously called insert_at(), so
    insert_at_mutable() was never actually tested.
    """
    assert insert_at_mutable([1, 2, 3, 4, 5, 6], 2, 'a') == [1, 'a', 2, 3, 4, 5, 6]
    assert insert_at_mutable([1, 2, 3, 4, 5, 6], 1, 'a') == ['a', 1, 2, 3, 4, 5, 6]
    assert insert_at_mutable([1, 2, 3, 4, 5, 6], 7, 'a') == [1, 2, 3, 4, 5, 6, 'a']
| [
"ray.s.cai@icloud.com"
] | ray.s.cai@icloud.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.