blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
09e0f33fc46c60ce0164099178eab420dc176433 | 2f2f9b64d43cb3c98d4c338be07374aae0e64db9 | /lianjia/spiders/lianjia.py | a5ab565d0d8443091159c5fd225578c91526b4f5 | [] | no_license | RayZhanag/real-estate-study | a6c079f1d83b993770ee0b4d91f2ea79897a92c9 | 778a38bd4c14a07152d235eb1060e1a0bd98e3a4 | refs/heads/master | 2021-04-15T10:44:05.736710 | 2018-03-26T15:18:53 | 2018-03-26T15:18:53 | 126,726,330 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,813 | py | # This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
from scrapy.spiders import CrawlSpider,Rule
#from scrapy.loader import ItemLoader
from lianjia.items import LianjiaItem
from scrapy.linkextractors import LinkExtractor
from scrapy import Spider,Selector
import scrapy,logging,re
logger = logging.getLogger('LianJia')
class LianjiaSpider(Spider):
    """Crawl second-hand housing ("ershoufang") listings on hz.lianjia.com.

    Crawl flow:
      parse         -> district ("zone") links on the start page
      parse_zone    -> pagination of one district
      parse_page    -> links to the individual listings on one result page
      parse_content -> one listing page, emitted as a LianjiaItem
    """
    name = 'lianjia'
    #allowed_domains=["hz.lianjia.com/ershoufang"]
    start_urls = ["https://hz.lianjia.com/ershoufang/"]

    def __init__(self):
        self.main_url = "https://hz.lianjia.com"
        self.current_url = "https://hz.lianjia.com/ershoufang/pg"

    #response.css("ul.sellListContent li.clear div.info.clear div.title a::attr('href')").extract()
    def parse(self, response):
        """Follow every district link found on the start page."""
        links = response.css("div.position div[data-role=ershoufang] div a::attr('href')").extract()
        zone_urls = map(lambda item: self.main_url + item, links)
        for url in zone_urls:
            yield scrapy.Request(url, callback=self.parse_zone)

    def parse_zone(self, response):
        """Read the pagination info of a district page and schedule every result page."""
        def get_page_number_range(response):
            # page-data holds a dict literal such as {"totalPage":100,"curPage":1}
            str_of_dict = response.xpath("//div/div[@page-data]/@page-data").extract_first()  # put it inside "try" to let it seems comfortable.
            # NOTE(review): eval() of scraped markup executes arbitrary code if the
            # site ever serves a hostile value; consider json.loads/ast.literal_eval.
            page_dict = eval(str_of_dict)
            total_page = page_dict.get("totalPage")
            return total_page

        total_page = get_page_number_range(response)
        # parse current/first page; parse_page() is a generator, so its requests
        # must be re-yielded here — calling it without iterating discards them
        for request in self.parse_page(response):
            yield request
        for i in range(2, 1 + total_page):
            yield scrapy.Request(response.url + "pg" + str(i), self.parse_page)

    def parse_page(self, response):
        """Visit every house listing linked from one result page."""
        urls = response.css("ul.sellListContent li.clear div.info.clear div.title a::attr('href')").extract()
        for url in urls:
            yield scrapy.Request(url, callback=self.parse_content)  # visit every house source

    def parse_content(self, response):
        """Parse one listing page into a LianjiaItem.

        Listings whose scraped attribute lists do not have the expected
        lengths (9 transaction / 12 base entries) are skipped silently.
        """
        logger.info("visit page: " + response.url)
        baseProperty = response.css("div.introContent div.base div.content ul li::text").extract()  # basic attributes
        transactionProperty = response.css("div.introContent div.transaction div.content ul li span:nth-child(2)::text ").extract()  # transaction attributes
        district, area = response.css("div.aroundInfo div.areaName span.info a::text").extract()
        communityName = response.css("div.overview div.content div.aroundInfo div.communityName a.info::text").extract_first()
        totalPrice = response.css("div.price span.total::text").extract_first()
        unitPrice = response.css("div.price div.text div.unitPrice span.unitPriceValue::text").extract_first()
        if len(transactionProperty) == 9 and len(baseProperty) == 12:
            LianJia = LianjiaItem()
            LianJia["apartmentLayout"] = baseProperty[0]        # floor plan / room layout
            LianJia["constructionArea"] = baseProperty[2]       # gross floor area
            LianJia["floorArea"] = baseProperty[4]              # inner (usable) area
            LianJia["houseOrientation"] = baseProperty[6]       # orientation
            LianJia["decoration"] = baseProperty[8]             # decoration level
            LianJia["elevator"] = baseProperty[10]              # elevator availability
            LianJia["layoutStructure"] = baseProperty[3]        # even level;jump layer;duplicate
            LianJia["buildingType"] = baseProperty[5]           # building type
            LianJia["buildingStruction"] = baseProperty[7]      # building structure
            LianJia["staircasesRatio"] = baseProperty[9]        # elevator-to-household ratio
            LianJia["useRight"] = baseProperty[11]              # property-right term
            LianJia["listDate"] = transactionProperty[0]        # listing date
            LianJia["lastTradeDate"] = transactionProperty[2]   # previous transaction date
            LianJia["mortgage"] = transactionProperty[6]        # mortgage information
            LianJia["houseSourceCode"] = transactionProperty[8] # listing code
            LianJia["ownershipTransaction"] = transactionProperty[1]  # transaction ownership
            LianJia["propertyOwner"] = transactionProperty[5]   # property ownership
            LianJia["area"] = area                              # area within the district
            LianJia["communityName"] = communityName            # residential community name
            LianJia["totalPrice"] = totalPrice                  # total price
            LianJia["unitPrice"] = unitPrice                    # unit price
            yield LianJia
| [
"13671777883@139.com"
] | 13671777883@139.com |
bf79ce36fe3ae9c2d2fbadf3e22fb7a24c753613 | d6f65889a92d5dfb77fec7abf362307d738b876e | /Labcli/App/examenes/forms.py | d4eeac9fe60626107495aa4fb620ef13bfacb7b7 | [] | no_license | astridlisarm1995/LABCLI | bd8c7c21b3712a8b52f4f08f26e0352516197500 | ab1b904a377843f9d423614fc2d8e70040237cb3 | refs/heads/master | 2020-06-03T04:51:31.120123 | 2019-06-12T02:41:39 | 2019-06-12T02:41:39 | 191,445,746 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 866 | py | from django import forms
from App.examenes.models import Examenes
class ExamenForm(forms.ModelForm):
    """ModelForm for creating/editing an Examenes record (indentation restored)."""

    class Meta:
        model = Examenes
        fields = [
            'nombre',
            'tipo',
            'precio',
            'precio_laboratorio',
            'rango',
        ]
        labels = {
            'nombre': 'Nombre de Examen',
            'tipo': 'Tipo de examen',
            'precio': 'Precio de Examen',
            'precio_laboratorio': 'Valor de examen',
            'rango': 'Rango de examen'
        }
        # NOTE(review): 'tipo' has no widget entry, so it falls back to the
        # field's default widget — confirm whether that is intended.
        widgets = {
            'nombre': forms.TextInput(attrs={'class':'form-control'}),
            'precio': forms.TextInput(attrs={'class':'form-control'}),
            'precio_laboratorio': forms.TextInput(attrs={'class':'form-control'}),
            'rango': forms.TextInput(attrs={'class': 'form-control'}),
        }
"astrid.rodriguez15@gmail.com"
] | astrid.rodriguez15@gmail.com |
ae794f51c213d3b803cc088b8c557a9b5dd56371 | b7b7342a7369117cb98de55f697153e875eecbbc | /example.py | 098d2dcfbcfa00aab247a6a943daee18f895b5ad | [
"MIT"
] | permissive | zzygyx9119/nglesspy | 953a3ab084b7df72df522076b3cd094e4a02ba12 | 3cfa28ea8fe2fdc3c08ac80a5949844544489cc9 | refs/heads/master | 2022-11-18T20:38:45.927728 | 2020-07-16T19:56:42 | 2020-07-16T19:57:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 444 | py | from ngless import NGLess
# Build an NGLess 0.0 script programmatically and execute it:
# load a MOCAT sample, quality-trim it, map it to hg19 and write the
# mapped reads to a SAM file.
sc = NGLess.NGLess('0.0')
sc.import_('mocat', '0.0')

e = sc.env
e.sample = sc.load_mocat_sample_('testing')

@sc.preprocess_(e.sample, using='r')
def proc(bk):
    # Trim each read to its longest substring with quality >= 25,
    # then discard reads shorter than 45.
    bk.r = sc.substrim_(bk.r, min_quality=25)
    sc.if_(sc.len_(bk.r) < 45,
           sc.discard_)

e.mapped = sc.map_(e.sample, reference='hg19')
e.mapped = sc.select_(e.mapped, keep_if=['{mapped}'])
sc.write_(e.mapped, ofile='ofile.sam')
sc.run()
| [
"luis@luispedro.org"
] | luis@luispedro.org |
4e9cf1128c2b20e84ccb9cb2a506b60bd1d92535 | 4d4899e54a8a97fad2039350f16c50245a4e0810 | /source/todoapp/migrations/0003_task_full_description.py | 09595c6bfdf9b7d031b76dd1d3c8ead3665d189d | [] | no_license | UuljanAitnazarova/python_homework_45_To_do_list | f7f4925bff987d5a13e52b48f745a4bddb72c440 | 592bd5ad8ac61f1147e7bf5ffe49f899a9a0c7cd | refs/heads/master | 2023-03-12T12:28:24.234398 | 2021-03-04T08:52:25 | 2021-03-04T08:52:25 | 341,508,360 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 469 | py | # Generated by Django 3.1.7 on 2021-02-26 10:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Schema migration: add the required full_description text field to Task."""

    dependencies = [
        ('todoapp', '0002_auto_20210223_1242'),
    ]

    operations = [
        migrations.AddField(
            model_name='task',
            name='full_description',
            # one-off default used only to populate existing rows
            field=models.TextField(default='полное описание задания'),
            preserve_default=False,
        ),
    ]
| [
"u.aitnazarova@gmail.com"
] | u.aitnazarova@gmail.com |
d22b1ae66a79c405434e7bf49bdaf4f2af845bf8 | 72694df198665321ab42ef52db58ad0291f7157e | /test/conftest.py | 4e2e478e596503116653883308d6086cd517939b | [] | no_license | akshay2424/flask_jwt_auth_and_role_manage | cdc7a9ef6467107a985bd751542541877afd2a33 | 10db863f2437f375bdf456bff5af9bd615b64be4 | refs/heads/master | 2023-08-16T13:31:13.319032 | 2021-09-27T15:29:03 | 2021-09-27T15:29:03 | 409,444,190 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 125 | py | import pytest
from app import app
@pytest.fixture
def client():
    """Yield a Flask test client bound to the application under test."""
    with app.test_client() as client:
        yield client
"demo@gmail.com"
] | demo@gmail.com |
8b8fb0da9d623937349fe29a780f9a3da79a7347 | af5edf05e8895a3c3457259421b83a1c1b6f5d32 | /wineGraph4.py | 5a363731d7c8d4e7da6cac5ee2952909fb9c9443 | [] | no_license | sirawit-suk/Probstat-Analysis-Graphs | 513a2186f2a9d302243d657577ab71ee5b777266 | 3730d3063d5b81647ed57438e72263c2e4698214 | refs/heads/main | 2023-07-07T00:51:19.282573 | 2021-08-10T10:37:51 | 2021-08-10T10:37:51 | 330,959,472 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,200 | py | import matplotlib.pyplot as plt # plot graphs
import pandas # colection for data
from scipy import stats
import math
import statistics as stc
# Init Style of Graph and Insert table of data in form of columns
plt.style.use('bmh')
columns = pandas.read_csv('testgraphredwine.csv')
# All columns
x = columns['alcohol'] # x (independent variable) = alcohol
y = columns['quality'] # y (dependent variable) = quality
'''
#Graphs
1. Histogram
2. Box Plot
3. Stem and Leave
4. XY (Scatter) Plot (suitable variable)(describe more)
Detail
1. Name of Graph
2. Name of Axis
3. Suitable variable
4. Identify Outlier
'''
'''
y = mx + c
m = SSxy/SSxx
SSxy = sum(x*y) + sum(x)*sum(y)/n
SSxx = sum(x*x) + sum(x)*sum(x)/n
SSyy = sum(y*y) + sum(y)*sum(y)/n
r = SSxy / sqrt(SSxx*SSyy)
'''
#y = m*x + c
# Least-squares regression of quality on alcohol, computed by hand.
n = len(x)
y_bar = stc.mean(y)
x_bar = stc.mean(x)
# method 1 (easy to calculate): expanded sum-of-products formulas
SSxy = sum(x*y) - sum(x)*sum(y)/n
SSxx = sum(x*x) - sum(x)*sum(x)/n
SSyy = sum(y*y) - sum(y)*sum(y)/n
# method 2 (easy to understand): same sums via deviations from the means
SSxy_mean = sum(( x - x_bar) * (y - y_bar))
SSxx_mean = sum(( x - x_bar) ** 2)
SSyy_mean = sum(( y - y_bar) ** 2)
# find slope (m)
m = SSxy / SSxx
# find intercept (c)
c = y_bar - m * x_bar
# find correlation coefficient (r)
r = SSxy / math.sqrt(SSxx*SSyy)
# residual / explained sums of squares and three equivalent R^2 forms
y_estimate = m * x + c
SSerror = sum((y-y_estimate) ** 2) # ( actual value - estimate value ) ^ 2 # graph -> ('/.) # value is + or - change to ^2
Rsquare = 1 - SSerror / SSyy
SSerrorInvert = sum((y_estimate-y_bar) ** 2) # ( estimate value - mean value ) ^ 2 # graph -> (-/-) # sum is = 0 if not ^2
RsquareV2 = SSerrorInvert / SSyy
# conclude: SSerrorInvert = (m * SSxy) = sum((y_estimate-y_bar) ** 2)
# conclude: SSerror = SSyy - (m * SSxy) = sum((y-y_estimate) ** 2)
RsquareV3 = (m * SSxy) / SSyy
# standard error of the estimate, n-2 degrees of freedom
stdERR = math.sqrt(SSerror / (n-2))
stdERRV2 = math.sqrt((SSyy - (m*SSxy)) / (n-2))
print('------------ My Own Calculation --------------')
print("slope (m) = {:.2f}".format(m))
print("intercept (c) = {:.2f}\n".format(c))
print("n = {:.2f}".format(n))
print("y_bar = {:.2f}".format(y_bar))
print("x_bar = {:.2f}\n".format(x_bar))
print("SSxy_calc = {:.4f}".format(SSxy))
print("SSxy_mean = {:.4f}".format(SSxy_mean))
print("SSxx_calc = {:.4f}".format(SSxx))
print("SSxx_mean = {:.4f}".format(SSxx_mean))
print("SSyy_calc = {:.4f}".format(SSyy))
print("SSyy_mean = {:.4f}\n".format(SSyy_mean))
print("r_value (r) = {:.4f}".format(r))
print("r_value^2 (r^2) = {:.4f}".format(r ** 2))
#print(f"y_estimate = {y_estimate}\n")
print("SSerror = {:.4f}".format(SSerror))
print("SSerrorInvert = {:.4f}".format(SSerrorInvert))
print("Rsquare = {:.4f}".format(Rsquare))
print("RsquareV2 = {:.4f}".format(RsquareV2))
print("RsquareV3 = {:.4f}\n".format(RsquareV3))
print("stdERR = {:.4f}".format(stdERR))
print("stdERRV2 = {:.4f}\n".format(stdERRV2))
# Cross-check the hand computation against SciPy's linear regression.
print('--------------- Use Library Function ------------------')
slope, intercept, r_value, p_value, std_err = stats.linregress(x,y)
print("slope (m) = {:.2f}".format(slope))
print("intercept (c) = {:.2f}\n".format(intercept))
print("r_value (r) = {:.4f}".format(r_value))
print("r_value^2 (r^2) = {:.4f}\n".format(r_value ** 2))
print("p_value (p) = {:.4f} # Not use ".format(p_value))
print("std_err = {:.4f} # this is wrong, Don't use this one, IDK why ? use 0.7104 instead\n".format(std_err))
print(r"{} = {:.2f}x + {:.2f}, r^2 = {:.4f}, p = {:.4f}".format("y", slope, intercept, r_value **2, p_value))
# for title text: LaTeX-formatted regression-line equation
lineEquation = r"${} = {:.2f}x + {:.2f}, r^2 = {:.4f}, p = {:.4f}$".format("\^{y}", slope, intercept, r_value **2, p_value)
# plot graph: regression-line endpoints over the observed alcohol range
xMin_xMax = [min(x),max(x)]
yMin_yMax = [slope*min(x) + intercept, slope*max(x) + intercept] # y = mx + c
# Scatter Plot
figure, scat = plt.subplots(figsize=(12, 8))
plt.tight_layout(pad=4)
scat.set_title('The Relation between Alcohol and Quality in Red Wine (Scatter plot)\n' +lineEquation)
scat.set_xlabel('Alcohol (%/volume)') # independent
scat.set_ylabel('Quality (lv.1-10)') # dependent
scat.scatter(x, y)
scat.plot(xMin_xMax, yMin_yMax, alpha=.5, color="green")
# Show
plt.show() | [
"popeye-zz@hotmail.com"
] | popeye-zz@hotmail.com |
da12d3c31d5a691fa0565cbeff1fae9483d8d99b | 93e9bbcdd981a6ec08644e76ee914e42709579af | /devide_and_conquer/315_Count_of_Smaller_Numbers_After_Self.py | e99b6ea88b209f2414fdefe3366889f075cf4bbd | [] | no_license | vsdrun/lc_public | 57aa418a8349629494782f1a009c1a8751ffe81d | 6350568d16b0f8c49a020f055bb6d72e2705ea56 | refs/heads/master | 2020-05-31T11:23:28.448602 | 2019-10-02T21:00:57 | 2019-10-02T21:00:57 | 190,259,739 | 6 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,254 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
https://leetcode.com/problems/count-of-smaller-numbers-after-self/description/
You are given an integer array nums and you have to return a new counts array.
The counts array has the property where counts[i] is the number
of smaller elements to the right of nums[i].
Example:
Input: [5,2,6,1]
Output: [2,1,1,0]
Explanation:
To the right of 5 there are 2 smaller elements (2 and 1).
To the right of 2 there is only 1 smaller element (1).
To the right of 6 there is 1 smaller element (1).
To the right of 1 there is 0 smaller element.
"""
class Solution(object):
    def countSmaller(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Merge sort over (index, value) pairs. Merging from the back, whenever
        an element of the left half is placed after elements of the right
        half, every value still waiting in the right half is smaller and
        lies to its right in the original array. O(n log n) time.
        """
        def sort(enum):
            # // (not /): Python 3 true division yields a float, which breaks
            # both the `if half` base case and the slice indices below
            half = len(enum) // 2
            if half:
                left, right = sort(enum[:half]), sort(enum[half:])
                for i in range(len(enum))[::-1]:
                    if not right or left and left[-1][1] > right[-1][1]:
                        # all elements remaining in `right` are smaller
                        smaller[left[-1][0]] += len(right)
                        enum[i] = left.pop()
                    else:
                        enum[i] = right.pop()
            return enum
        smaller = [0] * len(nums)
        sort(list(enumerate(nums)))
        return smaller

    def rewrite(self, nums):
        """
        :type nums: List[int]
        :rtype: List[int]

        Idea: same merge-sort counting, re-written for practice.
        """
        result = [0] * len(nums)
        def msort(enum):
            half = len(enum) // 2  # floor division: slice indices must be ints
            if half:
                left = msort(enum[:half])
                right = msort(enum[half:])
                for idx in range(len(enum))[::-1]: # (4,3,2,1,0)
                    if (not right) or (left and left[-1][1] > right[-1][1]):
                        result[left[-1][0]] += len(right)
                        enum[idx] = left.pop()
                    else:
                        enum[idx] = right.pop()
            return enum
        msort(list(enumerate(nums))) # [(0, 3),(1,7),...]
        return result
def build():
    """Return the test input used by the __main__ demo.

    The unreachable second return keeps an alternative sample around;
    reorder the returns to switch test cases.
    """
    return [-1,-1]
    return [5,2,6,1]  # dead code by design: alternate sample input
if __name__ == "__main__":
    # quick manual check of both implementations on the same input
    s = Solution()
    print(s.countSmaller(build()))
    print(s.rewrite(build()))
| [
"vsdmars@gmail.com"
] | vsdmars@gmail.com |
818ff902546aedaae8aaab629c81c725f1bf8b91 | a94e8b83bb2a4ccc1fffbd28dd18f8783872daab | /Mock CCC/Mock CCC19S2 Pusheens Puzzle Present.py | aec48957b55fe819a6e6614885614ac56d2f56d4 | [] | no_license | MowMowchow/Competitive-Programming | d679c1fe2d7d52940dc83a07dc8048922078704e | 0ec81190f6322e103c2ae0ad8c3935bd4cdfff46 | refs/heads/master | 2022-03-03T03:01:03.383337 | 2022-02-17T07:27:45 | 2022-02-17T07:27:45 | 184,678,148 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 329 | py | import sys
import math
# Read an n x n grid from stdin and count the rows that are NOT a run of
# consecutive ascending integers (each such row bumps sublength once).
n = int(sys.stdin.readline())
grid = [[int(x) for x in sys.stdin.readline().split()] for x in range(n)]
counter = 1
sublength = 0
for row in grid:
    for curr in range(n-1):
        if row[curr] != row[curr+1] - 1:
            sublength += 1
            break
        #counter += 1
print(sublength) | [
"darkstar.hou2@gmail.com"
] | darkstar.hou2@gmail.com |
90696fe9cb8a128fc2377816dad24500a6a4d2f0 | d71a761ec8b1fcd96f84c9d333a44a50d493e161 | /carts/migrations/0004_auto_20200416_1810.py | 3401dec342287a80268e45941203515897a5959a | [] | no_license | rahulchoudhary1602/Ecommerce | b1dd78511c47b1a0bd1deed2e72b433ecf27e94e | da14e22195345808be6e3343b1db5ff3adc2b221 | refs/heads/master | 2023-05-29T14:03:50.958774 | 2021-06-19T13:54:22 | 2021-06-19T13:54:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 911 | py | # Generated by Django 2.2 on 2020-04-16 12:40
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Schema migration: drop Cart.items and Cart.product, link CartItem to
    Cart via a nullable FK, and add a per-line total to CartItem."""

    dependencies = [
        ('carts', '0003_auto_20200416_1424'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='cart',
            name='items',
        ),
        migrations.RemoveField(
            model_name='cart',
            name='product',
        ),
        migrations.AddField(
            model_name='cartitem',
            name='cart',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='carts.Cart'),
        ),
        migrations.AddField(
            model_name='cartitem',
            name='linetotal',
            # one-off default used to populate existing rows
            field=models.DecimalField(decimal_places=2, default=10.99, max_digits=100),
        ),
    ]
| [
"noreply@github.com"
] | noreply@github.com |
f82e3187de7f76ab1c1c5f060f89757b8d7032b4 | 4fb351c236aa040ba36b993c74df5d2c30f9123c | /comp_vi.py | 1d4eb89b89652d1f58d6d8d80b65f68bb0dc78d6 | [] | no_license | Y-Lou/mathematica_-Python-_- | 135dfb968dccbbcadb42c8d1c9b245c1355a88b7 | d154f6a5d1c0107f80b7f3a53367458e357b83d5 | refs/heads/main | 2023-01-21T21:02:40.868675 | 2020-11-24T11:49:05 | 2020-11-24T11:49:05 | 315,611,511 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,104 | py | """
计算vi的模块
"""
# Iterative-solver setup.
# NOTE(review): this script depends on names defined elsewhere before it runs
# (JS helper module, np, time, eHF, Vspc, doTurns, Omega, vi, Nval, ...).
gradConv = 100
# sort order of the eHF values on Vspc; used later to print vi/ni sorted
IXprint = np.argsort(np.array(JS.quzhi_1(eHF,Vspc)))
# per-iteration energy and wall-time records
EIter = [0]*doTurns
timeIter = [0]*doTurns
abA1 = JS.Ab_All_1(Omega)
abA2 = JS.Ab_All_2(Omega)
time_start=time.time()
Iter = doTurns
for i in range(Iter):
    # One self-consistency iteration: rebuild the chi tables from the current
    # vi, evaluate the energy and its gradient two equivalent ways, update vi.
    print("iter = %d" % int(i+1))
    print("gradConv = %d" % gradConv)
    vi = JS.Setprecision(vi,0,Myprecision)
    print(" Precision[vi] = ",Myprecision)
    v2i,chiMp1a = JS.FoldList(vi,Omega,Nval,Myprecision)
    va2mvb2 = JS.List_Sub(JS.quzhi_1(v2i,abA1),JS.quzhi_1(v2i,abA2),0)
    print(" Precision[chiMp1a] = ",Myprecision)
    chiNa = chiMp1a[Nval][:]
    chiNm1a = chiMp1a[Nval-1][:]
    chiNm2a = chiMp1a[Nval-2][:]
    chiNm3a = chiMp1a[Nval-3][:]
    v2chiNm3a = JS.List_CP(v2i,chiNm3a,1)
    RN = list(range(1,Nval+2))
    cv12 = JS.List_DM(chiMp1a,v2i,1)
    chiMp1 = JS.List_CP(RN,cv12,1)
    chiMp1.pop()
    chiMp1.insert(0,1)
    print(" Precision[chiMp1] = ",Myprecision)
    chiN = chiMp1[int(Nval)]
    # occupation numbers; their total should equal Nval (N is the deviation)
    ni = JS.List_Sub(1,JS.List_D(chiNa,chiN,0),2)
    N = JS.Total(ni,1)-Nval
    print(" N error = ",N)
    print(" ni = ",np.array(ni).astype(float))
    if calAbg is False:
        # Cheap path: skip the abg derivative machinery and only estimate vi.
        # NOTE(review): `iter` is the builtin function here, so this test is
        # always False and the break never fires; probably meant i or Iter
        # — TODO confirm before changing.
        if iter == 3:
            break
        time_start1=time.time()
        v2chiNm1a = JS.List_CP(v2i,chiNm1a,1)
        chiNm1ab = JS.List_D(JS.List_Sub(JS.quzhi_1(v2chiNm1a,abA1),JS.quzhi_1(v2chiNm1a,abA2),0),va2mvb2,1)
        Gc = JS.List_CP(GabList,chiNm1ab,1)
        ab1 = JS.Ab(Omega,1)
        mymat = JS.SparseArray(ab1,Gc,Omega,0)
        mymatT = np.array(mymat).T
        GvchiNm1 = JS.List_DM(JS.List_Add(mymat,mymatT,2),vi,2)
        exp1gEa1 = JS.List_D(JS.List_CP(GvchiNm1,Nval**2,0),chiNa,1)
        exp1viEst = JS.List_D(JS.Setprecision(JS.List_CP(JS.List_Sub(JS.quzhi_1(eHF,Vspc),FermiHF,1),-2,0),0,Myprecision),exp1gEa1,1)
        exp2gEa1 = JS.List_D(GvchiNm1,chiNm1a,1)
        exp2viEst = JS.List_D(exp2gEa1,JS.Setprecision(JS.List_CP(JS.List_Sub(JS.quzhi_1(eHF,Vspc),FermiHF,1),-2,0),0,Myprecision),1)
        # where occupation > 0.5 use estimate 1, elsewhere estimate 2
        bign = JS.Position(ni,0.5,0)
        newviEst = exp2viEst
        newviEst = JS.Replace(newviEst,exp1viEst,bign,0)
        time_end1 = time.time()
        print(" Precision[newviEst] = ",Myprecision)
        print(" time of estimate vi = ",time_end1-time_start1)
        print(" approx vi = ",newviEst)
        newviEst = JS.Replace(newviEst,Lastvi,LastV,1)
        vi = JS.Setprecision(newviEst,0,Myprecision)
        print(" setted vi = ",np.array(vi).astype(float))
        timeIter[i] = time_end1-time_start
        continue
    # --- derivatives of the chi tables with respect to the (a,b) pairs ---
    print(" Begin chiNm3,2,1ab: ")
    time_start2=time.time()
    chiNm3ab = JS.List_D(JS.List_Sub(JS.quzhi_1(v2chiNm3a,abA1),JS.quzhi_1(v2chiNm3a,abA2),0),va2mvb2,1)
    print(" Precision[chiNm3ab] = ",Myprecision)
    time_end2 = time.time()
    print(" time of chiNm3ab = ",time_end2-time_start2)
    Nvc3 = JS.List_CP(JS.quzhi_1(JS.List_CP(v2i,(Nval-2)**2,0),abA1),chiNm3ab,1)
    chiNm2ab = JS.List_Sub(JS.quzhi_1(chiNm2a,abA2),Nvc3,0)
    Nvc2 = JS.List_CP(JS.quzhi_1(JS.List_CP(v2i,(Nval-1)**2,0),abA1),chiNm2ab,1)
    chiNm1ab = JS.List_Sub(JS.quzhi_1(chiNm1a,abA2),Nvc2,0)
    print(" Precision[chiNm2ab] = ",Myprecision)
    print(" Precision[chiNm1ab] = ",Myprecision)
    time_end3 = time.time()
    print(" time of chiNm3,2,1ab = ",time_end3-time_start2)
    # --- average energy aveE (= E_V + E_LV) from the current vi ---
    v2Ge2 = JS.List_CP(JS.List_Add(JS.List_CP(ea,2,0),Gaa,0),v2i,1)
    vvGabList = JS.List_CP(JS.List_CP(GabList,JS.quzhi_1(vi,abA1),1),JS.quzhi_1(vi,abA2),1)
    v2v2LamabList = JS.List_CP(JS.List_CP(LamabList,JS.quzhi_1(v2i,abA1),1),JS.quzhi_1(v2i,abA2),1)
    aveE0 = JS.List_DM(v2Ge2,chiNm1a,0)
    aveE1 = 2*JS.List_DM(vvGabList,chiNm1ab,0)
    aveE2 = (2*(Nval-1)**2)*JS.List_DM(v2v2LamabList,chiNm2ab,0)
    aveE = ((aveE0+aveE1+aveE2)*Nval**2)/chiN
    print(" aveE = %.20f"%aveE)
    print(" Precision[aveE]= ",120)
    print(" Begin chiNm3,2,1abg: ")
    time_start3 = time.time()
    if flagUsePrecvi == True:
        # presumably the high-precision path via the JS helpers — TODO confirm
        va2chiNm3ab = JS.List_CP(JS.quzhi_1(v2i,abA1),chiNm3ab,1)
        chiNm3abg = JS.List_D(JS.List_Sub(JS.quzhi_1(va2chiNm3ab,agIX),JS.quzhi_1(va2chiNm3ab,bgIX),0),JS.quzhi_1(va2mvb2,abIX),1)
        time_end4 = time.time()
        print(" time of chiNm3abg = ",time_end4-time_start3)
        Nv2i1 = JS.List_CP((Nval-1)**2,v2i,0)
        Nv2i2 = JS.List_CP((Nval-2)**2,v2i,0)
        chiNm2abg = JS.List_Sub(JS.quzhi_1(chiNm2ab,bgIX),JS.List_CP(JS.quzhi_1(Nv2i2,aInabg),chiNm3abg,1),0)
        chiNm1abg = JS.List_Sub(JS.quzhi_1(chiNm1ab,bgIX),JS.List_CP(JS.quzhi_1(Nv2i1,aInabg),chiNm2abg,1),0)
    else:
        # same quantities evaluated with numpy double precision
        va2chiNm3ab = JS.List_CP(JS.quzhi_1(v2i,abA1),chiNm3ab,1)
        va2chiNm2ab = JS.List_CP(JS.quzhi_1(v2i,abA1),chiNm2ab,1)
        va2chiNm1ab = JS.List_CP(JS.quzhi_1(v2i,abA1),chiNm1ab,1)
        va2mvb2 = np.array(va2mvb2).astype(float)
        va2chiNm3ab = np.array(va2chiNm3ab).astype(float)
        va2chiNm2ab = np.array(va2chiNm2ab).astype(float)
        va2chiNm1ab = np.array(va2chiNm1ab).astype(float)
        chiNm3abg = (np.array(JS.quzhi_1(va2chiNm3ab,agIX))-np.array(JS.quzhi_1(va2chiNm3ab,bgIX)))/np.array(JS.quzhi_1(va2mvb2,abIX))
        chiNm2abg = (np.array(JS.quzhi_1(va2chiNm2ab,agIX))-np.array(JS.quzhi_1(va2chiNm2ab,bgIX)))/np.array(JS.quzhi_1(va2mvb2,abIX))
        chiNm1abg = (np.array(JS.quzhi_1(va2chiNm1ab,agIX))-np.array(JS.quzhi_1(va2chiNm1ab,bgIX)))/np.array(JS.quzhi_1(va2mvb2,abIX))
    print(" Precision[chiNm3abg] = ",17)
    print(" Precision[chiNm2abg] = ",17)
    print(" Precision[chiNm1abg] = ",17)
    time_end5 = time.time()
    print(" time of chiNm3,2,1abg = ",time_end5-time_start)
    chiNm3abg = np.array(chiNm3abg).tolist()
    chiNm3abg.append(0)
    chiNm2abg = np.array(chiNm2abg).tolist()
    chiNm2abg.append(0)
    chiNm1abg = np.array(chiNm1abg).tolist()
    chiNm1abg.append(0)
    # symmetrized sparse matrices used by both gradient expressions
    ab1 = JS.Ab(Omega,1)
    mymat = JS.SparseArray(ab1,chiNm1ab,Omega,0)
    mymatT = np.array(mymat).T
    chiNm1abMat = JS.List_Add(mymat,mymatT,2)
    mymat = JS.SparseArray(ab1,chiNm2ab,Omega,0)
    mymatT = np.array(mymat).T
    chiNm2abMat = JS.List_Add(mymat,mymatT,2)
    Gc = JS.List_CP(GabList,chiNm1ab,1)
    mymat = JS.SparseArray(ab1,Gc,Omega,0)
    mymatT = np.array(mymat).T
    GvchiNm1 = JS.List_DM(JS.List_Add(mymat,mymatT,2),vi,1)
    Lc = JS.List_CP(LamabList,chiNm2ab,1)
    mymat = JS.SparseArray(ab1,Lc,Omega,0)
    mymatT = np.array(mymat).T
    Lamv2chiNm2 = JS.List_DM(JS.List_Add(mymat,mymatT,2),v2i,1)
    # sanity check on the number of (a,b,g) index triples
    if int(len(abgIX)) == (Omega**2)*(Omega-1)/2:
        pass
    else:
        print(" Wrong Length ")
        sys.exit()
    print(" gEa expression1 Begins: ")
    ENa0 = JS.List_DM(chiNm1abMat,v2Ge2,2)
    Co1 = np.array(JS.quzhi_1(chiNm1abg,(np.array(abgIX)-1))).reshape((int(len(JS.quzhi_1(chiNm1abg,(np.array(abgIX)-1)))/Omega),Omega))
    Co1 = JS.Setprecision(Co1,1,Myprecision)
    ENa1 = JS.List_CP(JS.List_DM(Co1,vvGabList,2),2,0)
    Co2 = np.array(JS.quzhi_1(chiNm2abg,(np.array(abgIX)-1))).reshape((int(len(JS.quzhi_1(chiNm1abg,(np.array(abgIX)-1)))/Omega),Omega))
    Co2 = JS.Setprecision(Co2,1,Myprecision)
    ENa2 = JS.List_CP(JS.List_DM(Co2,v2v2LamabList,2),2*((Nval-1)**2),0)
    ENa01 = JS.List_Add(ENa0,ENa1,0)
    ENa012 = JS.List_Add(ENa01,ENa2,0)
    ENa = JS.List_CP(JS.List_D(ENa012,chiNa,1),Nval**2,0)
    exp1gEa0 = JS.List_Sub(aveE,ENa,2)
    exp1gEa1 = JS.List_CP(JS.List_D(GvchiNm1,chiNa,1),-Nval**2,0)
    exp1vi = np.array(JS.List_D(exp1gEa0,exp1gEa1,1))*(-1)
    exp1viEst = JS.List_CP(JS.List_D(JS.Setprecision(JS.List_Sub(JS.quzhi_1(eHF,Vspc),FermiHF,1),0,Myprecision),exp1gEa1,1),-2,0)
    print(" Precision[exp1vi]= ",Myprecision)
    eve01 = JS.List_Add(JS.List_D(exp1gEa0,vi,1),exp1gEa1,0)
    exp1gEa = JS.List_CP(JS.List_CP(eve01,chiNa,1),(2/chiN),0)
    exp1graderror = JS.List_DM(exp1gEa,vi,0)
    print(" gEa expression2 Begins: ")
    eG = JS.List_Add(JS.List_CP(ea,2,0),Gaa,0)
    NLc = JS.List_CP(JS.List_D(Lamv2chiNm2,chiNm1a,1),2*((Nval-1)**2),0)
    da = JS.List_Add(eG,NLc,0)
    ENm1a0 = JS.List_DM(chiNm2abMat,v2Ge2,2)
    Co2 = np.array(JS.quzhi_1(chiNm2abg,(np.array(abgIX)-1))).reshape((int(len(JS.quzhi_1(chiNm1abg,(np.array(abgIX)-1)))/Omega),Omega))
    Co2 = JS.Setprecision(Co2,1,Myprecision)
    ENm1a1 = JS.List_CP(JS.List_DM(Co2,vvGabList,2),2,0)
    Co3 = np.array(JS.quzhi_1(chiNm3abg,(np.array(abgIX)-1))).reshape((int(len(JS.quzhi_1(chiNm1abg,(np.array(abgIX)-1)))/Omega),Omega))
    Co3 = JS.Setprecision(Co3,1,Myprecision)
    ENm1a2 = JS.List_CP(JS.List_DM(Co3,v2v2LamabList,2),2*((Nval-2)**2),0)
    ENm01 = JS.List_Add(ENm1a0,ENm1a1,0)
    ENm012 = JS.List_Add(ENm01,ENm1a2,0)
    ENm1a = JS.List_CP(JS.List_D(ENm012,chiNm1a,1),(Nval-1)**2,0)
    exp2gEa0 = JS.List_Sub(JS.List_Add(da,ENm1a,0),aveE,1)
    exp2gEa1 = JS.List_D(GvchiNm1,chiNm1a,1)
    exp2vi = np.array(JS.List_D(exp2gEa1,exp2gEa0,1))*(-1)
    eVF = JS.List_Sub(JS.List_CP(JS.quzhi_1(eHF,Vspc),2,0),FermiHF,1)
    eVF = JS.Setprecision(eVF,0,Myprecision)
    exp2viEst = np.array(JS.List_D(exp2gEa1,eVF,1))*(-1)
    print(" Precision[exp2vi]= ",Myprecision)
    e2ve01 = JS.List_Add(JS.List_CP(exp2gEa0,vi,1),exp2gEa1,0)
    exp2gEa = JS.List_CP(JS.List_CP(e2ve01,chiNm1a,1),((2*(Nval**2))/chiN),0)
    exp2graderror = JS.List_DM(exp2gEa,vi,0)
    # --- consistency diagnostics between the two expressions ---
    print(" ***Compare results: ***")
    exp1gEa = np.array(exp1gEa).astype(float)
    exp2gEa = np.array(exp2gEa).astype(float)
    print(" max grad_exp1 (0 after conv) = ",np.max(np.maximum(exp1gEa,-exp1gEa)))
    print(" max grad_exp2 (0 after conv) = ",np.max(np.maximum(exp2gEa,-exp2gEa)))
    print(" max (grad_exp1 - grad_exp2) (always 0) = ",np.max(np.maximum(exp1gEa - exp2gEa,-(exp1gEa - exp2gEa))))
    print(" grad exp1 dot vi (always 0, because vi norm NOT affect E) = ",exp1graderror)
    print(" grad exp2 dot vi (always 0, because vi norm NOT affect E) = ",exp2graderror)
    e1vi = np.array(JS.List_Sub(exp1vi,vi,0)).astype(float)
    e2vi = np.array(JS.List_Sub(exp2vi,vi,0)).astype(float)
    print(" max(vi_exp1 - vi) (0 after conv) = ",np.max(np.maximum(e1vi,-e1vi)))
    print(" max(vi_exp2 - vi) (0 after conv) = ",np.max(np.maximum(e2vi,-e2vi)))
    ee12 = np.array(JS.List_Sub(exp1vi,exp2vi,0)).astype(float)
    print(" max(vi_exp1 - vi_exp2) (0 after conv, solved by setting grad=0) = ",np.max(np.maximum(ee12,-ee12)))
    # --- assemble, rescale and record the next vi ---
    bign = JS.Position(ni,0.5,0)
    newvi = exp2vi
    newvi = JS.Replace(newvi,exp1vi,bign,0)
    newviEst = exp2viEst
    newviEst = JS.Replace(newviEst,exp1viEst,bign,0)
    print(" scaling = newvi[[viset1]] = ",newvi[viset1])
    newviEst = JS.List_D(newviEst,newvi[viset1],0)
    newvi = JS.List_D(newvi,newvi[viset1],0)
    print(" Precision[newvi]= ",Myprecision)
    newvi1 = JS.quzhi_1(newvi,IXprint)
    newvi1 = np.array(newvi1).reshape((1,int(len(newvi1)))).T
    newviEst1 = JS.quzhi_1(newviEst,IXprint)
    newviEst1 = np.array(newviEst1).reshape((1,int(len(newviEst1)))).T
    nEn = JS.List_Sub(JS.List_D(newviEst,newvi,1),1,1)
    nEn = JS.quzhi_1(nEn,IXprint)
    nEn = np.array(nEn).reshape((1,int(len(nEn)))).T
    ni1 = JS.quzhi_1(ni,IXprint)
    ni1 = np.array(ni1).reshape((1,int(len(ni1)))).T
    vvn = np.concatenate([newvi1,newviEst1,nEn,ni1],axis = 1)
    print(" vi,viEst,%,ni(Sorted)= ",vvn)
    with open("vi_eachrun.txt","a") as file:
        file.write(str(newvi))
    vi = JS.Setprecision(newvi,0,Myprecision)
    gradConv = np.max(np.maximum(exp2gEa,-exp2gEa))
    EIter[i] = aveE
    time_end = time.time()
    timeIter[i] = time_end-time_start
print(" Final gradConv = %.10f" %gradConv)
"""
(*Compute E_total = E_L + (E_V + E_LV). (E_V + E_LV) is aveE above.*)
"""
EL = 0
if int(len(Lspc)) > 0:
eNL = JS.quzhi_1(eNil,Lspc)
eNL = JS.Setprecision(eNL,0,Myprecision)
LaL = JS.quzhi_2(Lamab,Lspc)
LaL = JS.Setprecision(LaL,1,Myprecision)
EL = 2*JS.Total(eNL,1)+JS.Total(LaL,0)
Etotal = aveE + EL +Eshift
print(" Etotal = ",Etotal)
print(" Successful. ") | [
"noreply@github.com"
] | noreply@github.com |
6b2129d4540d341a6410c27b6999df41db836415 | db6a43df3afa3465bb4aa5762c21ff27ad6c25eb | /miscut/view/admin.py | 9db3fe685cb6cfb0350c66f143e8888a44dff4fd | [] | no_license | oe1rfc/miscut | 3354f317781d3b37f1711a62ec897ec861a8bff6 | f9362bc3f07987dfc62c7795163e24bbbe9f188b | refs/heads/master | 2021-06-13T19:12:24.030431 | 2020-01-06T17:41:00 | 2020-01-06T17:42:57 | 154,849,614 | 0 | 0 | null | 2021-03-30T22:39:35 | 2018-10-26T14:45:28 | JavaScript | UTF-8 | Python | false | false | 3,898 | py | from flask_admin.actions import action
from flask import flash
from ..view import AdminView
from ..model import db
class AdminSegmentView(AdminView):
    """Admin list/edit configuration for video segments."""
    form_columns = ('event', 'videofile', 'segment_id', 'version', 'start', 'length', 'transition', 'transition_length', 'assigned')
    column_list = ('event', 'videofile', 'segment_id', 'version', 'start', 'length', 'assigned', 'created_at', 'changed_at')
    column_searchable_list = ('event.event_id', 'event.conference.code', 'videofile.storage_url')
    column_filters = ('event.event_id', 'event.conference.code', 'videofile.storage_url', 'videofile.type', 'segment_id', 'version', 'assigned', 'created_at', 'changed_at')
class AdminFileView(AdminView):
    """Admin view for files, with bulk activate/deactivate actions."""
    can_view_details = True
    column_searchable_list = ('conference.name', 'conference.code', 'storage_url', 'file_url')
    details_template = 'file_details.html'
    column_editable_list = ('active', 'deleted', 'comment')
    column_filters = ('conference.code', 'type','active', 'deleted')

    @action('deactivate', 'Deactivate', 'Are you sure you want to deactivate entries?')
    def action_deactivate(self, ids):
        """Bulk action: clear the `active` flag on the selected rows."""
        try:
            query = self.model.query.filter(self.model.id.in_(ids))
            count = 0
            for e in query.all():
                e.active = False
                count += 1
            db.session.commit()
            flash('%s successfully deactivated.' % count)
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise
            # was gettext('Failed: %(error)s', error=str(ex)) — gettext is not
            # imported in this module and raised NameError; format directly
            flash('Failed: %(error)s' % {'error': str(ex)}, 'error')

    @action('activate', 'Activate', 'Are you sure you want to activate entries?')
    def action_activate(self, ids):
        """Bulk action: set the `active` flag on the selected rows."""
        try:
            query = self.model.query.filter(self.model.id.in_(ids))
            count = 0
            for e in query.all():
                e.active = True
                count += 1
            db.session.commit()
            flash('%s successfully activated.' % count)
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise
            # see action_deactivate: gettext was undefined here
            flash('Failed: %(error)s' % {'error': str(ex)}, 'error')
class AdminConferenceView(AdminView):
    """Admin view for conferences; hides the bulky schedulexml column."""
    column_exclude_list = ('schedulexml', )
    column_editable_list = ('active', )
class AdminEventView(AdminView):
    """Admin view for events, with bulk activate/deactivate actions."""
    can_view_details = True
    column_searchable_list = ('conference.name', 'event_id', 'name', 'subtitle', 'personnames')
    column_exclude_list = ('subtitle', 'duration', 'rendered_url', 'created_at', 'description_updated', 'changed_at')
    column_editable_list = ('active', 'state', 'comment', 'record')
    column_filters = ('conference.code', 'active', 'state', 'record', 'room')

    @action('deactivate', 'Deactivate', 'Are you sure you want to deactivate entries?')
    def action_deactivate(self, ids):
        """Bulk action: clear the `active` flag on the selected rows."""
        try:
            query = self.model.query.filter(self.model.id.in_(ids))
            count = 0
            for e in query.all():
                e.active = False
                count += 1
            db.session.commit()
            flash('%s successfully deactivated.' % count)
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise
            # was gettext('Failed: %(error)s', error=str(ex)) — gettext is not
            # imported in this module and raised NameError; format directly
            flash('Failed: %(error)s' % {'error': str(ex)}, 'error')

    @action('activate', 'Activate', 'Are you sure you want to activate entries?')
    def action_activate(self, ids):
        """Bulk action: set the `active` flag on the selected rows."""
        try:
            query = self.model.query.filter(self.model.id.in_(ids))
            count = 0
            for e in query.all():
                e.active = True
                count += 1
            db.session.commit()
            flash('%s successfully activated.' % count)
        except Exception as ex:
            if not self.handle_view_exception(ex):
                raise
            # see action_deactivate: gettext was undefined here
            flash('Failed: %(error)s' % {'error': str(ex)}, 'error')
| [
"spam-datacop@wireloss.net"
] | spam-datacop@wireloss.net |
3a9b17ad1550f22fccfe2def2dfcb461b3b451ec | 9dabd230e184424b0196e81a27ef39a762f7f9a3 | /pyramid_scheduler/__init__.py | 513b13bbbbe0a7f6caa77a5de25259ac51ffaa8e | [
"MIT"
] | permissive | cadithealth/pyramid_scheduler | be885af3cd64a649606e7b3cca5747e26a4aa49c | 65068f008336e1c03855c65e7fb8196e546b7998 | refs/heads/master | 2023-05-27T00:21:43.029673 | 2019-03-05T18:33:06 | 2019-03-05T18:33:06 | 9,380,656 | 14 | 6 | NOASSERTION | 2020-12-10T10:31:20 | 2013-04-11T21:25:01 | Python | UTF-8 | Python | false | false | 1,290 | py | # -*- coding: utf-8 -*-
#------------------------------------------------------------------------------
# file: $Id$
# lib: pyramid_scheduler
# auth: Philip J Grabner <grabner@cadit.com>
# date: 2013/04/15
# copy: (C) Copyright 2013 Cadit Inc., see LICENSE.txt
#------------------------------------------------------------------------------
'''
The ``pyramid-scheduler`` pyramid plugin allows asynchronous and
deferred task scheduling and management.
'''
import sys
from . import api
from .scheduler import Scheduler
#------------------------------------------------------------------------------
def includeme(config):
config.registry.scheduler = Scheduler(
settings = config.registry.settings,
appreg = config.registry,
)
config.registry.scheduler.startProducer()
# todo: there *must* be a better way of determining whether or not i am
# in an env where i should start the consumer...
if config.registry.scheduler.conf.combined \
and not [e for e in sys.argv if e.endswith('pshell') or e.endswith('pscheduler')]:
config.registry.scheduler.startConsumer()
#------------------------------------------------------------------------------
# end of $Id$
# $ChangeLog$
#------------------------------------------------------------------------------
| [
"pjg.github@ubergrabner.net"
] | pjg.github@ubergrabner.net |
8416f8f37f3a335de397a0cc3265c7f2f3f99373 | 09e22c1ad37d4a36833f51ced9a1dc2e0bfff19d | /app.py | a4842876c24707dde48635850f5d3a2e6d678ba5 | [] | no_license | happylusn/edopy | 6fcd3d35bfc92e54465780641eb36bc6bcbebbb4 | 4951dab7cae9eb453e4198d70a7d844770678322 | refs/heads/main | 2023-02-05T09:11:34.044041 | 2020-12-27T02:21:16 | 2020-12-27T02:21:16 | 321,282,478 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 845 | py | import os, logging
from aiohttp import web
from core.orm2 import create_pool, table, Model, IntegerField, StringField
from core.coroweb import add_routes, add_static
from config import configs
from core.function import init_jinja2
logging.basicConfig(level=logging.INFO)
@web.middleware
async def middleware1(request, handler):
try:
response = await handler(request)
if isinstance(response, web.StreamResponse):
return response
return web.Response(text=str(response))
except Exception as e:
return web.Response(text=str(e))
app = web.Application(middlewares=[middleware1])
init_jinja2(app)
add_routes(app, os.path.join(os.path.dirname(__file__), './api/v1'), 'api.v1')
add_static(app)
app.cleanup_ctx.append(create_pool(**configs.db))
if __name__ == '__main__':
web.run_app(app)
| [
"happylusn@163.com"
] | happylusn@163.com |
a8bf11893ca00b6dd3b2a5f68f0c44afae58e888 | 1a41cd019c77e0189700104d68e3d6c3d98bc2b8 | /Dependent_Package/User_Data.py | 09a3634986128117b9cc742370ef3fe9ad418406 | [] | no_license | Bankhead1995/SmartDoorLockSystem | 31d9c85aa48cd34222bf0333051fdb5cd8143f6c | 4c4e4d70d18ef34c47e60ca229f15ea42161f850 | refs/heads/main | 2023-04-03T09:08:49.195904 | 2021-04-10T01:45:58 | 2021-04-10T01:45:58 | 356,438,877 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,303 | py | import paho.mqtt.client as mqtt
from .T9 import T9_input
from .Face_Capture import capturingFace
from .Face_Trainner import trainner
from time import sleep
import os
class Usr_Data:
__userID = None
__userName = ''
__userPassCode = ''
__LCD = None
__KEYPAD = None
__BUZZER = None
__FPS = None
""" def __init__(self, lcd, keypad, buzzer ,fps, mobile_flag):
self.__LCD = lcd
self.__KEYPAD = keypad
self.__BUZZER = buzzer
self.__FPS = fps
if not mobile_flag
self.__nameEnroll()
self.__passcodeEnroll()
self.__fingerPrintEnroll()
self.__faceEnroll()
self.__enrollUserIntoServer() """
def __init__(self, *args):
if not args[0]:
self.__LCD = args[1]
self.__KEYPAD = args[2]
self.__BUZZER = args[3]
self.__FPS = args[4]
self.__nameEnroll()
self.__passcodeEnroll()
self.__fingerPrintEnroll()
self.__faceEnroll()
self.__enrollUserIntoServer()
#when register from mobile
#mobile_flag | LCD | FPS | username | passcode |
if args[0]:
self.__LCD = args[1]
self.__FPS = args[2]
self.__userName = args[3]
self.__userPassCode = args[4]
self.__fingerPrintEnroll()
self.__faceEnroll()
self.__enrollUserIntoServer()
def __nameEnroll(self):
self.__LCD.clear()
self.__LCD.write_string('Pleae enter your name ')
self.__userName = T9_input(self.__LCD, self.__KEYPAD,self.__BUZZER)
self.__LCD.clear()
self.__LCD.write_string("Hi "+self.__userName)
sleep(1)
def __passcodeEnroll(self):
verifyPasscodeFlag = False
while not verifyPasscodeFlag:
passcodeTemp = ''
verifyPasscode = ''
verifyPasscodeFlag = False
keyinput = ''
self.__LCD.clear()
self.__LCD.write_string("Please enter your Passcode on keypad (4 digits):")
self.__LCD.crlf()
while(len(passcodeTemp) < 4):
keyinput = self.__KEYPAD.getKey()
if keyinput is not None and keyinput is not 'A' and keyinput is not 'B' and keyinput is not 'C' and keyinput is not 'D' and keyinput is not '#' and keyinput is not '*':
self.__BUZZER.BUZ_ON()
passcodeTemp += keyinput
self.__LCD.clear()
self.__LCD.write_string("Please enter your Passcode on keypad (4 digits):")
self.__LCD.crlf()
for i in range(len(passcodeTemp)):
self.__LCD.write_string('*')
self.__LCD.clear()
self.__LCD.write_string("Please enter again for verify your passcode")
self.__LCD.crlf()
while(len(verifyPasscode) < 4):
keyinput = self.__KEYPAD.getKey()
if keyinput is not None and keyinput is not 'A' and keyinput is not 'B' and keyinput is not 'C' and keyinput is not 'D' and keyinput is not '#' and keyinput is not '*':
self.__BUZZER.BUZ_ON()
verifyPasscode += keyinput
self.__LCD.clear()
self.__LCD.write_string("Please enter again for verify your passcode")
self.__LCD.crlf()
for i in range(len(verifyPasscode)):
self.__LCD.write_string('*')
if len(verifyPasscode) is 4:
if verifyPasscode == passcodeTemp:
self.__LCD.clear()
self.__LCD.write_string('Verification successful')
sleep(1)
verifyPasscodeFlag = True
self.__userPassCode = passcodeTemp
else:
self.__LCD.clear()
self.__LCD.write_string('Verification failed please re-enter passcode')
sleep(2)
def __fingerPrintEnroll(self):
while (True):
self.__LCD.clear()
self.__LCD.write_string('Put your finger three times at the scanner to enroll')
try:
usrID_Temp = self.__FPS.enrollUser()
if usrID_Temp is not False:
self.__LCD.clear()
self.__LCD.write_string("Finger enroll successful")
self.__userID = usrID_Temp
sleep(1)
break
except :
self.__LCD.clear()
self.__LCD.write_string("Please Try again")
sleep(1.5)
pass
def __faceEnroll(self):
while True:
self.__LCD.clear()
self.__LCD.write_string("Lets enroll your face")
if capturingFace(str(self.__userID)):
self.__LCD.clear()
self.__LCD.write_string("please wait...")
trainner()
self.__LCD.clear()
self.__LCD.write_string("Face enrolled successful")
sleep(1)
break
else:
self.__LCD.clear()
self.__LCD.write_string("Face enroll failed")
sleep(1)
def retUsrID(self):
return self.__userID
def retUsrName(self):
return self.__userName
def changeName(self, newName):
self.__userName = newName
def changePasscode(self):
self.__passcodeEnroll()
def wirelessChangePasscode (self, newPasscode):
self.__userPassCode = newPasscode
def verifyPasscode(self, vfpc):
if vfpc == self.__userPassCode:
return True
else:
return False
def __enrollUserIntoServer(self):
os.chdir('/etc/mosquitto/')
os.system('echo %s|sudo -S %s' % ('lock', 'mosquitto_passwd -b passwd '+self.__userName+' '+self.__userPassCode))
os.system('echo %s|sudo -S %s' % ('lock', 'systemctl restart mosquitto.service'))
def retPasscode(self):
return self.__userPassCode | [
"noreply@github.com"
] | noreply@github.com |
98109fd6134554c75c5a772f7ae93f4656ca09cf | fe67390f6cfcf26c75f821f40df5670dbb648ebf | /python/example.py | d019dc9594d4de20537fc6f067fa908819e5368f | [] | no_license | Sprinterzzj/model_deployment | a133dc5cbc35a016f7adf6d3e6399c5b271a090f | 4ca4e270787e6dccac37e53f18c67aa50c4946a0 | refs/heads/master | 2022-12-25T02:20:43.392058 | 2020-09-27T10:36:28 | 2020-09-27T10:36:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 325 | py | from python_model import python_model
import numpy as np
model = python_model(model_path='../model/saved_pb/tensorflow.pb')
# 读取数据
f = np.load('../model/data/mnist.npz')
x_test, y_test = f['x_test'], f['y_test']
x_test = np.reshape(x_test, [-1, 784])
output=model.inference(x_test)
print(output.astype(np.int32))
| [
"gdyshi@126.com"
] | gdyshi@126.com |
dbc6452150f8f06e22b66ac18298bc53ce7bdf3a | 8bb4a472344fda15985ac322d14e8f4ad79c7553 | /Python3-Core/src/main/prompto/runtime/WidgetField.py | 66b4317c7bea2bed240b1d845097bc99cc1af084 | [] | no_license | prompto/prompto-python3 | c6b356f5af30c6826730ba7f2ad869f341983a2d | 64bd3d97d4702cc912097d41d961f7ab3fd82bee | refs/heads/master | 2022-12-24T12:33:16.251468 | 2022-11-27T17:37:56 | 2022-11-27T17:37:56 | 32,623,633 | 4 | 0 | null | 2019-05-04T11:06:05 | 2015-03-21T07:17:25 | Python | UTF-8 | Python | false | false | 201 | py | from prompto.runtime.Variable import Variable
from prompto.type.IType import IType
class WidgetField(Variable):
def __init__(self, name: str, itype: IType):
super().__init__(name, itype) | [
"eric.vergnaud@wanadoo.fr"
] | eric.vergnaud@wanadoo.fr |
05fb3095e4e3541975be0b03e7b6156b0e09a11f | d432a453ca399e40483cb6fd6f6a9d1e73442564 | /邮件/好看点的文本邮件.py | 4e730532c6a97de8e4635d8a89c954adf20b6046 | [] | no_license | gaoaolei/yufa | cae435910dcd8d981046662d13a4c355e6af7f4d | 9b824c4d05283a293f1bc0ab2de95731e4a31988 | refs/heads/master | 2023-04-29T22:46:41.047731 | 2023-04-24T06:40:33 | 2023-04-24T06:40:33 | 168,133,180 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 815 | py | # 配置邮件内容
from email.mime.text import MIMEText
from email.header import Header
from_address = '853573584@qq.com'
to_address = 'gaoaolei@km.com'
mail_host = "smtp.qq.com" # 设置服务器
mail_password = "mjxnwectfmfvbfih" # 口令
message = MIMEText('这是最简单的文本邮件','plain','utf8')
message['From'] = Header('QQ高傲雷<%s>' %from_address,'utf8')
message['To'] = Header('七猫高傲雷<%s>' %to_address,'utf8')
message['Subject'] = Header('这是一封python练习邮件','utf8')
# 配置邮件服务
import smtplib
# s = smtplib.SMTP(mail_host,25) # 这种走不通,好像qq邮箱必须走安全模式,如下
s = smtplib.SMTP_SSL()
s.connect(mail_host, 465)
s.login(from_address,mail_password)
# 发送邮件
s.sendmail(from_address,to_address,message.as_string())
s.quit() | [
"853573584@qq.com"
] | 853573584@qq.com |
937dd90a1569de5fb724834c1a6a357f2a550590 | b99a1b758567f02e963becb59bbd48a2f40a94a1 | /misc/ali/c.py | 0549b318417f9b059fea909110a6e3235c10222d | [] | no_license | sharingov/workspace | 81abb1ba7347e291e36533bdb33aa8ae0c9f4411 | f346857aad20c659c29c6e5f3198c29d5409a235 | refs/heads/master | 2023-07-17T11:50:57.183949 | 2021-08-30T17:11:58 | 2021-08-30T17:11:58 | 356,619,577 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 67 | py | n = 0
for _ in range(int(input())):
n += ord(input())
print(n)
| [
"dauletkhan.temirgaliyev@nu.edu.kz"
] | dauletkhan.temirgaliyev@nu.edu.kz |
8fe53ff0506d66c1da73533d1454e98788b42d84 | 9df71d5e3db1f427e590cf6d78077e092217aaf6 | /basket/migrations/0001_initial.py | 211e474135266dfbb69470fb6816782b2046fff1 | [] | no_license | usermuser/django_shop_example | 2fa0979ed00857ef950f4336c6974937ffb70cfc | 7fbf82c0ac80c0816d0df1ae621cec6086daaabf | refs/heads/master | 2022-09-20T08:40:39.173609 | 2020-06-03T10:25:02 | 2020-06-03T10:25:02 | 269,052,902 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,073 | py | # Generated by Django 2.1.7 on 2019-04-11 16:27
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('mainapp', '0003_auto_20190404_1811'),
]
operations = [
migrations.CreateModel(
name='Basket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('quantity', models.PositiveIntegerField(default=0, verbose_name='количество')),
('add_datetime', models.DateTimeField(auto_now_add=True, verbose_name='время')),
('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='mainapp.Product')),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='basket', to=settings.AUTH_USER_MODEL)),
],
),
]
| [
"lwrdrstv@gmail.com"
] | lwrdrstv@gmail.com |
8c5e668d090b4e4c2959d550da347983243de985 | f2aa9bbe11233982b048c3aac5c67c906186f8e2 | /proglect/lecteur.py | 89e766fbdc14df0cf0706a422943853f0baa2a65 | [] | no_license | drasss/drasss | 5c4ff619ed103ee11d1d882d9333be3f3997967b | 5b6dde48a2dc3f8f31dcd45680f809adb60f49b9 | refs/heads/main | 2023-05-19T15:14:38.872760 | 2021-05-29T20:53:07 | 2021-05-29T20:53:07 | 372,067,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,847 | py | import os.path,os,time,urllib.parse
#recuperation des noms de musiques dans un fichier txt (a cause de l'enodage UTF-8 on peut pas faire une seule commande)
#expliquant les "encoding="utf-8""
os.system("start recupmp3.bat")
time.sleep(0.5)
#lecture des noms du fichier txt dans une variable
lect=open("recup.txt","r",encoding="utf-8")
noms=lect.read()+"₾"
lect.close()
#séparation des noms dans un tuple
tab=("",)
i,j=0,0
st=""
#tant que le fichier n'est pas fini(
# tant que le fichier n'est pas fini et qu'il n'y a pas de retour a la ligne(
# lire la lettre et l'ajouter dans le str qui conserve la phrase)
# prendre la phrase et la rajouter dans le tableau (puis repartir de 0))
while noms[i]!="₾":
while (noms[i]!="." or noms[i+1]!="m" or noms[i+2]!="p" or noms[i+3]!="3") and noms[i]!="₾":
st+=noms[i]
i+=1
tab+=(st,)
st=""
i+=5
#exriture du fichier html
fichier=open("lecteur.html","w",encoding="utf-8")
fichier.write("<html><style type=\"text/css\" media=\"all\">@import \"css.css\";</style>\n")
#head
fichier.write("<head>")
fichier.write("<title>Lecteur Mp3</title>") #titre
fichier.write("<link rel=\"icon\" href=\"img\icone.png\" />") #icone
fichier.write("<meta charset=\"UTF-8\">")#utf-8 pour mozilla
fichier.write("")#
#mise en place des sons
fichier.write("\n</head><body>\n")
#pour ne pas utiliser de variable j utiliser "i+1"
for i in range(len(tab)-1):
fichier.write("<p>"+tab[i+1]+"</p>\n")
fichier.write("<audio controls=\"\" preload=\"none\" loop=\"true\"><source src=\"..\mp3\\"+urllib.parse.quote(tab[i+1])+".mp3\" type=\"audio/mpeg\"></audio>") #urllib.parse.quote() sert a transformer le char en url
fichier.write("</body>\n</html>\n")
fichier.close()
os.system("start lecteur.html")
| [
"noreply@github.com"
] | noreply@github.com |
70a552416af6b9e157994c1863a7fa3035a0ec75 | c48c4de9d221de538ac2a0e88430e4ad1c8a060c | /Python/AnalisisDeFigurasGeomtricas/main.py | cf4e02cf15c1976c77001d1d00e5013e9eac307b | [] | no_license | AlexBracamonte/Cris_Review | f95cc90f9fee819f1b4ea3c559723574e3820e91 | d7cdd3859a1e61ee212ccf975b2ec36678ff6adf | refs/heads/main | 2023-08-14T06:07:29.649469 | 2021-09-07T17:01:43 | 2021-09-07T17:01:43 | 404,043,692 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 19,746 | py | import os
import sys
from tabulate import tabulate
from math import pi
import numpy as np
from matplotlib.collections import PatchCollection
import matplotlib.pyplot as plt
import matplotlib.patches as patches
class Poligono:
def __init__(self):
self.figuras = []
self.data = []
self.dibujar = []
self.nodibujar = []
self.resultados = []
def rectangulo_add(self):
"""Rectangulo 1"""
b = float(input(f"Ingresar el valor base la figura: "))
h = float(input(f"Ingresar el valor altura la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = h * b
xp = b / 2
yp = h / 2
ix = (b * (h ** 3)) / 12
iy = (h * (b ** 3)) / 12
if hueco:
rec = patches.Rectangle((x, y), b, h, fill=True)
self.nodibujar.append(rec)
else:
rec = patches.Rectangle((x, y), b, h, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
def triangulo_add(self):
"""Triangulo"""
b = float(input(f"Ingresar el valor base la figura: "))
h = float(input(f"Ingresar el valor altura la figura: "))
a = float(input(f'Ingresar el valor de a: '))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = (b * h) / 2
xp = (a + b) / 3
yp = h / 3
ix = (b * (h ** 3)) / 36
iy = (b * h / 36) * ((a ** 2) - (a * b) + (b ** 2))
if hueco:
rec = triagulo(x, y, b, h, a)
self.nodibujar.append(rec)
else:
rec = triagulo(x, y, b, h, a)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp + x, yp + y, ix, iy]
def circulo_add(self, tipo):
if tipo == 1:
"""Completo"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2)
xp = 0
yp = 0
ix = (pi * (r ** 4)) / 4
iy = (pi * (r ** 4)) / 4
if hueco:
rec = patches.Circle((x, y), r, fill=True)
self.nodibujar.append(rec)
else:
rec = patches.Circle((x, y), r, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 2:
"""Semiciculo superior"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 2
xp = 0
yp = 4 * r / (3 * pi)
ix = 0.1098 * (r ** 4)
iy = (r ** 4) * pi / 8
if hueco:
rec = patches.Wedge([x, y], r, 0, 180, fill=True)
self.nodibujar.append(rec)
else:
rec = patches.Wedge([x, y], r, 0, 180, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 3:
"""Semiciculo derecha"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 2
xp = (4 * r) / (3 * pi)
yp = 0
ix = (r ** 4) * pi / 8
iy = 0.1098 * (r ** 4)
if hueco:
rec= patches.Wedge([x, y], r, 0, 180, fill=True, color='#white')
self.nodibujar.append(rec)
else:
rec= patches.Wedge([x, y], r, -90, 90, fill=True, color='#00AAE4')
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 4:
"""Semiciculo inferior"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 2
xp = (-4 * r) / (3 * pi)
yp = 0
ix = 0.1098 * (r ** 4)
iy = (r ** 4) * pi / 8
if hueco:
rec= patches.Wedge([x, y], r, 180, 360, fill=True)
self.nodibujar.append(rec)
else:
rec= patches.Wedge([x, y], r, 180, 360, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 5:
"""Semiciculo izquierda"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 2
xp = (-4 * r) / (3 * pi)
yp = 0
ix = (r ** 4) * pi / 8
iy = 0.1098 * (r ** 4)
if hueco:
rec = patches.Wedge([x, y], r, 90, 270, fill=True)
self.nodibujar.append(rec)
else:
rec = patches.Wedge([x, y], r, 90, 270, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 6:
"""Cuarto I"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 4
xp = (4 * r) / (3 * pi)
yp = (4 * r) / (3 * pi)
ix = (r ** 4) * ((pi / 16) - (4 / (9 * pi)))
iy = 0.05487 * (r ** 4)
if hueco:
rec= patches.Wedge([x, y], r, 0, 90, fill=True)
self.nodibujar.append(rec)
else:
rec= patches.Wedge([x, y], r, 0, 90, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 7:
"""Cuarto II"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 4
xp = (-4 * r) / (3 * pi)
yp = (4 * r) / (3 * pi)
ix = (r ** 4) * ((pi / 16) - (4 / (9 * pi)))
iy = 0.05487 * (r ** 4)
if hueco:
rec= patches.Wedge([x, y], r, 90, 180, fill=True)
self.nodibujar.append(rec)
else:
rec= patches.Wedge([x, y], r, 90, 180, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 8:
"""Cuarto III"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 4
xp = (-4 * r) / (3 * pi)
yp = (-4 * r) / (3 * pi)
ix = (r ** 4) * ((pi / 16) - (4 / (9 * pi)))
iy = 0.05487 * (r ** 4)
if hueco:
rec= patches.Wedge([x, y], r, 180, 270, fill=True)
self.nodibujar.append(rec)
else:
rec= patches.Wedge([x, y], r, 180, 270, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
if tipo == 9:
"""Cuarto IV"""
r = float(input(f"Ingresar el valor radio la figura: "))
x = float(input(f"Ingresa el desplazamiento en x: "))
y = float(input(f"Ingresa el desplazamiento en y: "))
while True:
hueco = input(f"¿Esta figura es un hueco? (s/n): ").lower()
if hueco == "s" or hueco == "n":
if hueco == 's':
hueco = True
else:
hueco = False
break
area = pi * (r ** 2) / 4
xp = (4 * r) / (3 * pi)
yp = (-4 * r) / (3 * pi)
ix = (r ** 4) * ((pi / 16) - (4 / (9 * pi)))
iy = 0.05487 * (r ** 4)
if hueco:
rec = patches.Wedge([x, y], r, 270, 360, fill=True)
self.nodibujar.append(rec)
else:
rec = patches.Wedge([x, y], r, 270, 360, fill=True)
self.dibujar.append(rec)
self.figuras.append([hueco, area, x, y, xp + x, yp + y, ix, iy])
return [hueco, area, x, y, xp, yp, ix, iy]
else:
print(f'El valor no esta dentro de la lista')
pass
def calcular_centroide(self):
suma_area = 0
suma_ax = 0
suma_ay = 0
self.data = []
headers = ['No Figura',
'Area [u²]',
'Xi [u]',
'Yi [u]',
'A*Xi [u³]',
'A*Yi [u³]',
"Ix' [u]",
"Iy' [u]",
"Dx [u]",
'Dy [u]',
'Ixc[u⁴]',
'Iyc[u⁴]']
for figura in self.figuras:
aux = [0] * 11
# [hueco, area, x, y, xp, yp, ix, iy]
if figura[0]:
aux[0] = -1 * figura[1]
aux[5] = -1 * figura[6]
aux[6] = -1 * figura[7]
else:
aux[0] = figura[1]
aux[5] = figura[6]
aux[6] = figura[7]
aux[1] = figura[4] # Xi
aux[2] = figura[5] # Yi
aux[3] = aux[0] * aux[1] # Axi
aux[4] = aux[0] * aux[2] # Ayi
aux[5] = figura[6] # I'x
aux[6] = figura[7] # I'y
suma_area = suma_area + aux[0]
suma_ax = suma_ax + aux[3]
suma_ay = suma_ay + aux[4]
self.data.append(aux)
Cx = suma_ax/suma_area
Cy = suma_ay/suma_area
sumaix = 0
sumaiy = 0
for figura in self.data:
print(Cx - figura[1])
print(figura)
self.data[self.data.index(figura)][7] = (Cx - figura[1]) # Dx
self.data[self.data.index(figura)][8] = (Cy - figura[2]) # Dy
print(figura)
self.data[self.data.index(figura)][9] = figura[5] + (figura[0] * (figura[8] ** 2)) # Ixc
self.data[self.data.index(figura)][10] = figura[6] + (figura[0] * (figura[7] ** 2)) # Iyc
if (figura[0]/figura[0]) == -1:
figura[9] = -figura[9]
figura[10] = -figura[10]
sumaiy = sumaiy + figura[10]
sumaix = sumaix + figura[9]
a = f"\n\nEl centroide de la figura se ubica en:\n" \
f"\tPx = {Cx}\n" \
f"\tPy = {Cy}"
print(a)
a = f"\n\nEL resutado de la inercia es: \n" \
f"\tIx = {sumaix}\n" \
f"\tIy = {sumaiy}"
print(a)
rec = patches.Circle((Cx, Cy), 0.25, fill=True)
self.resultados.append(rec)
self.resultados.append(rec)
tabla = tabulate(self.data, headers=headers, showindex=True)
print(tabla)
with open("tabla.txt", 'w') as f:
f.write(tabla)
a = f"\n\nEl centroide de la figura se ubica en:\n" \
f"\tPx = {Cx}\n" \
f"\tPy = {Cy}"
f.write(a)
a = f"\n\nEL resutado de la inercia es: \n" \
f"\tIx = {sumaix}\n" \
f"\tIy = {sumaiy}"
f.write(a)
f.write("\n\nGracias por usar")
f.write("\n\nFI - UAEMex, 2021")
def eliminar_figura(self):
headers = ['No Figura',
'Hueco',
'X [u]',
'Y [u]',
'Xp [u]',
'Yp [u]',
"Ix' [u]",
"Iy' [u]"]
print(tabulate(self.figuras, headers=headers, showindex=True))
print('¿Qué columa quieres eliminar?')
sel = int(input('>> '))
self.figuras.pop(sel)
self.dibujar.pop(sel)
def dibujar_figura(self):
fig, ax = plt.subplots()
p = PatchCollection(self.dibujar, alpha=0.4, facecolors= 'green')
d = PatchCollection(self.nodibujar, alpha=0.4, facecolors='white')
q = PatchCollection(self.resultados, alpha=0.4, facecolors='red')
ax.add_collection(p)
ax.add_collection(d)
ax.add_collection(q)
ax.set_title('Centroide de la figura')
plt.grid()
plt.xlabel("x [u]")
plt.ylabel("y [u]")
plt.autoscale()
plt.savefig('Figura.png')
plt.show()
def triagulo(x, y, b, h, a):
pol1 = patches.Polygon(np.array([[x, y], [x + a, y + h], [x + b, y]]))
return pol1
def borrar_pantalla(): # Definimos la función estableciendo el nombre que queramos
if sys.platform.startswith('win'):
os.system('cls')
elif sys.platform.startswith('darwin'):
os.system('clear')
elif sys.platform.startswith('linux'):
os.system('clear')
def mostrar_figuras():
figuras = ['Rectangulo', 'Triangulo', 'Circulo']
print(f"¿Qué figura deseas agregar? (Seleccionar un numero)")
for figura in figuras:
print(f"{figuras.index(figura) + 1}.- {figura}")
def mostar_circulos():
figuras = ['Completo', 'Medio circulo superior', 'Medio circulo derecha',
'Medio circulo inferior', 'Medio circulo izquierda',
'Cuarto circulo I', 'Cuarto circulo II', 'Cuarto circulo III', 'Cuarto circulo IV']
print(f"¿Qué figura deseas agregar? (Seleccionar un numero)")
for figura in figuras:
print(f"{figuras.index(figura) + 1}.- {figura}")
def mensaje():
txt = f"1. Agregar una figura \n" \
f"2. Eliminar alguna figura\n" \
f"3. Calcular \n" \
f"salir"
print(txt)
if __name__ == '__main__':
f_geomtricas = ['Rectangulo', 'Triangulo', 'Circulo']
p = Poligono()
borrar_pantalla()
print("//---SOFTWARE DESARROLLADO POR ALEJANDRO BRACAMONTE---//")
print("Bienvenido, ¿Que es lo que quieres hacer?")
while True:
borrar_pantalla()
mensaje()
selector = input('>> ').lower()
if selector == 'salir':
break
if selector == '1': # Agregar figura
borrar_pantalla()
mostrar_figuras()
sel_figure = input('>> ').lower()
if sel_figure == '1': # Rectangulo
borrar_pantalla()
p.rectangulo_add()
if sel_figure == '2': # Triangulo
borrar_pantalla()
p.triangulo_add()
if sel_figure == '3': # Circulos
borrar_pantalla()
mostar_circulos()
selector = int(input('>> '))
borrar_pantalla()
p.circulo_add(selector)
if selector == '2': # Eliminar una figura
borrar_pantalla()
p.eliminar_figura()
if selector == '3': # Realizar Calculos
borrar_pantalla()
p.calcular_centroide()
p.dibujar_figura()
quit()
| [
"noreply@github.com"
] | noreply@github.com |
ba788017d2d6b1db9dc5278d8d661bfb68cc0d95 | 496a01c0bc8d6bd9a19349cf578b273beb1093f1 | /SendDataToMatlab.py | a67f68356037432ece0877ce4a1f58fd662f1302 | [] | no_license | BlueBirdHouse/CarND-Traffic-Sign-Classifier-Project | 927276bff16450e5458a33a72099bd5aea03d1ba | 6b6eae2367a1ef565131784a90e957df8bd007a9 | refs/heads/master | 2021-05-07T18:29:10.210453 | 2017-11-15T14:03:02 | 2017-11-15T14:03:02 | 108,790,607 | 0 | 0 | null | 2017-10-30T02:12:29 | 2017-10-30T02:12:28 | null | UTF-8 | Python | false | false | 2,055 | py | #包文件导入区
import pickle
from zipfile import ZipFile
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from sklearn.utils import shuffle
import tensorflow as tf
import matlab.engine
#自己的包定义区
import Support
#%% Data-file loading section
FileName = './traffic-signs-data.zip'
zipf = ZipFile(FileName)
# Build a tqdm progress-bar iterator over the file names inside the zip
filenames_pbar = tqdm(zipf.namelist(), unit='files')
# Read every entry contained in the ZIP archive
for filename in filenames_pbar:
    # Skip directory entries, guarding against anything unexpected
    if not filename.endswith('/'):
        image_file = zipf.open(filename)
        # Load whichever pickle this entry corresponds to
        if filename == 'test.p':
            test = pickle.load(image_file)
        if filename == 'train.p':
            train = pickle.load(image_file)
        if filename == 'valid.p':
            valid = pickle.load(image_file)
        image_file.close()
zipf.close()
X_train, y_train = train['features'], train['labels']
X_valid, y_valid = valid['features'], valid['labels']
X_test, y_test = test['features'], test['labels']
#%% Training data
# Show summary statistics
# Number of training examples
n_train = np.shape(y_train)[0]
# Number of validation examples
n_validation = np.shape(y_valid)[0]
# Number of test examples
n_test = np.shape(y_test)[0]
# Shape of a single traffic-sign image
image_shape = np.shape(X_train)[1::]
# Number of distinct sign classes (assumes labels are 0..max -- TODO confirm)
n_classes = np.max(y_train)+1
print("Number of training examples =", n_train)
print("Number of testing examples =", n_test)
print("Image data shape =", image_shape)
print("Number of classes =", n_classes)
#%% Training-image processing
# Shuffle images and labels together (keeps pairs aligned)
X_train, y_train = shuffle(X_train, y_train)
# Hand the data over to Matlab for further processing
#Support.FigureDataToMatlab(X_valid,0,4410,'X_valid')
#Support.FigureDataToMatlab(y_valid,0,4410,'y_valid')
Support.FigureDataToMatlab(X_test,0,12630,'X_test')
Support.FigureDataToMatlab(y_test,0,12630,'y_test')
'''
Support.FigureDataToMatlab(X_train,0,34799,'X_train')
Support.FigureDataToMatlab(y_train,0,34799,'y_train')
'''
| [
"bluebirdhouse@icloud.com"
] | bluebirdhouse@icloud.com |
4494b2fbf7018b97ce43e9549b2011a235080f46 | 9bed0ff093b968a74faaf6046991bf5cd86c4a24 | /snifter/template.py | da7fd49594f35aa41a4ef1bf35abcebca8b3141b | [
"BSD-3-Clause"
] | permissive | jackvandrunen/snifter | e5350a7e5134aa30fa6eb9310135f90a84e3ed67 | 30dbd71b5d76841edcd327c900870855601e39d4 | refs/heads/master | 2021-05-28T02:40:47.406699 | 2014-12-25T03:35:03 | 2014-12-25T03:35:03 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,878 | py | from __future__ import print_function
import sys
py3 = sys.version_info >= (3,0)
import io
import re
import os
import itertools
import functools
TEMPLATE_PATH = ['.', './views']
TEMPLATE_EXTS = ['', '.tpl', '.html']
def render(code, **kwargs):
    """Expand every template tag in *code* and return the resulting text.

    ``<%= expression %>`` tags are replaced by the evaluated expression;
    ``<% statements %>`` tags are executed, with ``print`` and ``include``
    bound so their output lands in the rendered document.  *kwargs* is the
    variable namespace visible to the template.
    """
    expr_tag = re.compile(r'<%=\s*(.*?)\s*%>', re.DOTALL)
    any_tag = re.compile(r'<%=?\s*(.*?)\s*%>', re.DOTALL)
    while True:
        # re-scan from the start each pass: a substitution may itself
        # contain further tags
        found = any_tag.search(code)
        if found is None:
            return code
        body = found.group(1)
        if expr_tag.match(found.group()):
            rendered = eval(body, {}, kwargs)
        else:
            buffer = io.StringIO() if py3 else io.BytesIO()
            scope = {'print': buffer.write,
                     'include': functools.partial(_include, namespace=kwargs, print=buffer.write)}
            exec(body, scope, kwargs)
            rendered = buffer.getvalue()
        code = '{0}{1}{2}'.format(code[:found.start()], rendered, code[found.end():])
def _include(template, namespace, print=print):
    """Render *template* with *namespace* and emit the result via *print*."""
    print(render(_loadtpl(template), **namespace))
def _loadtpl(template):
    """Locate *template* on TEMPLATE_PATH (trying TEMPLATE_EXTS) and return its text.

    Raises RuntimeError when no candidate file exists.
    """
    candidates = (
        os.path.join(path, '{0}{1}'.format(template, ext))
        for path, ext in itertools.product(TEMPLATE_PATH, TEMPLATE_EXTS)
    )
    for source in candidates:
        if os.path.exists(source):
            # text mode on Python 3, raw bytes on Python 2
            with open(source, 'r' if py3 else 'rb') as handle:
                return handle.read()
    raise RuntimeError('Template file not found')
def view(template):
    """Decorator: render *template* with the dict returned by the view function.

    The decorated function must return a dict; its items become the
    template's variables.  The template file is loaded once, at decoration
    time, so missing templates fail fast.
    """
    code = _loadtpl(template)

    def wrapper(func):
        @functools.wraps(func)  # preserve the view's name/docstring for introspection
        def servetpl(*args, **kwargs):
            namespace = func(*args, **kwargs)
            return render(code, **namespace)
        return servetpl
    return wrapper
def template(template, **kwargs):
    """Load *template* and render it immediately with *kwargs* as variables."""
    return render(_loadtpl(template), **kwargs)
| [
"jack@fallingduck.net"
] | jack@fallingduck.net |
ab2d0f7e8d5a43526a5e8a5af4816cbc8a991582 | e8268566f1bbbb19f75fd612ec400b2d5f729f5b | /lectures/Ex.11.py | 6f43e17a579a263051e733d174c4eff14c251ad7 | [] | no_license | j-kincaid/LC101-practice-files | f52df80b5ab11ceff04b58ea0a8a8997d6270bf0 | 5a6b50c17b65a7f6239a445265c914fd3656cbd6 | refs/heads/master | 2020-04-10T14:49:25.920906 | 2018-12-10T20:34:52 | 2018-12-10T20:34:52 | 161,088,607 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 517 | py | def sum_all(nums):
# your code here
found_first = False
total = 0
for n in nums:
if not found_first and n % 2 ==0:
found_first = True
continue
total += n
return total
print(sum_all([1,2,3,4]))  # prints 8: only the first even value (2) is skipped
# NOTE(review): `test` here is a course-local helper module, not the stdlib
# `test` package -- this import fails unless that helper is on sys.path.
from test import testEqual
# NOTE(review): `sum_of_initial_odds` is never defined in this file, so every
# call below raises NameError.  The expected values suggest the intended
# behaviour is "sum the odd numbers that appear before the first even one",
# which is NOT what sum_all() above computes.
testEqual(sum_of_initial_odds([1,3,1,4,3,8]), 5)
testEqual(sum_of_initial_odds([6,1,3,5,7]), 0)
testEqual(sum_of_initial_odds([1, -7, 10, 23]), -6)
testEqual(sum_of_initial_odds(range(1,555,2)), 76729)
"4j.kincaid@gmail.com"
] | 4j.kincaid@gmail.com |
191484b45d9f5be8b3b00a8c2a6d15772c38782c | abc72a2f2072ab7a5a338e41d81c354324943b09 | /tarefa11/sugestao.py | 9c645d2ab609253931239f50b72395ec34a5b090 | [] | no_license | gigennari/mc102 | a3d39fd9a942c97ef477a9b59d7955f4269b202a | fce680d5188a8dfb0bc1832d6f430cbcaf68ef55 | refs/heads/master | 2023-04-05T01:40:58.839889 | 2020-07-27T20:33:56 | 2020-07-27T20:33:56 | 354,130,720 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,420 | py | """
Escreva um programa que, dadas duas palavras, sugere ao usuário qual será
a próxima palavra, tal que essa escolha seja baseada na probabilidade de que
essas três palavras apareçam nessa ordem em um determinado texto.
Entrada:
1ª linha - o caminho do arquivo de texto
2ª linha - sequência de pares de palavras, um por linha
Saída:
frase sugerida com 3 palavras, um trio por linha
Exemplo:
E:
testes/tetxto0.in
buraco negro
buracos negros
campo gravitacional
S:
buraco negro é
buracos negros não
campo gravitacional pode
Vamos criar uma classe com 3 palavras
"""
class Frase:
    """Three words of a phrase: p1, p2 and the suggested next word p3."""

    def __init__(self, p1, p2, p3):
        self.p1, self.p2, self.p3 = p1, p2, p3

    def __repr__(self):
        # the space flag in "% s" is a no-op for %s: output is "p1 p2 p3"
        return "% s % s % s" % (self.p1, self.p2, self.p3)
def ler_entrada():
    """Read the text-file path, then one word pair per line until EOF.

    Returns a tuple ``(path, frases)`` where each Frase has an empty
    third word, to be filled in later.
    """
    caminho = input()
    frases = []
    while True:
        try:
            linha = input()
        except EOFError:
            return caminho, frases
        palavras = linha.split()
        frases.append(Frase(palavras[0], palavras[1], ""))
def ler_arquivo_texto(nome_do_arquivo):
    """Read a text file, lower-case it, strip punctuation and split into words.

    Returns the list of words, in document order.
    """
    # context manager closes the file deterministically (the original left
    # the handle to the garbage collector)
    with open(nome_do_arquivo) as arquivo:
        texto = arquivo.read().lower()
    texto = limpar_pontuacao(texto)
    return texto.split()
def limpar_pontuacao(string):
    """Return *string* with punctuation removed (hyphens are kept on purpose)."""
    pontuacao = ".,;:?!''/*#@&"  # the doubled quote contributes the apostrophe
    # str.translate deletes the characters in one C-level pass instead of
    # the original char-by-char Python loop with string concatenation
    return string.translate(str.maketrans("", "", pontuacao))
def descobrir_frequencias(palavras):
    """Count how often each word occurs; returns a ``{word: count}`` dict."""
    # local import: this script has no top-level import section
    from collections import Counter
    return dict(Counter(palavras))
def encontrar_proxima_palavra(texto, frase):
    """Fill ``frase.p3`` with the most likely word to follow p1 + p2.

    Scans *texto* (a list of words) for every occurrence of the pair
    ``(frase.p1, frase.p2)`` and collects the word that follows it.  The
    most frequent follower wins; frequency ties are broken alphabetically.
    If the pair never occurs (or only occurs at the very end of the text),
    *frase* is returned unchanged instead of raising -- the original code
    crashed with IndexError in both of those cases.
    """
    from collections import Counter

    # stop two words early so texto[i + 2] is always a valid index
    seguintes = [
        texto[i + 2]
        for i in range(len(texto) - 2)
        if texto[i] == frase.p1 and texto[i + 1] == frase.p2
    ]
    if not seguintes:
        return frase

    frequencias = Counter(seguintes)
    # alphabetical sort first, then a stable sort by descending frequency:
    # the head is the most frequent, alphabetically smallest follower
    ordenadas = sorted(frequencias.items(), key=lambda par: par[0])
    ordenadas.sort(key=lambda par: par[1], reverse=True)
    frase.p3 = ordenadas[0][0]
    return frase
def main():
    """Read the input, complete each word pair and print one phrase per line."""
    caminho, pares = ler_entrada()
    palavras = ler_arquivo_texto(caminho)
    completas = [encontrar_proxima_palavra(palavras, par) for par in pares]
    for completa in completas:
        print(completa)
main()  # script entry point (runs unconditionally, even on import)
"g198010@dac.unicamp.br"
] | g198010@dac.unicamp.br |
ce0ae0062dc3d00fd5663f4b0747d7581a81659d | 733fbbac5dda7f63eb1e962a927dac321b88afd4 | /vnv/Lib/site-packages/google/cloud/dialogflow_v2/services/agents/transports/base.py | 8987cfbe7d069f849a5ae66c64eb0ba7612dd44d | [] | no_license | Kariyma/RussianPostBot | 1ad22b9a56917f6a160c44642c334544b9fc6e25 | a31a02e003a1435f62b4d1cfb36556615bc8586e | refs/heads/master | 2023-03-13T22:54:01.315194 | 2021-03-12T03:29:30 | 2021-03-12T03:29:30 | 346,217,417 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,494 | py | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import abc
import typing
import pkg_resources
from google import auth # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.api_core import operations_v1 # type: ignore
from google.auth import credentials # type: ignore
from google.cloud.dialogflow_v2.types import agent
from google.cloud.dialogflow_v2.types import agent as gcd_agent
from google.cloud.dialogflow_v2.types import validation_result
from google.longrunning import operations_pb2 as operations # type: ignore
from google.protobuf import empty_pb2 as empty # type: ignore
# Advertise the installed google-cloud-dialogflow version in the user-agent
# sent with API requests; fall back to an anonymous ClientInfo when the
# distribution metadata is unavailable (e.g. a vendored, non-pip install).
try:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-dialogflow",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
class AgentsTransport(abc.ABC):
    """Abstract transport class for Agents."""

    # OAuth 2.0 scopes requested when credentials are resolved from a file
    # or from the environment.
    AUTH_SCOPES = (
        "https://www.googleapis.com/auth/cloud-platform",
        "https://www.googleapis.com/auth/dialogflow",
    )

    def __init__(
        self,
        *,
        host: str = "dialogflow.googleapis.com",
        credentials: credentials.Credentials = None,
        credentials_file: typing.Optional[str] = None,
        scopes: typing.Optional[typing.Sequence[str]] = AUTH_SCOPES,
        quota_project_id: typing.Optional[str] = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
        **kwargs,
    ) -> None:
        """Instantiate the transport.

        Args:
            host (Optional[str]): The hostname to connect to.
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            credentials_file (Optional[str]): A file with credentials that can
                be loaded with :func:`google.auth.load_credentials_from_file`.
                This argument is mutually exclusive with credentials.
            scope (Optional[Sequence[str]]): A list of scopes.
            quota_project_id (Optional[str]): An optional project to use for billing
                and quota.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        """
        # Save the hostname. Default to port 443 (HTTPS) if none is specified.
        if ":" not in host:
            host += ":443"
        self._host = host

        # If no credentials are provided, then determine the appropriate
        # defaults.
        if credentials and credentials_file:
            raise exceptions.DuplicateCredentialArgs(
                "'credentials_file' and 'credentials' are mutually exclusive"
            )

        if credentials_file is not None:
            credentials, _ = auth.load_credentials_from_file(
                credentials_file, scopes=scopes, quota_project_id=quota_project_id
            )
        elif credentials is None:
            credentials, _ = auth.default(
                scopes=scopes, quota_project_id=quota_project_id
            )

        # Save the credentials.
        self._credentials = credentials

        # Lifted into its own function so it can be stubbed out during tests.
        self._prep_wrapped_messages(client_info)

    def _prep_wrapped_messages(self, client_info: gapic_v1.client_info.ClientInfo) -> None:
        # Precompute the wrapped methods.
        # Each RPC is wrapped so the client-info user-agent metadata (and any
        # default retry/timeout configuration -- none is set here) is
        # attached to every call.
        self._wrapped_methods = {
            self.get_agent: gapic_v1.method.wrap_method(
                self.get_agent, default_timeout=None, client_info=client_info,
            ),
            self.set_agent: gapic_v1.method.wrap_method(
                self.set_agent, default_timeout=None, client_info=client_info,
            ),
            self.delete_agent: gapic_v1.method.wrap_method(
                self.delete_agent, default_timeout=None, client_info=client_info,
            ),
            self.search_agents: gapic_v1.method.wrap_method(
                self.search_agents, default_timeout=None, client_info=client_info,
            ),
            self.train_agent: gapic_v1.method.wrap_method(
                self.train_agent, default_timeout=None, client_info=client_info,
            ),
            self.export_agent: gapic_v1.method.wrap_method(
                self.export_agent, default_timeout=None, client_info=client_info,
            ),
            self.import_agent: gapic_v1.method.wrap_method(
                self.import_agent, default_timeout=None, client_info=client_info,
            ),
            self.restore_agent: gapic_v1.method.wrap_method(
                self.restore_agent, default_timeout=None, client_info=client_info,
            ),
            self.get_validation_result: gapic_v1.method.wrap_method(
                self.get_validation_result,
                default_timeout=None,
                client_info=client_info,
            ),
        }

    # The abstract properties below are the transport-specific handles for
    # each RPC; concrete transports (gRPC, gRPC-asyncio) override them with
    # callables returning either the response or an awaitable of it.

    @property
    def operations_client(self) -> operations_v1.OperationsClient:
        """Return the client designed to process long-running operations."""
        raise NotImplementedError()

    @property
    def get_agent(
        self,
    ) -> typing.Callable[
        [agent.GetAgentRequest],
        typing.Union[agent.Agent, typing.Awaitable[agent.Agent]],
    ]:
        raise NotImplementedError()

    @property
    def set_agent(
        self,
    ) -> typing.Callable[
        [gcd_agent.SetAgentRequest],
        typing.Union[gcd_agent.Agent, typing.Awaitable[gcd_agent.Agent]],
    ]:
        raise NotImplementedError()

    @property
    def delete_agent(
        self,
    ) -> typing.Callable[
        [agent.DeleteAgentRequest],
        typing.Union[empty.Empty, typing.Awaitable[empty.Empty]],
    ]:
        raise NotImplementedError()

    @property
    def search_agents(
        self,
    ) -> typing.Callable[
        [agent.SearchAgentsRequest],
        typing.Union[
            agent.SearchAgentsResponse, typing.Awaitable[agent.SearchAgentsResponse]
        ],
    ]:
        raise NotImplementedError()

    @property
    def train_agent(
        self,
    ) -> typing.Callable[
        [agent.TrainAgentRequest],
        typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def export_agent(
        self,
    ) -> typing.Callable[
        [agent.ExportAgentRequest],
        typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def import_agent(
        self,
    ) -> typing.Callable[
        [agent.ImportAgentRequest],
        typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def restore_agent(
        self,
    ) -> typing.Callable[
        [agent.RestoreAgentRequest],
        typing.Union[operations.Operation, typing.Awaitable[operations.Operation]],
    ]:
        raise NotImplementedError()

    @property
    def get_validation_result(
        self,
    ) -> typing.Callable[
        [agent.GetValidationResultRequest],
        typing.Union[
            validation_result.ValidationResult,
            typing.Awaitable[validation_result.ValidationResult],
        ],
    ]:
        raise NotImplementedError()
__all__ = ("AgentsTransport",)
| [
"mariika-sun@yandex.ru"
] | mariika-sun@yandex.ru |
c48e26b21f99b0662ed6f8fdad61ca2dc45676e3 | 8e25d54624d7f5393ebb2887368c28ec9323bab8 | /simulators/legacy/archived code/reference_src/legacy/backend/src/__main__.py | e2e7bfd6f714b28899442c9e08a01d2d022d5788 | [] | no_license | dane00a/mrover-workspace | e31f04a2ca06061b4958892d539eb14b0baac889 | 31c15cc47364ab936743ecb34bf0e9bd72f712f3 | refs/heads/master | 2020-03-30T08:29:15.415492 | 2020-02-02T19:23:01 | 2020-02-02T19:23:01 | 237,822,284 | 1 | 0 | null | 2020-02-02T19:13:19 | 2020-02-02T19:13:18 | null | UTF-8 | Python | false | false | 8,972 | py | # Hola, and welcome to the simulator
# this file contains a lot of dependencies, init stuff,
# and lcm interfacing. if you're looking for underlying
# code as to how the simulator initializes, this file
# is for you. if not, check other files
# CGI AND THREADING (as a reminder)
# Structural notes:
# So I'm thinking we should initialize lcm and all sim
# objects in this file, as well as their associated methods,
# however we also should make sure this file doesn't get too
# long and unmanagable, which is why I'd like to write the
# high-level functionality in simHandler.py to make it easier
# to manage and debug
from .simHandler import runSimulator
from rover_common import aiolcm
from abc import ABC
import math # , abstractmethod
# import threading # for later for perf improvements
# import time # for later, for more accurate information and logging
import asyncio
from rover_common.aiohelper import run_coroutines
# from rover_msgs import DanesMsg
from rover_msgs import (NavStatus, Joystick, GPS, AutonState,
Course, Obstacle, TennisBall)
import mathUtils
# per Faruk's advice, the simulator is now contained in a metaclass
class SimulatorMetaClass:
# variables defined here are common to all classes
# ideally it shouldn't matter bc we only ever need one instance
# this is bad practice imho, just defined vars in the block below
def __init__(self): # other args if ya want em
# all initially defined variables should be here
# while not technically globals, this is basically where they are
# defined for the sim, since the entire instance is the SimMetaClass
Obstacles = []
Waypoints = []
Tennis_Balls = []
# below: class list, one class for each message type
# published or recieved. instantiate them at the bottom
# of this message definition block
# use the provided imported classes and dump these later
# you still need to set all the defaults
self.NavStatusMsg = NavStatus()
self.JoystickMsg = Joystick()
# self.GPSMsg = GPS()
# self.BearingMsg = Bearing()
self.GPSMsg = GPS()
self.ObstacleMsg = Obstacle()
self.TennisBallMsg = TennisBall()
self.CourseMsg = Course()
self.AutonStateMsg = AutonState()
self.NavStatusMsg.nav_state = 0
self.NavStatusMsg.completed_wps = 0
self.NavStatusMsg.missed_wps = 0
self.NavStatusMsg.total_wps = 0
self.JoystickMsg.forward_back = 0
self.JoystickMsg.left_right = 0
self.JoystickMsg.dampen = 0
self.GPSMsg.latitude_deg = 39
self.GPSMsg.latitude_min = 0
self.GPSMsg.longitude_deg = -110
self.GPSMsg.longitude_min = 0
self.GPSMsg.bearing = 0
self.GPSMsg.speed = -999 # this value is never used
# so it's being set to a dummy value. DO NOT USE IT
self.ObstacleMsg.detected = 0
self.ObstacleMsg.bearing = 0
self.TennisBallMsg.found = 0
self.TennisBallMsg.bearing = 0
self.TennisBallMsg.distance = 0
self.CourseMsg.num_waypoints = 0
self.CourseMsg.hash = 0
self.CourseMsg.waypoints = []
self.AutonStateMsg.is_auton = False
# definitions for message processing are below, with callbacks (cb)
# at the top and publishing at the bottom
# in this setup, camelCasing denotes a class instance
# while under_scored_variables indicate a variable within the class
# to avoid confusion
def nav_test(self, channel, msg):
pass
# define this as per the spec
def nav_state_cb(self, channel, msg):
m = NavStatus.decode(msg)
self.NavStatusMsg.nav_state = m.nav_state
self.NavStatusMsg.completed_wps = m.completed_wps
self.NavStatusMsg.missed_wps = m.missed_wps
self.NavStatusMsg.total_wps = m.total_wps
def joystick_cb(self, channel, msg):
m = Joystick.decode(msg)
self.JoystickMsg.forward_back = m.forward_back
self.JoystickMsg.left_right = m.left_right
self.JoystickMsg.dampen = m.dampen
#1-dampen/2
self.JoystickMsg.kill = m.kill
self.JoystickMsg.restart = m.restart
# async def publish_bearing(self, lcm):
# while True:
# lcm.publish("\bearing", self.BearingMsg.encode())
# await asyncio.sleep(10)
async def publish_auton_state(self, lcm):
while True:
lcm.publish("\auton", self.AutonStateMsg.encode())
await asyncio.sleep(10)
# async def publish_gps_state(self, lcm):
# while True:
# lcm.publish("\GPS", self.GPSMsg.encode())
# await asyncio.sleep(10)
# bearing publish
async def publish_GPS(self, lcm):
while True:
lcm.publish("\GPS", self.GPSMsg.encode())
await asyncio.sleep(10)
async def publish_course(self, lcm):
while True:
lcm.publish("\course", self.CourseMsg.encode())
await asyncio.sleep(10)
async def publish_obstacle(self, lcm):
while True:
lcm.publish("\obstacle", self.ObstacleMsg.encode())
await asyncio.sleep(10)
async def publish_tennis_ball(self, lcm):
while True:
lcm.publish("\tennis_ball", self.TennisBallMsg.encode())
await asyncio.sleep(10)
# SimObject definitions are below
# SimObj is the abstract base class that contains properties
# common to all objects. define additional simulator objects
# as if you would the Rover class, including proper
# superclass init
# identical to the GPS message, minus speed, bc it's a useful
# object to have internally
class GPS:
def __init__(self, lat0, latm0, lon0, lonm0, bearing, speed):
self.lat_deg = lat0
self.lat_min = latm0
self.lon_deg = lon0
self.lon_min = lonm0
self.bearing = bearing
self.speed = speed
# parent class of sim objects. Has all properties common to all
# objects
class SimObj(ABC):
# define initial location and other properties
def __init__(self, GPS):
self.lat_deg = GPS.lat0
self.lat_min = GPS.latm0
self.lon_deg = GPS.lon0
self.lon_min = GPS.lonm0
self.bearing = GPS.bearing0
self.shape = 0 # need to create a seed system?
# any methods common to all classes should be defined
def get_coords(self):
return [self.lat_deg, self.lat_min,
self.lon_deg, self.lon_min]
def get_bearing(self):
return self.bearing
# here is an abstract method, may be useful
# @abstractmethod
# def sample_abs_method(self):
# pass
class Field(SimObj):
def __init__(self, GPS, radius=2): # other properties
super().__init__(GPS)
self.radius = radius # in degrees, if not specified
# radius is 2
class Rover(SimObj):
def __init__(self, GPS, speed_trans=1,
speed_rot=1):
super().__init__(GPS)
self.fov = 120 # units of degrees,
# 120 if not specified
self.cv_thresh = 5
self.speed_translational = speed_trans
# speed multiplier, 1 if not specified
self.speed_rotational = speed_rot
class TennisBall(SimObj):
def __init__(self, GPS): # other properties
super().__init__(GPS)
self.other_prop = 0
class Obstacle(SimObj):
def __init__(self, GPS): # other properties
super().__init__(GPS)
class Waypoint(SimObj):
def __init__(self, GPS, searchable=0):
super().__init__(GPS)
self.search = searchable # defaults to false if not set
def main():
# how you get lcm messages
lcm = aiolcm.AsyncLCM()
# instantiates Simulator class
Simulator = SimulatorMetaClass()
# constantly queries lcm server
lcm.subscribe("\nav_state", Simulator.nav_state_cb)
lcm.subscribe("\drive_control", Simulator.joystick_cb)
# creates loop to execute this code repeatedly with the lcm
run_coroutines(lcm.loop(), Simulator.publish_auton_state(lcm),
Simulator.publish_course(lcm),
Simulator.publish_GPS(lcm),
Simulator.publish_obstacle(lcm),
Simulator.publish_tennis_ball(lcm),
runSimulator(Simulator))
# as a general improvement, it may be worth threading all of the
# lcm-related bruhaha to offload the worst of the performance hits
# as the sim becomes more complex and computationally intensive
# time to run this mf'er
runSimulator(Simulator)
# also necessary for the build system, idk why
if __name__ == "__main__":
main()
| [
"dane00a@users.noreply.github.com"
] | dane00a@users.noreply.github.com |
b6d39c4c57bb115fd600f6cd1da0dccb53670fc2 | 8421d00f78eb44cd4c463ffd1802b224401df5ca | /firstNetWork.py | 5776b51f7d16490731aea46fc29dd247bd75eb69 | [] | no_license | pntehan/study-for-tensorflow | c07307e4a73998bd96afa3eeac53b0c4fb2982e3 | 4993a4539ca23cfe4135c81418241f90d7b2343b | refs/heads/master | 2020-06-08T22:45:15.501834 | 2019-08-30T07:10:29 | 2019-08-30T07:10:29 | 193,320,372 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,700 | py | import tensorflow as tf
from numpy.random import RandomState
# Global constants
BATCH_SIZE = 8   # mini-batch size
LR = 1e-3        # learning rate for Adam
STEPS = 5000     # optimisation steps per epoch
# Weight variables and input placeholders (TensorFlow 1.x graph API)
w1 = tf.Variable(tf.random_normal([2, 3], stddev=1, seed=1))
w2 = tf.Variable(tf.random_normal([3, 1], stddev=1, seed=1))
x = tf.placeholder(tf.float32, shape=(None, 2), name="x-input")
y = tf.placeholder(tf.float32, shape=(None, 1), name="y-input")
# Forward pass: 2 -> 3 -> 1 fully-connected net (no activation between layers)
a = tf.matmul(x, w1)
y_pred = tf.matmul(a, w2)
# Loss function and back-propagation (optimizer)
# NOTE: despite the name, y_logit holds probabilities (sigmoid of the raw
# scores y_pred); the clipping keeps log() away from zero.
y_logit = tf.sigmoid(y_pred)
cross_entropy = -tf.reduce_mean(
    y*tf.log(tf.clip_by_value(y_logit, 1e-10, 1.0))+(1-y)*tf.log(tf.clip_by_value(1-y_logit, 1e-10, 1.0))
)
train_step = tf.train.AdamOptimizer(LR).minimize(cross_entropy)
# Synthetic training data: label is 1 exactly when x1 + x2 < 1
rdm = RandomState(1)
dataSet_size = 128
X = rdm.rand(dataSet_size, 2)
Y = [[int(x1+x2<1)] for (x1, x2) in X]
# Run the graph in a session and train
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op)
    print("Before Training...")
    print("w1:\n{}\nw2:\n{}".format(sess.run(w1), sess.run(w2)))
    for epoch in range(100):
        for i in range(STEPS):
            # slide a BATCH_SIZE window over the data set, wrapping around
            start = (i*BATCH_SIZE)%dataSet_size
            end = min(start+BATCH_SIZE, dataSet_size)
            sess.run(train_step, feed_dict={x: X[start:end], y: Y[start:end]})
            if (i+1)%1000 == 0:
                # report the loss over the whole data set every 1000 steps
                total_cross_entropy = sess.run(cross_entropy, feed_dict={x: X, y: Y})
                print("EPOCH [%2d] After %d training steps, cross entropy on all data is %g"%(epoch+1, i+1, total_cross_entropy))
    print("After Training...")
    print("w1:\n{}\nw2:\n{}".format(sess.run(w1), sess.run(w2)))
| [
"pntehan@foxmail.com"
] | pntehan@foxmail.com |
b30fa20dc7f259c83f94f96e064066d774ee0803 | fceb14fee6be94d793620dce4f22fb15b3f041f1 | /hadoop/archive/hadoop-python-2.9.0/python/deprecated/trainingReducer1.py | 9d5d5587ca020172974c6d9803de2bb4a53984b2 | [] | no_license | cloudmesh-community/hid-sp18-405 | 1429242ff534a4dbe8e6ad0bdea04fdbc14dbc82 | 325ec661739bc4241dddf0eb3ee817bf51b8165f | refs/heads/master | 2021-09-13T20:23:26.128208 | 2018-05-03T22:39:44 | 2018-05-03T22:39:44 | 119,435,402 | 0 | 5 | null | 2018-04-24T06:54:16 | 2018-01-29T20:11:51 | Python | UTF-8 | Python | false | false | 1,261 | py | #!/usr/bin/env python
'''
trainingReducer.py
Author: Min Chen <mc43@iu.edu>
Date: 2018-04-07
Last change: 2018-04-07
The input of this file is from stdin, and is the output of trainingMapper.py
The output of this file serves as both input for testingMapper.py and
trainingMapper2.py
'''
from itertools import groupby
from operator import itemgetter
import sys
def read_mapper_output(file, separator='\t'):
    """Yield each mapper line as a [key, value] pair.

    Only the first *separator* splits, so values may themselves contain
    the separator character.
    """
    for raw_line in file:
        key_value = raw_line.rstrip().split(separator, 1)
        yield key_value
def main(separator='\t'):
    """Sum the counts of consecutive equal keys read from stdin and print totals."""
    pairs = read_mapper_output(sys.stdin, separator=separator)
    # mapper output is sorted by key, so groupby() sees each key exactly once
    for word, occurrences in groupby(pairs, key=itemgetter(0)):
        try:
            total = sum(int(count) for _, count in occurrences)
        except ValueError:
            # a non-numeric count: silently drop this whole group
            continue
        print("%s%s%d" % (word, separator, total))
if __name__ == "__main__":
main() | [
"min.chen.1015@gmail.com"
] | min.chen.1015@gmail.com |
2a8baba7315e41a92db6cbdea5ef52408fcb9143 | fd308ec9cb448c2b47ec4736b670ce452b70800d | /sdap/jobs/views.py | b66d02ed32f0fabf23b619815af97c5d649e5c8f | [
"MIT"
] | permissive | umr1085-irset/reproGenomicsViewer | d4d8c52fbe72a1824812d2f5a7574be7ce58f9b5 | 700bbe817596b27a05ed75a8761506c5004f340f | refs/heads/master | 2023-05-25T01:55:42.333286 | 2023-05-15T10:02:54 | 2023-05-15T10:02:54 | 219,467,036 | 0 | 4 | MIT | 2022-02-11T09:44:32 | 2019-11-04T09:46:02 | JavaScript | UTF-8 | Python | false | false | 3,463 | py | import os
from datetime import datetime
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from django.views import View
from django.shortcuts import redirect
from django.conf import settings
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.decorators import login_required
from django.contrib.auth.mixins import LoginRequiredMixin
from django.contrib import messages
from django.http import HttpResponse
from mimetypes import guess_type
from django.urls import reverse_lazy
from sdap.jobs.models import Job
from celery.result import AsyncResult
# Create your views here.
class IndexView(LoginRequiredMixin, generic.ListView):
    """List the most recent analysis jobs (login required)."""
    template_name = 'analyse/index.html'
    context_object_name = 'latest_analyse_list'

    def get_queryset(self):
        """
        Return the five most recently created jobs, excluding any whose
        creation timestamp lies in the future.
        """
        return Job.objects.filter(
            created_at__lte=timezone.now()
        ).order_by('-created_at')[:5]
def DetailView(request, pk):
    """Show a job's output directory, with an HTML preview of tabular files.

    For each file in ``job.output`` the template receives its name,
    extension, full path and -- for known tabular extensions -- the first
    rows rendered as a Bootstrap-styled HTML table.  Requires pandas,
    which the original code used (as ``pd``) without ever importing.
    """
    job = get_object_or_404(Job, pk=pk)
    # column separator per previewable extension; other files get no preview
    separators = {".matrix": "\t", ".tsv": "\t", ".info": "\t",
                  ".list": "\t", ".csv": ","}
    table_classes = ["table", "table-bordered", "table-striped", "table-hover"]
    file_list = []
    for file in os.listdir(job.output):
        name, ext = os.path.splitext(file)
        path = os.path.join(job.output, file)
        table_content = ''
        if ext in separators:
            df = pd.read_csv(path, sep=separators[ext])
            table_content = df.head().to_html(classes=table_classes)
        file_list.append({'name': name, 'ext': ext, 'table': table_content,
                          'file': file, 'path': path})
    context = {'job': job, 'file_list': file_list}
    return render(request, 'jobs/results.html', context)
def DownloadView(request, pk, file_name):
    """Serve one file from the job's output directory as an HTTP response."""
    job = get_object_or_404(Job, pk=pk)
    target = os.path.join(job.output, file_name)
    content_type, _ = guess_type(target)
    with open(target, 'rb') as handle:
        response = HttpResponse(handle, content_type=content_type)
        response['Content-Length'] = len(response.content)
    return response
class RunningJobsView(LoginRequiredMixin, generic.ListView):
    # Lists the requesting user's jobs, refreshing the stored status of any
    # job that has not yet finished from its Celery task state.
    model = Job
    template_name = 'jobs/running_jobs.html'

    def get_context_data(self, **kwargs):
        """Add the user's jobs to the context, syncing unfinished statuses."""
        context = super(RunningJobsView, self).get_context_data(**kwargs)
        context['jobs_list'] = Job.objects.filter(created_by__exact=self.request.user.id)
        for job in context['jobs_list']:
            if job.status != "SUCCESS":
                # poll Celery for the latest state and persist it
                job.status = AsyncResult(job.celery_task_id).state
                job.save()
        return context
def Delete_job(request, pk):
    """Delete the given job, then re-render the user's running-jobs list.

    Mirrors RunningJobsView: after deleting, the remaining jobs of the
    requesting user are fetched and any unfinished status is refreshed
    from Celery before rendering.
    """
    job = get_object_or_404(Job, pk=pk)
    # removed leftover debug print(job) that leaked to stdout on every delete
    job.delete()
    context = {}
    context['jobs_list'] = Job.objects.filter(created_by__exact=request.user.id)
    for job in context['jobs_list']:
        if job.status != "SUCCESS":
            job.status = AsyncResult(job.celery_task_id).state
            job.save()
    return render(request, 'jobs/running_jobs.html', context)
"dardethomas@gmail.com"
] | dardethomas@gmail.com |
7654c450b5dfd23743fe2bd6a5541032050b3abb | d47fbef4fabde889c1cdedcceb60df40b90625b2 | /The Unicator/Client/TestConnexion.py | e03a3523918038a4cb9484ee753c8dee94d3a01d | [] | no_license | gpiche/Projet-Exploration-2017-2018 | 459988da13e9d56be3394fa7554a941f17826d1b | 59d7a76b30769c4702a9ef20b68106c379fded7a | refs/heads/master | 2021-08-24T09:32:08.259665 | 2017-12-09T02:10:45 | 2017-12-09T02:10:45 | 108,165,763 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 549 | py | import socket
MAX_RECV = 1024 * 1024 * 512
class Connexion:
    """TCP client wrapper that connects on construction.

    Messages are sent and received UTF-8 encoded; receive() reads at most
    MAX_RECV bytes per call.  Host and port are now parameters (with the
    previously hard-coded values as defaults) so the client is reusable
    against other servers without editing the class.
    """

    def __init__(self, host='192.168.1.103', port=50010):
        """Open a socket to *host*:*port*; defaults keep the old behaviour."""
        self.host = host
        self.port = port
        self.clientSocket = socket.socket()
        self.connect()

    def connect(self):
        """Establish the TCP connection to (host, port)."""
        self.clientSocket.connect((self.host, self.port))

    def send(self, text):
        """Send *text* (stringified) as UTF-8 bytes."""
        self.clientSocket.send(str(text).encode('UTF-8'))

    def receive(self):
        """Return one chunk of incoming data decoded as UTF-8."""
        return self.clientSocket.recv(MAX_RECV).decode('UTF-8')

    def close(self):
        """Close the underlying socket."""
        self.clientSocket.close()
"remikya.hellal@gmail.com"
] | remikya.hellal@gmail.com |
b26307c5e31b8ab3a89d2da60b1358d6b3f6cf7a | c071eb46184635818e8349ce9c2a78d6c6e460fc | /system/python_stubs/-745935208/PyQt5/QtWidgets/QGraphicsObject.py | ecd995203686bcbec70e38142191c515aad5938a | [] | no_license | sidbmw/PyCharm-Settings | a71bc594c83829a1522e215155686381b8ac5c6e | 083f9fe945ee5358346e5d86b17130d521d1b954 | refs/heads/master | 2020-04-05T14:24:03.216082 | 2018-12-28T02:29:29 | 2018-12-28T02:29:29 | 156,927,399 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,227 | py | # encoding: utf-8
# module PyQt5.QtWidgets
# from C:\Users\siddh\AppData\Local\Programs\Python\Python37\lib\site-packages\PyQt5\QtWidgets.pyd
# by generator 1.146
# no doc
# imports
import PyQt5.QtCore as __PyQt5_QtCore
import PyQt5.QtGui as __PyQt5_QtGui
import sip as __sip
from .QGraphicsItem import QGraphicsItem
class QGraphicsObject(__PyQt5_QtCore.QObject, QGraphicsItem):
    """ QGraphicsObject(parent: QGraphicsItem = None) """
    # NOTE: auto-generated IDE stub. Every method body below is a placeholder;
    # the real implementations live in the compiled PyQt5.QtWidgets extension
    # module. Methods marked "[signal]" are Qt signals, not ordinary methods.
    def childEvent(self, *args, **kwargs): # real signature unknown
        pass
    def connectNotify(self, *args, **kwargs): # real signature unknown
        pass
    def contextMenuEvent(self, *args, **kwargs): # real signature unknown
        pass
    def customEvent(self, *args, **kwargs): # real signature unknown
        pass
    def disconnectNotify(self, *args, **kwargs): # real signature unknown
        pass
    def dragEnterEvent(self, *args, **kwargs): # real signature unknown
        pass
    def dragLeaveEvent(self, *args, **kwargs): # real signature unknown
        pass
    def dragMoveEvent(self, *args, **kwargs): # real signature unknown
        pass
    def dropEvent(self, *args, **kwargs): # real signature unknown
        pass
    def enabledChanged(self): # real signature unknown; restored from __doc__
        """ enabledChanged(self) [signal] """
        pass
    def event(self, QEvent): # real signature unknown; restored from __doc__
        """ event(self, QEvent) -> bool """
        return False
    def focusInEvent(self, *args, **kwargs): # real signature unknown
        pass
    def focusOutEvent(self, *args, **kwargs): # real signature unknown
        pass
    def grabGesture(self, Qt_GestureType, flags, Qt_GestureFlags=None, Qt_GestureFlag=None, *args, **kwargs): # real signature unknown; NOTE: unreliably restored from __doc__
        """ grabGesture(self, Qt.GestureType, flags: Union[Qt.GestureFlags, Qt.GestureFlag] = Qt.GestureFlags()) """
        pass
    def hoverEnterEvent(self, *args, **kwargs): # real signature unknown
        pass
    def hoverLeaveEvent(self, *args, **kwargs): # real signature unknown
        pass
    def hoverMoveEvent(self, *args, **kwargs): # real signature unknown
        pass
    def inputMethodEvent(self, *args, **kwargs): # real signature unknown
        pass
    def inputMethodQuery(self, *args, **kwargs): # real signature unknown
        pass
    def isSignalConnected(self, *args, **kwargs): # real signature unknown
        pass
    def itemChange(self, *args, **kwargs): # real signature unknown
        pass
    def keyPressEvent(self, *args, **kwargs): # real signature unknown
        pass
    def keyReleaseEvent(self, *args, **kwargs): # real signature unknown
        pass
    def mouseDoubleClickEvent(self, *args, **kwargs): # real signature unknown
        pass
    def mouseMoveEvent(self, *args, **kwargs): # real signature unknown
        pass
    def mousePressEvent(self, *args, **kwargs): # real signature unknown
        pass
    def mouseReleaseEvent(self, *args, **kwargs): # real signature unknown
        pass
    def opacityChanged(self): # real signature unknown; restored from __doc__
        """ opacityChanged(self) [signal] """
        pass
    def parentChanged(self): # real signature unknown; restored from __doc__
        """ parentChanged(self) [signal] """
        pass
    def prepareGeometryChange(self, *args, **kwargs): # real signature unknown
        pass
    def receivers(self, *args, **kwargs): # real signature unknown
        pass
    def rotationChanged(self): # real signature unknown; restored from __doc__
        """ rotationChanged(self) [signal] """
        pass
    def scaleChanged(self): # real signature unknown; restored from __doc__
        """ scaleChanged(self) [signal] """
        pass
    def sceneEvent(self, *args, **kwargs): # real signature unknown
        pass
    def sceneEventFilter(self, *args, **kwargs): # real signature unknown
        pass
    def sender(self, *args, **kwargs): # real signature unknown
        pass
    def senderSignalIndex(self, *args, **kwargs): # real signature unknown
        pass
    def timerEvent(self, *args, **kwargs): # real signature unknown
        pass
    def ungrabGesture(self, Qt_GestureType): # real signature unknown; restored from __doc__
        """ ungrabGesture(self, Qt.GestureType) """
        pass
    def updateMicroFocus(self): # real signature unknown; restored from __doc__
        """ updateMicroFocus(self) """
        pass
    def visibleChanged(self): # real signature unknown; restored from __doc__
        """ visibleChanged(self) [signal] """
        pass
    def wheelEvent(self, *args, **kwargs): # real signature unknown
        pass
    def xChanged(self): # real signature unknown; restored from __doc__
        """ xChanged(self) [signal] """
        pass
    def yChanged(self): # real signature unknown; restored from __doc__
        """ yChanged(self) [signal] """
        pass
    def zChanged(self): # real signature unknown; restored from __doc__
        """ zChanged(self) [signal] """
        pass
    def __init__(self, parent=None): # real signature unknown; restored from __doc__
        pass
| [
"siddharthnatamai@gmail.com"
] | siddharthnatamai@gmail.com |
b38985979aa352629fbde500dc71def53b436389 | dae40242ea990dad66e00e6dd4357bb8caeb7efc | /MOG.py | 0628bbc744641be61a4a754ca9774f14ade1f22b | [] | no_license | anexd/projeto | e9aa20d66953d7b382d674085d221238ca9c62e9 | a3ce65adb529803859f9d7564c02f0f95d5a303e | refs/heads/master | 2020-08-03T12:23:07.682155 | 2019-09-30T09:16:57 | 2019-09-30T09:16:57 | 211,751,831 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | import cv2
import numpy as np
# Capture frames from the default webcam (device 0).
cap = cv2.VideoCapture(0)
# MOG background subtractor (Mixture Of Gaussians, opencv-contrib `bgsegm`).
fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()
while(1):
    ret, frame = cap.read()
    # Foreground mask: moving objects show up white on a black background.
    fgmask = fgbg.apply(frame)
    cv2.imshow('frame',fgmask)
    # Exit when ESC (key code 27) is pressed; waitKey(30) ~= 33 fps display.
    k = cv2.waitKey(30) & 0xff
    if k == 27:
        break
cap.release()
cv2.destroyAllWindows()
| [
"rayane_morgan@hotmail.com"
] | rayane_morgan@hotmail.com |
862bdff494b00a3d2dbf648b2ffde351f10d9483 | 5f271a8d1a7e8932bb3e1a4ca3ac410b74897380 | /vaccine_feed_ingest/ingestors/arcgis_ingest.py | 1b358e0c44d196ec6e2b12de057894a49df9070a | [
"MIT"
] | permissive | CAVaccineInventory/vaccine-feed-ingest | d3ee5fa2b165b8fb334900055f6eaf6e1d2efa9d | 48f777d24d2130c26ce071d6b62909d5ea5b97d5 | refs/heads/main | 2023-07-20T12:59:37.575426 | 2021-07-13T14:56:29 | 2021-07-13T14:56:29 | 355,024,616 | 28 | 49 | MIT | 2021-08-30T07:55:07 | 2021-04-06T01:49:34 | Python | UTF-8 | Python | false | false | 3,702 | py | #!/usr/bin/env python3
import json
from os.path import join
from typing import Optional, Sequence
import urllib3
from arcgis import GIS
from vaccine_feed_ingest.utils.log import getLogger
http = urllib3.PoolManager()
logger = getLogger(__file__)
def fetch_geojson(
    service_item_id: str,
    output_dir: str,
    selected_layers: Optional[Sequence[str]] = None,
) -> None:
    """Save selected layers of the arcgis service item.

    Queries every layer of the ArcGIS service item (or only the layers named
    in ``selected_layers``) and writes each one to
    ``<output_dir>/<service_item_id>_<layer_id>.json``.
    """
    # Anonymous (credential-less) connection to ArcGIS Online.
    gis = GIS()
    item = gis.content.get(service_item_id)
    if selected_layers is not None:
        # Log mismatches between the requested and the available layer names.
        suggest_changing_selected_layers(
            service_item_id,
            [layer.properties.name for layer in item.layers],
            selected_layers,
        )
    for layer in item.layers:
        if selected_layers is not None:
            if layer.properties.name not in selected_layers:
                continue
        # out_sr=4326: request geometries in EPSG:4326 (GPS lat/lon) coords.
        results = layer.query(return_all_records=True, out_sr=4326)
        layer_id = layer.properties.id
        # Tag every feature with its provenance for downstream processing.
        for feature in results:
            feature.attributes.update(
                {
                    "layer_id": layer_id,
                    "service_item_id": service_item_id,
                }
            )
        file_name = f"{service_item_id}_{layer_id}.json"
        logger.info(f"Saving {layer.properties.name} layer to {file_name}")
        results.save(output_dir, file_name)
def suggest_changing_selected_layers(
    service_item_id: str,
    found_layers: Sequence[str],
    selected_layers: Sequence[str],
) -> None:
    """
    Utility logging:
    * Warn if unavailable layers are requested.
    * Inform if available layers are not requested.

    :param service_item_id: ArcGIS service item id (used only in log output).
    :param found_layers: layer names actually exposed by the service item.
    :param selected_layers: layer names the caller asked for.
    """
    found_set = set(found_layers)
    selected_set = set(selected_layers)
    extra_layers = selected_set - found_set
    missed_layers = found_set - selected_set
    if extra_layers:
        # BUG FIX: Logger.warn is a deprecated alias of Logger.warning.
        logger.warning(
            "%s - requested layers which do not exist - %s",
            service_item_id,
            extra_layers,
        )
    if missed_layers:
        logger.info(
            "%s - additional layers available but not selected - %s",
            service_item_id,
            missed_layers,
        )
def get_count(query_url: str) -> int:
    """Return the total number of features exposed by an ArcGIS query URL.

    The count drives the batched pagination performed by ``fetch``.
    """
    count_params = {"where": "1=1", "returnCountOnly": "true", "f": "json"}
    response = http.request("GET", query_url, fields=count_params)
    payload = json.loads(response.data.decode("utf-8"))
    return payload["count"]
def get_results(
    query_url: str, offset: int, batch_size: int, output_dir: str, format: str
) -> None:
    """Fetch one page of ArcGIS features and write it to ``<output_dir>/<offset>.json``."""
    # Output Spatial reference EPSG 4326 = GPS lat/lon coordinates.
    out_sr = "4326"
    query_fields = {
        "where": "1=1",
        "outSR": out_sr,
        "f": format,
        "outFields": "*",
        "returnGeometry": "true",
        "orderByFields": "objectId ASC",
        "resultOffset": offset,
        "resultRecordCount": batch_size,
    }
    response = http.request("GET", query_url, fields=query_fields)
    output_file = join(output_dir, f"{offset}.json")
    with open(output_file, "wb") as fh:
        logger.info(f"Writing {output_file}")
        fh.write(response.data)
def fetch(
    query_url: str, output_dir: str, batch_size: int = 50, format: str = "geojson"
) -> None:
    """Download every ArcGIS feature from ``query_url`` in ``batch_size`` chunks."""
    count = get_count(query_url)
    logger.info(f"Found {count} results")
    offset = 0
    while offset < count:
        get_results(query_url, offset, batch_size, output_dir, format)
        offset += batch_size
| [
"noreply@github.com"
] | noreply@github.com |
53d65993d82dfc4255d5d92ca20acb1a2c3efe2c | 0c18fbdc2f1aaeb9ac1ef36a60071f727c76fe91 | /Badger | 84383abadfcea6ec58ff57ad8d074729888be634 | [] | no_license | bomer123/python-1 | 49a916d4048791ff9be8d9d930bdaf68cc03c380 | 1261281363ec97ff95f7130aefe6ee12a46f1667 | refs/heads/master | 2016-09-05T12:58:15.793292 | 2015-07-15T12:40:12 | 2015-07-15T12:40:12 | 39,134,619 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 366 | #!/usr/bin/env python3
import colors as c
import time
# Delay between printed lines, in seconds (one "beat" of the song).
count_length = 0.387
# Loop the "Badger Badger Badger" animation forever.
while True:
    c.clear_screen()
    for verse in range(5):
        # Eleven badgers per verse...
        for badger in range(11):
            print(c.yellow + 'Badger!' + c.reset)
            time.sleep(count_length)
        # ...followed by two (slower) mushrooms.
        for mushroom in range(2):
            print(c.magenta + 'Mushroom.' + c.reset)
            time.sleep(count_length * 2)
    c.clear_screen()
| [
"learn@skilstak.com"
] | learn@skilstak.com | |
f555c688d0fc632999966fdd07767c457fa0ab21 | be12916ec075c76469946f6da2cdde4857141702 | /771_2.py | 3c283b6a4ace661e675538e93277637a7552e724 | [] | no_license | goodsosbva/algorithm | 7b2f9b1a899e43c7d120ab9d40e672be336a23d9 | 0bc44e9466d2d32a4b4e126e073badc60172da6e | refs/heads/main | 2023-08-06T19:34:40.630006 | 2021-09-20T10:34:41 | 2021-09-20T10:34:41 | 334,918,056 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 361 | py | import collections
def newJewelsInStones(J: str, S: str) -> int:
    """Return how many characters of the stones string S are jewels.

    J lists the jewel characters (case sensitive); each character of J
    contributes the number of its occurrences in S to the total.
    """
    stone_counts = collections.Counter(S)
    return sum(stone_counts[jewel] for jewel in J)
jew = "aA"
stone = "aAAbbbb"
jcount = newJewelsInStones(jew, stone)
print(jcount) | [
"noreply@github.com"
] | noreply@github.com |
0cc0ef4ebd254b2ca9ff059b20fc0e76da881cec | 7e3aa40777708ca1f972cdbe421e09be35f827fc | /DEN/DenRSU-2.py | 528c458893f3c911fb283ae06c8d8167576d343f | [
"MIT"
] | permissive | amrishAK/ITS-Simulation-Framework | 80c437085eff4c508b41fa24b3835d52debf64c5 | 3b482a542407655ee8f97e6a62bf73e97cf8265d | refs/heads/master | 2022-12-13T09:33:38.246616 | 2020-09-14T20:13:09 | 2020-09-14T20:13:09 | 295,527,545 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,486 | py | from Framework.Setup import Setup
from Framework.BehaviourAgent import BaseRsuAgent
from Framework.BroadcastHandler import BroadCaster
from Framework.CarlaVizUtil import CarlaPainter
from Framework.Decrators import rsuAgent
from Framework.Container import Container
import json
import time
import random
@rsuAgent
class DenRSU(BaseRsuAgent):
    # Shared CarlaViz painter used to draw the RSU's zone marker on the map.
    painter = CarlaPainter('localhost',8089)
    def __init__(self,container:Container):
        self.__Container = container
    def GenerateMessage(self):
        # Build a DEN (Decentralized Environmental Notification) payload
        # advertising a speed limit of 20, with a random float as its id.
        data = {}
        data['msgType'] = 'DEN'
        den = {}
        den['DenId'] = random.random()
        den['Type'] = "SpeedLimit"
        den['Speed'] = "20"
        data['msg'] = den
        return json.dumps(data)
    def Setup(self):
        # One-time setup: draw a marker around the RSU actor's location and
        # cache the DEN message that RunStep broadcasts repeatedly.
        try:
            location = self.__Container.GetActor().get_location()
            # texts = ["Speed Limit 20"]
            # self.painter.draw_texts(texts,[[location.x,location.y,10]])
            # Polyline through the four corners of a 10x10 square around the RSU.
            self.painter.draw_polylines([[location.x+5,location.y+5,0],[location.x-5,location.y-5,0],[location.x-5,location.y+5,0],[location.x+5,location.y-5,0]])
            self.__Message = self.GenerateMessage()
        except Exception as ex:
            # Best-effort setup: visualization/actor errors are only printed.
            print(ex)
    def RunStep(self):
        # Broadcast the cached DEN message road-to-vehicle every 5 seconds.
        broadCast = BroadCaster()
        broadCast.R2V(self.__Message)
        time.sleep(5)
if __name__ == "__main__":
try:
print("first")
Setup("RSU-2Config.json")
except KeyboardInterrupt:
print("close!!")
| [
"aamrish007@live.com"
] | aamrish007@live.com |
12e27a1ffb5b0c67c43f8b2c732fb638e6d2fa70 | 6b5431368cb046167d71c1f865506b8175127400 | /challenges/n-primeiros-numeros-primos/tests.py | f921a7fcdf0065d0389c1479673886c2e732c226 | [] | no_license | Insper/design-de-software-exercicios | e142f4824a57c80f063d617ace0caa0be746521e | 3b77f0fb1bc3d76bb99ea318ac6a5a423df2d310 | refs/heads/master | 2023-07-03T12:21:36.088136 | 2021-08-04T16:18:03 | 2021-08-04T16:18:03 | 294,813,936 | 0 | 1 | null | 2021-08-04T16:18:04 | 2020-09-11T21:17:24 | Python | UTF-8 | Python | false | false | 910 | py | from strtest import str_test
def eh_primo_gabarito(n):
    """Reference primality test: return True iff ``n`` is a prime number.

    BUG FIX: the previous version fell through all guards for negative odd
    numbers (e.g. -3) and wrongly returned True; anything below 2 is now
    rejected up front.  The trial-division loop also stops at sqrt(n)
    instead of n, which changes nothing in the results but makes the
    reference usable for large inputs.
    """
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    d = 3
    # Only odd divisors up to sqrt(n) need to be checked.
    while d * d <= n:
        if n % d == 0:
            return False
        d += 2
    return True
def gabarito_dos_professores(n):
    """Return a list with the first ``n`` prime numbers, in ascending order."""
    primos = []
    candidato = 2
    # Keep testing successive integers until n primes were collected.
    while len(primos) < n:
        if eh_primo_gabarito(candidato):
            primos.append(candidato)
        candidato += 1
    return primos
class TestCase(str_test.TestCaseWrapper):
    # Per-test time limit in seconds, enforced by the strtest framework.
    TIMEOUT = 2
    def test_1(self):
        # Compare the student's submission (exposed as self.function by the
        # framework) against the reference implementation for n = 100 primes.
        n = 100
        primos = gabarito_dos_professores(n)
        retval = self.function(n)
        self.assertEqual(n, len(retval), 'Deveria ter exatamente n números primos')
        # Element-wise comparison guarantees both value and ordering.
        for primo, returned in zip(primos, retval):
            self.assertEqual(primo, returned, 'Não retornou os n primeiros primeos em ordem crescente')
| [
"andrew.kurauchi@gmail.com"
] | andrew.kurauchi@gmail.com |
7e7fb4267b2f7f099864aa8fc936aaea70fc2673 | 22e5f79bf23df10948f3bb92e85c3a0ba7ba578b | /p3_classification/hc_new.py | 60869f24c3971972b87dc5c9881070b482af92ed | [] | no_license | Sanushi-Salgado/Tumor-Teller-Prediction-Module | 37a374ddf87c2f8f4a4de7a6682d04dd0c0d585d | db3c4da1f5d36ead78af77f4d9b89db8b93eda01 | refs/heads/master | 2022-12-04T02:10:30.678563 | 2020-08-24T21:46:55 | 2020-08-24T21:46:55 | 264,960,728 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 26,171 | py | # Author: Sanushi Salgado
import warnings
from time import time
import numpy as np
import pandas as pd
import seaborn as sns
# from evaluation.model_evaluation import print_evaluation_results
from boruta import BorutaPy
from imblearn.ensemble import BalancedBaggingClassifier
from imblearn.over_sampling import SMOTE, BorderlineSMOTE, RandomOverSampler
from imblearn.pipeline import Pipeline, make_pipeline
from nested_cv import NestedCV
from prince import MCA
from scipy.stats import entropy
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier, StackingClassifier, GradientBoostingClassifier, \
ExtraTreesClassifier, BaggingClassifier
from sklearn.exceptions import DataConversionWarning, ConvergenceWarning
from sklearn.externals import joblib
from sklearn.feature_selection import SelectKBest, chi2, f_classif, mutual_info_classif, RFE, GenericUnivariateSelect, \
RFECV
from sklearn.linear_model import LogisticRegression, RidgeClassifier
from sklearn.metrics import roc_curve, classification_report
from sklearn.model_selection import train_test_split, GridSearchCV, RepeatedStratifiedKFold, KFold, cross_val_score, \
StratifiedKFold, cross_val_predict
from sklearn.multiclass import OneVsRestClassifier
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.pipeline import FeatureUnion
from sklearn.svm import SVC
from yellowbrick.classifier import confusion_matrix
from yellowbrick.model_selection import CVScores, ValidationCurve, LearningCurve
from p1_eda.eda import get_details, check_duplicates, perform_correspondence_analysis
from p2_preprocessing.data_cleansing import impute_missing_values, perform_one_hot_encoding
from p2_preprocessing.feature_selection import get_feature_correlations
from p3_classification.baseline import get_baseline_performance
from p3_classification.spot_check import spot_check_algorithms
from p3_classification.upper_region_classifier import upper_region_classifier
from p5_evaluation.model_evaluation import print_evaluation_results, plot_confusion_matrix
# Silence the (noisy) warnings emitted by sklearn/imblearn during training.
warnings.filterwarnings('ignore', category=ConvergenceWarning)
warnings.filterwarnings('ignore', category=FutureWarning)
warnings.filterwarnings('ignore', category=DeprecationWarning)
warnings.filterwarnings('ignore', category=DataConversionWarning)
warnings.filterwarnings('ignore', category=UserWarning)
warnings.filterwarnings('ignore', category=RuntimeWarning)
# Tumour class labels belonging to each region; used to split the data into
# one dataset per second-level (per-region) sub classifier.
UR_CLASSES = ['2', '4', '10']
TR_CLASSES = ['1', '22']
IP_CLASSES = ['3', '5', '7', '11', '12', '13']
EP_CLASSES = ['8', '14', '15', '17', '18', '19', '20']
# Target column names for the two levels of the classification hierarchy.
TOP_LEVEL_TARGET = 'region'
SECOND_LEVEL_TARGET = 'class'
# Dataset locations.
DATA_DIRECTORY_PATH = "../resources/datasets/"
DATASET_FILE_NAME = "BNG_primary_tumor.csv"
SYNTHETIC_DATASET_FILE_NAME = "synthetic_minority_samples.csv"
DATASET_PATH = DATA_DIRECTORY_PATH + DATASET_FILE_NAME
TARGET_NAME = "class"
# Creates datasets for the second level sub classifiers
def create_separate_datasets(data_frame):
    """Split the second-level data into one CSV per region.

    Drops the (now redundant) 'region' column *in place*, removes classes
    with a single instance (they cannot be stratified/split), and writes the
    ur/tr/ip/ep subsets to ``../resources/datasets/``.

    FIX: removes a discarded ``groupby(...).size()`` statement and replaces
    four copy-pasted per-region blocks with a single loop.

    :param data_frame: second-level dataframe with 'region' and 'class' columns.
    """
    # Drop the region column since it is useless at this level.
    data_frame.drop('region', axis=1, inplace=True)
    # Remove all classes with only 1 instance.
    class_filter = data_frame.groupby(SECOND_LEVEL_TARGET)
    data_frame = class_filter.filter(lambda x: len(x) > 1)
    print("2nd Level - Class Count", data_frame.groupby(SECOND_LEVEL_TARGET).size())
    # One (file prefix, class list) pair per region.
    region_subsets = (('ur', UR_CLASSES), ('tr', TR_CLASSES),
                      ('ip', IP_CLASSES), ('ep', EP_CLASSES))
    for prefix, region_classes in region_subsets:
        subset = data_frame[data_frame['class'].isin(region_classes)]
        print(subset.shape)
        print(subset.groupby('class').size())
        subset.to_csv('../resources/datasets/{}_dataset.csv'.format(prefix), index=False)
def upper_region_classifier():
    """Train and tune the second-level classifier for the upper region (UR).

    Loads the UR subset, scores an oversampling + feature-selection +
    random-forest pipeline with repeated stratified CV, grid-searches the
    forest's hyper-parameters and plots a learning curve for the best
    estimator.  Runs for its side effects (console output and plots).

    FIX: removes a dead ``FeatureUnion``/``Pipeline`` that was constructed
    but never used, plus large commented-out experiments.
    """
    ur_dataset = pd.read_csv("../resources/datasets/ur_dataset.csv", na_values='?', dtype='category')
    print("UR classes", ur_dataset.groupby(SECOND_LEVEL_TARGET).size())

    # Separate training features & training labels.
    X = ur_dataset.drop(['class'], axis=1)
    y = ur_dataset['class']

    # Stratified CV keeps every class represented with the same proportion
    # in each split.
    cv = RepeatedStratifiedKFold(n_splits=3, random_state=42)

    # Oversample *inside* the CV loop (imblearn pipeline) so synthetic
    # samples never leak into the validation folds.
    imba_pipeline = make_pipeline(RandomOverSampler(random_state=42),
                                  SelectKBest(k=15),
                                  RandomForestClassifier(random_state=13))
    scores = cross_val_score(imba_pipeline, X, y, scoring='f1_micro', cv=cv, n_jobs=-1)
    print("After oversampling mean", scores.mean())

    # ------------------------- Hyper-parameter tuning -------------------------
    params = {'n_estimators': [5, 10, 20, 30],
              'max_depth': [4, 6, 10, 12],
              'random_state': [13]}
    # Prefix the keys so they address the forest step inside the pipeline.
    new_params = {'randomforestclassifier__' + key: params[key] for key in params}
    grid_imba = GridSearchCV(imba_pipeline, param_grid=new_params, cv=cv,
                             scoring='f1_micro', return_train_score=True)
    grid_imba.fit(X, y)
    print(grid_imba.best_params_)
    print(grid_imba.best_score_)

    model = grid_imba.best_estimator_

    # Learning curve of the tuned pipeline over increasing training sizes.
    sizes = np.linspace(0.3, 1.0, 10)
    visualizer = LearningCurve(
        model, cv=cv, scoring='f1_micro', train_sizes=sizes, n_jobs=4
    )
    visualizer.fit(X, y)    # Fit the data to the visualizer
    visualizer.show()       # Finalize and render the figure
def balance_dataset(original_dataframe, sample_size=None):
    """Build a balanced synthetic dataset by sampling each class equally.

    BUG FIXES: the previous version referenced an undefined global
    ``SAMPLE_SIZE`` (NameError at runtime), computed the majority-class
    count without using it, and discarded the assembled frame.  The sample
    size is now an optional parameter defaulting to the majority-class
    count, and the balanced frame is returned.

    :param original_dataframe: frame whose class distribution defines the
        default per-class target size (count of its majority class).
    :param sample_size: rows to draw per class; defaults to the majority count.
    :return: concatenated per-class samples drawn from the BNG dataset.
    """
    if sample_size is None:
        sample_size = original_dataframe.groupby(SECOND_LEVEL_TARGET).size().max()
    print(sample_size)
    # Load the (pre-processed) synthetic BNG data to sample from.
    data_frame = pd.read_csv(DATA_DIRECTORY_PATH + 'pre_processed_bng_data.csv', na_values='?')
    # Shuffle the dataset before sampling.
    shuffled_df = data_frame.sample(frac=1, random_state=4)
    synthetic_df = None
    classes_to_balance = [2, 3, 4, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22]
    for class_label in classes_to_balance:
        class_sample = shuffled_df.loc[shuffled_df['class'] == class_label].sample(n=sample_size,
                                                                                  random_state=42)
        # pd.concat silently drops the initial None entry.
        synthetic_df = pd.concat([synthetic_df, class_sample])
    return synthetic_df
def thoracic_region_classifier():
    """Train and tune the second-level classifier for the thoracic region (TR).

    Loads the TR subset, spot-checks candidate algorithms, scores an
    oversampling + gradient-boosting pipeline with repeated stratified CV,
    and grid-searches the booster's hyper-parameters.  Runs for its side
    effects (console output).

    BUG FIX: the grid contained ``min_samples_split=1``, which is invalid
    in scikit-learn (must be an int >= 2 or a float in (0, 1]) and made
    every grid-search fit raise ValueError; it is now 2.  Dead
    FeatureUnion/Pipeline code and commented-out experiments were removed.
    """
    tr_dataset = pd.read_csv("../resources/datasets/tr_dataset.csv", na_values='?', dtype='category')
    print("TR classes", tr_dataset.groupby(SECOND_LEVEL_TARGET).size())

    # Separate training features & training labels.
    X = tr_dataset.drop([SECOND_LEVEL_TARGET], axis=1)
    y = tr_dataset[SECOND_LEVEL_TARGET]

    # Quick comparison of several candidate algorithms.
    spot_check_algorithms(X, y)

    cv = RepeatedStratifiedKFold(n_splits=2, random_state=42)

    # Oversample inside the CV loop to avoid leaking synthetic samples.
    imba_pipeline = make_pipeline(RandomOverSampler(random_state=42),
                                  GradientBoostingClassifier(n_estimators=20))
    scores = cross_val_score(imba_pipeline, X, y, scoring='f1_micro', cv=cv, n_jobs=-1)
    print("After oversampling mean", scores.mean())

    # ------------------------- Hyper-parameter tuning -------------------------
    params = {
        'n_estimators': [10, 20, 30],
        'max_depth': [6, 10, 20, 30],
        # was [1, 10, 100]: 1 is rejected by sklearn, see docstring.
        'min_samples_split': [2, 10, 100]
    }
    # Prefix the keys so they address the booster step inside the pipeline.
    new_params = {'gradientboostingclassifier__' + key: params[key] for key in params}
    grid_imba = GridSearchCV(imba_pipeline, param_grid=new_params, cv=cv,
                             scoring='f1_micro', return_train_score=True)
    grid_imba.fit(X, y)
    print(grid_imba.best_params_)
    print(grid_imba.best_score_)

    # Kept for symmetry with the other region classifiers (persistence TBD).
    model = grid_imba.best_estimator_
def ip_region_classifier():
    """Train, inspect and persist the second-level classifier for the IP region.

    Loads the pre-split IP train/test sets, reports a baseline, fits a
    random forest, runs RFECV to find the best number of features, plots
    feature importances, evaluates on train and test, and dumps the model.

    FIXES: ``matplotlib.pyplot`` was imported twice inside the function; the
    forest was needlessly re-fitted on the same data before reading its
    feature importances; large commented-out experiments were removed.
    """
    ip_train_set = pd.read_csv("../resources/datasets/ip_train_set.csv", dtype='category')
    ip_test_set = pd.read_csv("../resources/datasets/ip_test_set.csv", dtype='category')

    # Separate training features & training labels.
    X_train = ip_train_set.drop(['class'], axis=1)
    y_train = ip_train_set['class']
    # Separate testing features & testing labels.
    X_test = ip_test_set.drop(['class'], axis=1)
    y_test = ip_test_set['class']

    # Dummy-classifier baseline for reference.
    get_baseline_performance(X_train, y_train, X_test, y_test)

    model = RandomForestClassifier(random_state=42)
    model = model.fit(X_train, y_train)

    # Recursive feature elimination with 5-fold CV: finds both the best
    # features and how many of them are needed for the best f1_micro.
    # (after https://www.kaggle.com/yaldazare/feature-selection-and-data-visualization)
    rfecv = RFECV(estimator=RandomForestClassifier(), step=1, cv=5, scoring='f1_micro')
    rfecv = rfecv.fit(X_train, y_train)
    print('Optimal number of features :', rfecv.n_features_)
    print('Best features :', X_train.columns[rfecv.support_])

    # Plot number of features vs. cross-validation score.
    import matplotlib.pyplot as plt
    plt.figure()
    plt.xlabel("Number of features selected")
    plt.ylabel("Cross validation score of number of selected features")
    plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_)
    plt.show()

    # Feature importances of the already-fitted forest, with the per-tree
    # spread as error bars.
    importances = model.feature_importances_
    std = np.std([tree.feature_importances_ for tree in model.estimators_], axis=0)
    indices = np.argsort(importances)[::-1]

    print("Feature ranking:")
    for f in range(X_train.shape[1]):
        print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))

    plt.figure(1, figsize=(14, 13))
    plt.title("Feature importances")
    plt.bar(range(X_train.shape[1]), importances[indices], color="g", yerr=std[indices], align="center")
    plt.xticks(range(X_train.shape[1]), X_train.columns[indices], rotation=90)
    plt.xlim([-1, X_train.shape[1]])
    plt.show()

    # Evaluate on the training and on the held-out test set.
    predictions = model.predict(X_train)
    print_evaluation_results(y_train, predictions)
    predictions = model.predict(X_test)
    print_evaluation_results(y_test, predictions, train=False)

    joblib.dump(model, filename='../resources/models/ip_classifier.pkl')
def ep_region_classifier():
    """Train, tune and persist the second-level classifier for the EP region.

    Loads the pre-split EP train/test sets, carves a stratified validation
    split off the training data, grid-searches a random forest on f1_micro,
    validates the tuned model, then refits it, evaluates on train/test and
    dumps it to disk.

    BUG FIX: the grid contained ``min_samples_split=1``, which is invalid
    in scikit-learn (must be an int >= 2 or a float in (0, 1]) and made
    every grid-search fit raise ValueError; it is now 2.
    """
    ep_train_set = pd.read_csv("../resources/datasets/ep_train_set.csv", na_values='?', dtype='category')
    ep_test_set = pd.read_csv("../resources/datasets/ep_test_set.csv", na_values='?', dtype='category')

    # Separate training features & training labels.
    X_train = ep_train_set.drop(['class'], axis=1)
    y_train = ep_train_set['class']
    # Separate testing features & testing labels.
    X_test = ep_test_set.drop(['class'], axis=1)
    y_test = ep_test_set['class']

    # Hold out a 10% stratified validation split from the training data.
    X_train, X_Val, y_train, y_val = train_test_split(X_train, y_train, stratify=y_train,
                                                      test_size=0.1, random_state=42, shuffle=True)

    model = RandomForestClassifier()

    # ------------------------- Hyper-parameter tuning -------------------------
    # Grid search using micro-averaged F1 as the scoring method.
    grid_obj = GridSearchCV(
        estimator=model,
        param_grid={
            'n_estimators': [10, 20, 30],
            'max_depth': [6, 10, 20, 30],
            # was [1, 10, 100]: 1 is rejected by sklearn, see docstring.
            'min_samples_split': [2, 10, 100]
        },
        n_jobs=-1,
        scoring="f1_micro",
        cv=5,
        verbose=3
    )
    # Fit the grid search to the training data and keep the best estimator.
    grid_fit = grid_obj.fit(X_train, y_train)
    best_clf = grid_fit.best_estimator_
    print(best_clf)

    # Check the tuned model on the held-out validation split.
    predictions = best_clf.predict(X_Val)
    print_evaluation_results(y_val, predictions, train=False)

    model = best_clf

    # ------------------------------ Final model ------------------------------
    model = model.fit(X_train, y_train)
    predictions = model.predict(X_train)
    print_evaluation_results(y_train, predictions)
    predictions = model.predict(X_test)
    print_evaluation_results(y_test, predictions, train=False)

    joblib.dump(model, filename='../resources/models/ep_classifier.pkl')
# Top Level Classifier
def classify_by_region(data_frame):
    """Top-level classifier: predict the tumour region from a pre-processed frame.

    Oversamples the training split with BorderlineSMOTE to counter class
    imbalance, scores the categorical features with a chi-squared test,
    trains an RBF SVC and reports evaluation metrics on both the resampled
    training data and the untouched test split.
    """
    # Features: drop both target columns; labels: the top-level region target.
    X = data_frame.drop([TOP_LEVEL_TARGET, SECOND_LEVEL_TARGET], axis=1)
    y = data_frame[TOP_LEVEL_TARGET]

    X_train, X_test, y_train, y_test = train_test_split(
        X, y, stratify=y, test_size=0.2, random_state=42, shuffle=True)

    # ----------------------- Handle class imbalance -------------------------
    # NOTE(review): fit_sample() was renamed fit_resample() and removed in
    # imbalanced-learn >= 0.8 -- confirm the pinned imblearn version.
    sm = BorderlineSMOTE()
    X_resampled, y_resampled = sm.fit_sample(X_train, y_train)
    print("After Oversampling By Region\n", (pd.DataFrame(y_resampled)).groupby('region').size())

    # Chi-squared feature scoring; with k='all' every feature is kept.
    # NOTE(review): the transformed matrices below are currently unused --
    # the SVC is trained on the untransformed resampled data. Confirm whether
    # feature selection should feed the model before removing them.
    sel_chi2 = SelectKBest(chi2, k='all')
    X_train_chi2 = sel_chi2.fit_transform(X_resampled, y_resampled)
    X_test_chi2 = sel_chi2.transform(X_test)

    # Final model. Alternatives spot-checked during experimentation (LR, MLP,
    # GB, DT, RF, KNN, voting) scored lower; see VCS history for the log.
    parent_model = SVC(kernel='rbf', C=10)

    t0 = time()
    parent_model.fit(X_resampled, y_resampled)
    print("training time:", round(time() - t0, 3), "s")

    # Evaluate the final model on the (resampled) training set.
    train_predictions = parent_model.predict(X_resampled)
    print_evaluation_results(y_resampled, train_predictions)

    t0 = time()
    # Evaluate the final model on the test set.
    test_predictions = parent_model.predict(X_test)
    print("predicting time:", round(time() - t0, 3), "s")
    print_evaluation_results(y_test, test_predictions, train=False)

    confusion_matrix(parent_model, X_resampled, y_resampled, X_test, y_test)
# ---- Script entry: load, clean and encode the dataset, then classify ----
data_frame = pd.read_csv("../resources/datasets/primary-tumor-with-region.csv", na_values='?', dtype='category')
get_details(data_frame)
print("Class count\n", data_frame.groupby(SECOND_LEVEL_TARGET).size())

# Impute missing values with the per-column mode.
data_frame = impute_missing_values(data_frame, "most_frequent")
print(data_frame.head(20))
print(data_frame.isnull().sum().sum())

get_feature_correlations(data_frame)

# Drop duplicate records if any exist.
is_duplicated = check_duplicates(data_frame)
if is_duplicated:
    data_frame.drop_duplicates(inplace=True)
    print("Dropped duplicate records. Size after dropping duplicates: ", data_frame.shape)

# One-hot encode the categorical feature columns.
columns_to_encode = ['sex', 'histologic-type', 'bone', 'bone-marrow', 'lung', 'pleura', 'peritoneum', 'liver',
                     'brain', 'skin', 'neck', 'supraclavicular', 'axillar', 'mediastinum', 'abdominal']
data_frame = perform_one_hot_encoding(data_frame, columns_to_encode)

# Pre-processed dataset.
pre_processed_data = data_frame

# Top-level classifier - classify by region. The second-level pipeline
# (create_separate_datasets / balance_dataset / per-region classifiers) is
# invoked separately when needed.
classify_by_region(pre_processed_data)
| [
"sanushisalgado@gmail.com"
] | sanushisalgado@gmail.com |
498c7a424a202f8d8629df7baa5c9d2d5e91499c | 9b9a02657812ea0cb47db0ae411196f0e81c5152 | /repoData/chrisglass-django-rulez/allPythonContent.py | a90e7aa3a09934bbd17fa7fa477a3eb695a70f59 | [] | no_license | aCoffeeYin/pyreco | cb42db94a3a5fc134356c9a2a738a063d0898572 | 0ac6653219c2701c13c508c5c4fc9bc3437eea06 | refs/heads/master | 2020-12-14T14:10:05.763693 | 2016-06-27T05:15:15 | 2016-06-27T05:15:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 33,902 | py | __FILENAME__ = backends
# -*- coding: utf-8 -*-
import inspect
from exceptions import NotBooleanPermission
from exceptions import NonexistentFieldName
from rulez import registry
class ObjectPermissionBackend(object):
    """Django authentication backend that answers per-object permission
    checks using rules registered in the rulez registry."""
    supports_object_permissions = True
    supports_anonymous_user = True
    supports_inactive_user = True

    def authenticate(self, username, password):  # pragma: no cover
        """This backend never authenticates users, only authorizes them."""
        return None

    def has_perm(self, user_obj, perm, obj=None):
        """Return True iff ``user_obj`` has permission ``perm`` on ``obj``.

        Looks up the rule registered under ``perm`` for ``obj``'s class and
        evaluates the rule's bound method (with or without the user,
        depending on its signature).

        Raises:
            NonexistentFieldName: the rule points at a field the model no
                longer has.
            NotBooleanPermission: the rule's field is not callable or does
                not return a bool.
        """
        if user_obj and not user_obj.is_anonymous() and not user_obj.is_active:
            # Inactive users never have permissions.
            return False

        if obj is None:
            return False

        # Fetch the rule definition from the registry.
        rule = registry.get(perm, obj.__class__)
        if rule is None:
            return False

        try:
            bound_field = getattr(obj, rule.field_name)
        except AttributeError:
            # Fix: the message placeholders were never interpolated (args
            # were passed as a second tuple argument instead of via %).
            raise NonexistentFieldName(
                "Field_name %s from rule %s does not longer exist in model %s. "
                "The rule is obsolete!"
                % (rule.field_name, rule.codename, rule.model))

        if not callable(bound_field):
            raise NotBooleanPermission(
                "Attribute %s from model %s on rule %s is not callable"
                % (rule.field_name, rule.model, rule.codename))

        # It is a callable bound field: pass user_obj only when the
        # signature accepts it (i.e. self + one parameter).
        # NOTE(review): inspect.getargspec was removed in Python 3.11 --
        # inspect.signature is the modern replacement; kept for py2 compat.
        if len(inspect.getargspec(bound_field)[0]) == 2:
            is_authorized = bound_field(user_obj)
        else:
            is_authorized = bound_field()

        if not isinstance(is_authorized, bool):
            raise NotBooleanPermission(
                "Callable %s from model %s on rule %s does not return a "
                "boolean value" % (rule.field_name, rule.model, rule.codename))

        return is_authorized
########NEW FILE########
__FILENAME__ = exceptions
# -*- coding: utf-8 -*-
"""
Exceptions used by django-rules. All internal and rules-specific errors
should extend RulesError class
"""
class RulesException(Exception):
    """Base class for all django-rules specific errors."""
    pass


class NonexistentPermission(RulesException):
    """Raised when a permission codename is not registered."""
    pass


class NonexistentFieldName(RulesException):
    """Raised when a rule references a field the model no longer has."""
    pass


class NotBooleanPermission(RulesException):
    """Raised when a rule's field is not callable or not boolean-valued."""
    pass
########NEW FILE########
__FILENAME__ = models
# -*- coding: utf-8 -*-
import rolez.signals
########NEW FILE########
__FILENAME__ = registry
# -*- coding: utf-8 -*-
from rulez.exceptions import NonexistentFieldName
from collections import defaultdict
class Rule(object):
    """A single authorization rule: maps a permission codename on a model
    to the model attribute (usually a method) that decides it."""

    def __init__(self, codename, model, field_name='', view_param_pk='',
                 description=''):
        self.field_name = field_name
        self.description = description
        self.codename = codename
        self.model = model
        self.view_param_pk = view_param_pk


# Global rule registry: {model_class: {codename: Rule}}.
registry = defaultdict(dict)


def register(codename, model, field_name='', view_param_pk='', description=''):
    """Register a rule for ``model`` under ``codename``.

    Call this from your models.py or wherever after your models are declared
    (think admin registration). ``field_name`` defaults to ``codename``.

    Raises:
        NonexistentFieldName: if ``field_name`` is not an attribute of
            ``model`` -- fail fast at registration, not at check time.
    """
    if not field_name:
        field_name = codename
    # Sanity check.
    if not hasattr(model, field_name):
        raise NonexistentFieldName('Field %s does not exist on class %s' % (field_name, model.__name__))
    registry[model].update({codename: Rule(codename, model, field_name,
                                           view_param_pk, description)})


def get(codename, model):
    """Return the Rule registered for (codename, model), or None."""
    return registry.get(model, {}).get(codename, None)
########NEW FILE########
__FILENAME__ = base
#-*- coding: utf-8 -*-
class AbstractRole(object):
    """
    Abstract base describing what a role should look like: subclasses must
    implement is_member().
    """

    @classmethod
    def is_member(cls, user, obj):  # pragma: nocover
        """Return True when ``user`` holds this role for ``obj``."""
        # Fix: ``raise NotImplemented`` raised a TypeError because
        # NotImplemented is a constant, not an exception class.
        raise NotImplementedError
########NEW FILE########
__FILENAME__ = cache_helper
#-*- coding: utf-8 -*-
from django.contrib.auth.models import User, AnonymousUser
from django.core.cache import cache
from rulez.exceptions import RulesException
import time
"""
Cache keys:
For the list of roles, per user, per instance:
<prefix>-<user.id>-<user counter>-<obj.type>-<obj.id>-<obj counter>
For the counter , per instance:
<prefix>-<obj.type>-<obj.id>
"""
HOUR = 60 * 60  # cache timeout unit, in seconds


# =============================================================================
# Counter handling
# =============================================================================

def counter_key(obj):
    """Build the cache key holding the invalidation counter for ``obj``."""
    if obj.__class__ in (User, AnonymousUser,):
        pk = get_user_pk(obj)
    else:
        pk = obj.pk
    obj_type = str(obj.__class__.__name__).lower()
    return "%s-%s" % (obj_type, pk)


def increment_counter(obj):
    """Invalidate the cached roles involving ``obj``.

    The counter is the current timestamp, so every bump yields a fresh value
    and all role keys embedding the old counter become stale. No-op for None
    (it would be pointless).
    """
    if obj is not None:
        cache.set(counter_key(obj), int(time.time()), 1 * HOUR)


def get_counter(obj):
    """Return the cached invalidation counter for ``obj`` (0 when unset)."""
    counter = cache.get(counter_key(obj))
    if not counter:
        counter = 0
    return counter


def roles_key(user, obj):
    """Build the cache key for the roles ``user`` has on ``obj``.

    Embeds both invalidation counters, so bumping either the user's or the
    object's counter invalidates the entry.
    """
    if obj.__class__ in (User, AnonymousUser,):
        obj_id = get_user_pk(obj)
    else:
        obj_id = obj.pk
    obj_type = str(obj.__class__.__name__).lower()
    obj_counter = get_counter(obj)
    user_id = get_user_pk(user)
    user_counter = get_counter(user)
    return "%s-%s-%s-%s-%s" % (user_id, user_counter, obj_type, obj_id,
                               obj_counter)


def get_user_pk(user):
    """Return the user's pk, or the string 'anonymous' for missing or
    anonymous users."""
    if not user or (user and user.is_anonymous()):
        return 'anonymous'
    else:
        return user.pk
# =============================================================================
# Main function
# =============================================================================

def get_roles(user, obj):
    """Return the list of roles ``user`` holds for the instance ``obj``.

    Serves the list from cache when possible; on a miss, rebuilds it by
    testing membership of each of the instance's relevant_roles() and caches
    the result for an hour.

    Raises:
        RulesException: if ``obj`` does not define relevant_roles().
    """
    roles = cache.get(roles_key(user, obj))
    if isinstance(roles, list):
        # Cache hit (a miss returns None rather than an empty list).
        return roles

    # Cache miss: recompute the roles for this instance.
    if not hasattr(obj, 'relevant_roles'):
        raise RulesException(
            'Cannot build roles cache for %s instance. Did you forget to '
            'define a "relevant_roles()" method on %s?' % (obj.__class__,
                                                           obj.__class__))
    user_roles = [role for role in obj.relevant_roles()
                  if role.is_member(user, obj)]
    cache.set(roles_key(user, obj), user_roles, 1 * HOUR)
    return user_roles
########NEW FILE########
__FILENAME__ = models
#-*- coding: utf-8 -*-
from rulez.rolez.cache_helper import get_roles, get_user_pk, increment_counter
class ModelRoleMixin(object):
    """
    Mixin adding role-handling methods to the model it's mixed with.

    Subclasses list their relevant role classes in a ``roles`` attribute.
    """

    def get_roles(self, user):
        """Return all roles ``user`` has for this object, memoized per
        instance.

        Without the per-instance cache, every has_role() call would hit the
        cache backend.
        """
        rolez = getattr(self, '_rolez', {})
        pk = get_user_pk(user)
        if pk not in rolez.keys():
            rolez[pk] = get_roles(user, self)
        self._rolez = rolez
        return self._rolez[pk]

    def has_role(self, user, role):
        """Return True when ``user`` is a member of ``role`` for this
        instance."""
        roles = self.get_roles(user)
        return role in roles

    def relevant_roles(self):
        """Return the role *classes* relevant to this instance type.

        Used to optimise rebuilding the user's roles on a cache miss.
        """
        return self.roles

    def rulez_invalidate(self):
        """Invalidate cached roles for this instance.

        Default, simple case: the model relates to users, so bumping this
        instance's counter forces connected users to recalculate their keys.
        Override to also invalidate related objects' counters when needed.
        """
        increment_counter(self)
########NEW FILE########
__FILENAME__ = signals
#-*- coding: utf-8 -*-
from django.db.models import signals
def should_we_invalidate_rolez(sender, instance, **kwargs):
    """Post-save/post-delete hook: invalidate the instance's cached roles
    when the model opts in by defining rulez_invalidate()."""
    if hasattr(instance, 'rulez_invalidate'):
        instance.rulez_invalidate()


# Recompute role caches whenever any model instance is saved or deleted.
signals.post_save.connect(should_we_invalidate_rolez)
signals.post_delete.connect(should_we_invalidate_rolez)
########NEW FILE########
__FILENAME__ = rulez_perms
from django import template
register = template.Library()


class RulezPermsNode(template.Node):
    """Template node that evaluates a permission check and stores the
    boolean result in the template context."""

    def __init__(self, codename, objname, varname):
        self.codename = codename
        self.objname = objname
        self.varname = varname

    def render(self, context):
        # Resolve the current user and the target object from the context,
        # then delegate the check to the auth backends via user.has_perm().
        # NOTE(review): template.resolve_variable is a deprecated API --
        # Variable(...).resolve(context) is the modern spelling.
        user_obj = template.resolve_variable('user', context)
        obj = template.resolve_variable(self.objname, context)
        context[self.varname] = user_obj.has_perm(self.codename, obj)
        return ''
def rulez_perms(parser, token):
    '''
    Template tag to check for permission against an object.
    Built out of a need to use permissions with anonymous users at an
    object level.

    Usage:
    {% load rulez_perms %}
    {% for VARNAME in QUERYRESULT %}
    {% rulez_perms CODENAME VARNAME as BOOLEANVARNAME %}
    {% if BOOLEANVARNAME %}
    I DO
    {% else %}
    I DON'T
    {% endif %}
    have permission for {{ VARNAME }}.{{ CODENAME }}!!
    {% endfor %}
    '''
    bits = token.split_contents()
    # Expected shape: rulez_perms CODENAME OBJ as VARNAME  (5 tokens).
    if len(bits) != 5:
        raise template.TemplateSyntaxError(
            'tag requires exactly three arguments')
    if bits[3] != 'as':
        raise template.TemplateSyntaxError(
            "third argument to tag must be 'as'")
    return RulezPermsNode(bits[1], bits[2], bits[4])


rulez_perms = register.tag(rulez_perms)
########NEW FILE########
__FILENAME__ = backend
#-*- coding: utf-8 -*-
from django.test.testcases import TestCase
from rulez import registry
from rulez.backends import ObjectPermissionBackend
from rulez.exceptions import NonexistentFieldName, NotBooleanPermission
from rulez.registry import Rule
class MockModel():
    """Stand-in model exposing the permission attribute shapes the backend
    must handle: user-aware callable, plain callable, non-boolean callable
    and a non-callable attribute."""
    pk = 999
    not_callable = 'whatever'  # attribute that is not callable

    def __init__(self):
        self.attr_permission = True
        self.attr_wrong_permission = "I'm not a boolean"

    def mock_permission(self, user):
        # Callable permission that takes the user.
        return True

    def mock_simple_permission(self):
        # Just a callable, no "user" parameter.
        return True

    def mock_non_boolean_permission(self, user):
        # Invalid: returns a non-boolean value.
        return "Whatever"
class MockUser():
    """Minimal user double with pk, is_active flag and is_anonymous()."""

    def __init__(self, is_active=True):
        self.pk = 666
        self.is_active = is_active

    def is_anonymous(self):
        return False
class BackendTestCase(TestCase):
    """Exercise ObjectPermissionBackend.has_perm against mock rules."""

    def create_fixtures(self):
        self.user = MockUser()
        self.inactive_user = MockUser(is_active=False)
        self.model = MockModel()

    def test_user_is_tested_for_rule(self):
        self.create_fixtures()
        registry.register('mock_permission', MockModel)
        back = ObjectPermissionBackend()
        res = back.has_perm(self.user, 'mock_permission', self.model)
        self.assertEqual(res, True)

    def test_rules_returns_False_for_None_obj(self):
        self.create_fixtures()
        registry.register('mock_permission', MockModel)
        back = ObjectPermissionBackend()
        res = back.has_perm(self.user, 'mock_permission', None)
        self.assertEqual(res, False)

    def test_rules_returns_False_for_inexistant_rule(self):
        self.create_fixtures()
        registry.register('mock_permission', MockModel)
        back = ObjectPermissionBackend()
        res = back.has_perm(self.user, 'whatever_permission', self.model)
        self.assertEqual(res, False)

    def test_user_is_tested_for_simple_rule(self):
        self.create_fixtures()
        registry.register('mock_simple_permission', MockModel)
        back = ObjectPermissionBackend()
        res = back.has_perm(self.user, 'mock_simple_permission', self.model)
        self.assertEqual(res, True)

    def test_user_is_tested_for_simple_rule_by_field_name(self):
        self.create_fixtures()
        registry.register(
            'mock_permission', MockModel, field_name='mock_simple_permission')
        back = ObjectPermissionBackend()
        res = back.has_perm(self.user, 'mock_permission', self.model)
        self.assertEqual(res, True)

    def test_non_existant_filenames_are_caught(self):
        self.create_fixtures()
        # Bypass register()'s sanity check to simulate an obsolete rule
        # whose field no longer exists on the model.
        codename = 'mock_permission'
        rule = Rule(codename, MockModel, field_name='I_do_not_exist')
        registry.registry[MockModel].update({codename: rule})
        back = ObjectPermissionBackend()
        self.assertRaises(
            NonexistentFieldName, back.has_perm, self.user, 'mock_permission',
            self.model)

    def test_inactive_user_can_never_have_any_permissions(self):
        self.create_fixtures()
        registry.register('mock_permission', MockModel)
        back = ObjectPermissionBackend()
        res = back.has_perm(self.inactive_user, 'mock_permission', self.model)
        self.assertEqual(res, False)

    def test_non_boolean_permissions_raises(self):
        self.create_fixtures()
        registry.register('mock_non_boolean_permission', MockModel)
        back = ObjectPermissionBackend()
        self.assertRaises(
            NotBooleanPermission, back.has_perm, self.user,
            'mock_non_boolean_permission', self.model)

    def test_non_callable_permission_raises(self):
        self.create_fixtures()
        registry.register('not_callable', MockModel)
        back = ObjectPermissionBackend()
        self.assertRaises(
            NotBooleanPermission, back.has_perm, self.user,
            'not_callable', self.model)
########NEW FILE########
__FILENAME__ = registry
#-*- coding: utf-8 -*-
from django.test.testcases import TestCase
from rulez import registry
from rulez.exceptions import NonexistentFieldName
class MockModel():
    """Stand-in model with a single permission method."""
    pk = 999

    def mock_permission(self):
        return True
class MockUser():
    """Minimal non-anonymous user double."""

    def __init__(self):
        self.pk = 666

    def is_anonymous(self):
        return False
class RegistryTestCase(TestCase):
    """Behaviour of the rule registry: registration and its sanity checks."""

    def test_rule_is_registered(self):
        registry.register('mock_permission', MockModel)
        # If it has been registered properly we should be able to get() it.
        res = registry.get('mock_permission', MockModel)
        self.assertNotEqual(res, None)
        self.assertNotEqual(res, {})

    def test_registration_raises_non_existant_field_names(self):
        self.assertRaises(NonexistentFieldName, registry.register,
                          'mock_permission', MockModel,
                          field_name='inexistant')
########NEW FILE########
__FILENAME__ = roles_helpers
#-*- coding: utf-8 -*-
from __future__ import with_statement
from django.contrib.auth.models import AnonymousUser, User
from django.core import cache
from django.test.testcases import TestCase
from rulez.exceptions import RulesException
from rulez.rolez.base import AbstractRole
from rulez.rolez.cache_helper import get_counter, increment_counter, get_roles, \
get_user_pk, roles_key
from rulez.rolez.models import ModelRoleMixin
class Mock():
    """Bare object with a pk, used as a cache-counter target."""
    pk = 999
class MockUser():
    """Minimal non-anonymous user double."""

    def __init__(self):
        self.pk = 666

    def is_anonymous(self):
        return False
# Testing the model inheritance
class Tester(AbstractRole):
    """Role whose membership is driven by a 'member' attribute on the user."""

    @classmethod
    def is_member(cls, user, obj):
        return getattr(user, 'member', False)


class TestModel(ModelRoleMixin):
    """Minimal roles-aware model with a single relevant role."""
    pk = 1  # just to emulate a Django model
    roles = [Tester]
# The actual test case
class RolesCacheHelperTestCase(TestCase):
    """Cache-counter and role-resolution behaviour of the rolez helpers."""

    def setUp(self):
        cache.cache.clear()

    def test_incrementing_counter_works(self):
        obj = Mock()
        first = get_counter(obj)
        self.assertEqual(first, 0)
        increment_counter(obj)
        second = get_counter(obj)
        self.assertNotEqual(second, first)

    def test_incrementing_counter_works_for_none(self):
        increment_counter(None)

    def test_get_roles_for_None_raises(self):
        with self.assertRaises(AttributeError):
            res = get_counter(None)
            self.assertEqual(res, None)

    def test_rulez_invalidate_works(self):
        model = TestModel()
        user = MockUser()
        first = get_counter(model)
        self.assertEqual(first, 0)
        model.rulez_invalidate()
        second = get_counter(model)
        self.assertNotEqual(second, first)

    def test_get_empty_roles_works(self):
        model = TestModel()
        user = MockUser()
        res = get_roles(user, model)
        self.assertEqual(res, [])

    def test_user_with_role_works(self):
        # Now let's make the user a member!
        model = TestModel()
        user = MockUser()
        setattr(user, 'member', True)
        res = get_roles(user, model)
        self.assertEqual(len(res), 1)

    def test_get_roles_cache_works(self):
        # A second call must be served from the cache with the same result.
        model = TestModel()
        user = MockUser()
        setattr(user, 'member', True)
        res = get_roles(user, model)
        self.assertEqual(len(res), 1)
        res2 = get_roles(user, model)
        self.assertEqual(len(res2), 1)
        self.assertEqual(res, res2)

    def test_has_role_works(self):
        model = TestModel()
        user = MockUser()
        setattr(user, 'member', True)
        res = model.has_role(user, Tester)
        self.assertEqual(res, True)

    def test_has_role_caches_on_instance(self):
        model = TestModel()
        user = MockUser()
        setattr(user, 'member', True)
        self.assertFalse(hasattr(model, "_rolez"))
        res = model.has_role(user, Tester)
        self.assertEqual(res, True)
        self.assertTrue(hasattr(model, "_rolez"))
        self.assertEqual(1, len(model._rolez))
        res = model.has_role(user, Tester)
        self.assertEqual(res, True)
        self.assertTrue(hasattr(model, "_rolez"))
        self.assertEqual(1, len(model._rolez))

    def test_doesnt_have_role_works(self):
        model = TestModel()
        user = MockUser()
        res = model.has_role(user, Tester)
        self.assertEqual(res, False)

    def test_get_anonymous_user_works(self):
        anon = AnonymousUser()
        res = get_user_pk(anon)
        self.assertEqual(res, 'anonymous')

    def test_get_roles_works_for_anonymous(self):
        model = TestModel()
        user = AnonymousUser()
        res = model.has_role(user, Tester)
        self.assertEqual(res, False)

    def test_get_counter_does_not_return_spaces(self):
        obj = Mock()
        user = MockUser()
        roles_key(user, obj)  # the first time, the counter == 0
        increment_counter(obj)  # now there should be a timestamp
        res = roles_key(user, obj)
        self.assertTrue(' ' not in res)

    def test_roles_for_users_on_users_raises_without_relevant_roles(self):
        # If for some reason you want to enforce rules on users...
        django_user = User.objects.create(username="test",
                                          email="test@example.com",
                                          first_name="Test",
                                          last_name="Tester")
        user = MockUser()  # that's faster
        setattr(user, 'member', True)
        with self.assertRaises(RulesException):
            res = get_roles(user, django_user)
            self.assertEqual(len(res), 1)

    def test_roles_for_users_on_users_works_with_relevant_roles(self):
        # If for some reason you want to enforce rules on users...
        django_user = User.objects.create(username="test",
                                          email="test@example.com",
                                          first_name="Test",
                                          last_name="Tester")
        setattr(django_user, 'relevant_roles', lambda: [Tester])
        user = MockUser()  # that's faster
        setattr(user, 'member', True)
        res = get_roles(user, django_user)
        self.assertEqual(len(res), 1)
########NEW FILE########
__FILENAME__ = signals
#-*- coding: utf-8 -*-
from django.test.testcases import TestCase
from rulez.rolez.signals import should_we_invalidate_rolez
class MockInstance(object):
    """Records whether rulez_invalidate() was called on it."""

    def __init__(self):
        self.called = False

    def rulez_invalidate(self):
        self.called = True
class SignalsTestCase(TestCase):
    """The signal handler forwards invalidation to opted-in instances."""

    def test_handling_forwards_properly(self):
        inst = MockInstance()
        should_we_invalidate_rolez(self, inst)
        self.assertEqual(inst.called, True)
########NEW FILE########
__FILENAME__ = templatetags
#-*- coding: utf-8 -*-
from django.test.testcases import TestCase
from django.contrib.auth.models import AnonymousUser, User
from django.template import Template, Context, TemplateSyntaxError
from rulez import registry
from rulez.tests.backend import MockUser
class MockModel(object):
    """Model double with one allowing and one denying permission."""
    pk = 999

    def mock_positive_permission(self, user):
        return True

    def mock_negative_permission(self, user):
        return False
class TemplatetagTestCase(TestCase):
    """Render the rulez_perms tag against positive/negative permissions and
    for active, inactive and anonymous users."""

    def create_fixtures(self):
        self.user = MockUser()
        self.inactive_user = MockUser(is_active=False)
        self.model = MockModel()

    def render_template(self, template, context):
        context = Context(context)
        return Template(template).render(context)

    def assertYesHeCan(self, permission, user):
        """Assert that rendering the tag for ``permission`` grants access."""
        registry.register(permission, MockModel)
        rendered = self.render_template(
            "{% load rulez_perms %}"
            "{% rulez_perms " + permission + " object as can %}"
            "{% if can %}yes he can{% else %}no can do{% endif %}",
            {
                "user": user,
                "object": MockModel()
            }
        )
        self.assertEqual(rendered, "yes he can")

    def assertNoHeCant(self, permission, user):
        """Assert that rendering the tag for ``permission`` denies access."""
        registry.register(permission, MockModel)
        rendered = self.render_template(
            "{% load rulez_perms %}"
            "{% rulez_perms " + permission + " object as can %}"
            "{% if can %}yes he can{% else %}no he can't{% endif %}",
            {
                "user": user,
                "object": MockModel()
            }
        )
        self.assertEqual(rendered, "no he can't")

    def test_active_user_against_positive_permission(self):
        self.assertYesHeCan("mock_positive_permission", User(is_active=True))

    def test_active_user_for_negative_permission(self):
        self.assertNoHeCant("mock_negative_permission", User(is_active=True))

    def test_inactive_user_against_positive_permission(self):
        self.assertNoHeCant("mock_positive_permission", User(is_active=False))

    def test_inactive_user_against_negative_permission(self):
        self.assertNoHeCant("mock_negative_permission", User(is_active=False))

    def test_anonymous_user_against_positive_permission(self):
        self.assertYesHeCan("mock_positive_permission", AnonymousUser())

    def test_anonymous_user_against_negative_permission(self):
        self.assertNoHeCant("mock_negative_permission", AnonymousUser())

    def test_active_user_against_missing_permission(self):
        permission = "missing"
        rendered = self.render_template(
            "{% load rulez_perms %}"
            "{% rulez_perms " + permission + " object as can %}"
            "{% if can %}yes he can{% else %}no he can't{% endif %}",
            {
                "user": User(is_active=True),
                "object": MockModel()
            }
        )
        self.assertEqual(rendered, "no he can't")

    def test_invalid_user(self):
        self.assertRaisesRegexp(
            (TemplateSyntaxError, AttributeError),
            "'NoneType' object has no attribute 'has_perm'",
            self.render_template,
            "{% load rulez_perms %}{% rulez_perms mock_positive_permission object as can %}",
            {"object": MockModel(), "user": None})

    def test_tag_syntax(self):
        registry.register("mock_positive_permission", MockModel)
        # TODO: error messages from the template tag are a bit confusing.
        self.assertRaisesRegexp(TemplateSyntaxError, "tag requires exactly three arguments", self.render_template,
                                "{% load rulez_perms %}{% rulez_perms mock_positive_permission object %}", {})
        self.assertRaisesRegexp(TemplateSyntaxError, "tag requires exactly three arguments", self.render_template,
                                "{% load rulez_perms %}{% rulez_perms mock_positive_permission object can %}", {})
        self.assertRaisesRegexp(TemplateSyntaxError, "third argument to tag must be 'as'", self.render_template,
                                "{% load rulez_perms %}{% rulez_perms mock_positive_permission object can can %}", {})
########NEW FILE########
__FILENAME__ = manage
#!/usr/bin/env python
from django.core.management import execute_manager
import imp

# Added for the test runner: make the project root importable.
import os, sys
sys.path.insert(0, os.path.abspath('./../../'))

try:
    imp.find_module('settings')  # Assumed to be in the same directory.
except ImportError:
    import sys
    sys.stderr.write("Error: Can't find the file 'settings.py' in the directory containing %r. It appears you've customized things.\nYou'll have to run django-admin.py, passing it your settings module.\n" % __file__)
    sys.exit(1)

import settings

if __name__ == "__main__":
    execute_manager(settings)
########NEW FILE########
__FILENAME__ = models
# -*- coding: utf-8 -*-
########NEW FILE########
__FILENAME__ = tests
"""
This file demonstrates writing tests using the unittest module. These will pass
when you run "manage.py test".
Replace this with more appropriate tests for your application.
"""
from django.test import TestCase
class SimpleTest(TestCase):
    """Placeholder test demonstrating the unittest style."""

    def test_basic_addition(self):
        """
        Tests that 1 + 1 always equals 2.
        """
        self.assertEqual(1 + 1, 2)
__FILENAME__ = views
# Create your views here.
########NEW FILE########
__FILENAME__ = settings
# Django settings for testapp project.
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'test.database', # Or path to database file if using sqlite3.
'USER': '', # Not used with sqlite3.
'PASSWORD': '', # Not used with sqlite3.
'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
'PORT': '', # Set to empty string for default. Not used with sqlite3.
}
}
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = '/static/'
# URL prefix for admin static files -- CSS, JavaScript and images.
# Make sure to use a trailing slash.
# Examples: "http://foo.com/static/admin/", "/static/admin/".
ADMIN_MEDIA_PREFIX = '/static/admin/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'o(ifwru@r&@i!g!%_w85_*oveey7iq3hoq1hfvd^e6(25gd+t2'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'testapp.urls'
TEMPLATE_DIRS = (
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
# 'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'rulez', # The actual rulez package
'project', # import the test app too
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'mail_admins': {
'level': 'ERROR',
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
AUTHENTICATION_BACKENDS = [
"rulez.backends.ObjectPermissionBackend",
]
########NEW FILE########
__FILENAME__ = urls
from django.conf.urls.defaults import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
urlpatterns = patterns('',
# Examples:
# url(r'^$', 'testapp.views.home', name='home'),
# url(r'^testapp/', include('testapp.foo.urls')),
# Uncomment the admin/doc line below to enable admin documentation:
# url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
# Uncomment the next line to enable the admin:
# url(r'^admin/', include(admin.site.urls)),
)
########NEW FILE########
| [
"dyangUCI@github.com"
] | dyangUCI@github.com |
42e03737a8c5e93575e7afa1338bcfa4fadacf2b | a087050b04bc1b0274ffb9de5a8ae3008da6cb2e | /size_caculate.py | 1537211a91e4ee63452ba6f4b473c96322a0aa20 | [] | no_license | jasper-cell/medical_Image_OpenCV | fd0c030821e60df6d06f6676b342a580f1a43a38 | d1ebae72d8d8a05570009a4567eb498bb46a3e5a | refs/heads/main | 2023-04-13T06:38:20.175855 | 2021-04-09T07:05:52 | 2021-04-09T07:05:52 | 356,169,694 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,487 | py | # import the necessary packages
from scipy.spatial import distance as dist
from imutils import perspective
from imutils import contours
import numpy as np
import argparse
import imutils
import cv2
import time
def midpoint(ptA, ptB):
    """Return the point halfway between two 2-D points as an (x, y) tuple."""
    mid_x = (ptA[0] + ptB[0]) / 2.0
    mid_y = (ptA[1] + ptB[1]) / 2.0
    return (mid_x, mid_y)
# 定义对应的命令行参数
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=False, default='./standardSample/jiazhuangxianrutouzhuangai.jpg',
help="path to the input image")
ap.add_argument("-w", "--width", type=float, required=False, default=2,
help="width of the left-most object in the image (in inches)")
# ap.add_argument("-e", "--enhance", type=bool, required=False, default=False, help="the key word to open or close
# the enhance function")
ap.add_argument("-e", "--enhance", type=bool, required=False, default=True,
help="the key word to open or close the enhance function")
args = vars(ap.parse_args())
sample_A = (138, 681)
sample_B = (179, 682)
# 加载图像并进行灰度处理以及对应的高斯模糊操作
image = cv2.imread(args["image"])
print(image.shape)
image = image[10:, 0:image.shape[1] - 10, :]
# 定义对应的信号函数
def hsv(arg):
    """Trackbar callback: re-threshold the global ``image`` in HSV space.

    Reads the current positions of the sliders in the "TrackBars" window
    (HSV lower/upper bounds plus two contrast/brightness passes), blurs
    and adjusts the image, masks the selected HSV range, and displays the
    intermediate results.  The masked image is published through the
    module-level ``image_res``.
    """
    global image_res
    # Lower HSV bound from the sliders.
    LH = cv2.getTrackbarPos("LH", "TrackBars")
    LS = cv2.getTrackbarPos("LS", "TrackBars")
    LV = cv2.getTrackbarPos("LV", "TrackBars")
    # FIX: removed a duplicated, redundant second read of the "LH" slider.
    # Upper HSV bound from the sliders.
    UH = cv2.getTrackbarPos("UH", "TrackBars")
    US = cv2.getTrackbarPos("US", "TrackBars")
    UV = cv2.getTrackbarPos("UV", "TrackBars")
    # Contrast/brightness values for the negative and positive passes.
    Ncnum = cv2.getTrackbarPos("NContrast", "TrackBars")
    Nbnum = cv2.getTrackbarPos("NBrightness", "TrackBars")
    Pcnum = cv2.getTrackbarPos("PContrast", "TrackBars")
    Pbnum = cv2.getTrackbarPos("PBrightness", "TrackBars")
    # Assemble the HSV range to keep.
    lower = np.array([LH, LS, LV])
    upper = np.array([UH, US, UV])
    # Blur, darken (negative pass) then brighten (positive pass), and mask.
    image_blur = cv2.GaussianBlur(image, (5, 5), 0)
    adjusted = imutils.adjust_brightness_contrast(image_blur, contrast=float(Ncnum), brightness=float(-Nbnum))
    adjusted2 = imutils.adjust_brightness_contrast(adjusted, contrast=float(Pcnum), brightness=float(Pbnum))
    image_hsv = cv2.cvtColor(adjusted2, cv2.COLOR_BGR2HSV)
    mask = cv2.inRange(image_hsv, lower, upper)
    # Open the mask (erode then dilate) to drop small speckles, then smooth it.
    mask = cv2.erode(mask, None, iterations=2)
    mask = cv2.dilate(mask, None, iterations=2)
    mask = cv2.GaussianBlur(mask, (3, 3), 0)
    image_res = cv2.bitwise_and(adjusted2, adjusted2, mask=mask)
    cv2.imshow("res", adjusted2)
    cv2.imshow("hsv", image_hsv)
    cv2.imshow("return", image_res)
# 创建对应的自定义的对比度和亮度的调节器
# cv2.namedWindow("TrackBars")
# cv2.resizeWindow("TrackBars", 640, 480)
# cv2.createTrackbar("LH", "TrackBars", 0, 255, hsv)
# cv2.createTrackbar("LS", "TrackBars", 0, 255, hsv)
# cv2.createTrackbar("LV", "TrackBars", 0, 255, hsv)
# cv2.createTrackbar("UH", "TrackBars", 255, 255, hsv)
# cv2.createTrackbar("US", "TrackBars", 255, 255, hsv)
# cv2.createTrackbar("UV", "TrackBars", 255, 255, hsv)
# cv2.createTrackbar("NBrightness", "TrackBars", 0, 256, hsv)
# cv2.createTrackbar("NContrast", "TrackBars", 0, 256, hsv)
# cv2.createTrackbar("PBrightness", "TrackBars", 0, 256, hsv)
# cv2.createTrackbar("PContrast", "TrackBars", 0, 256, hsv)
#
# if cv2.waitKey(0) == 27:
# cv2.destroyAllWindows()
# adjusted = imutils.adjust_brightness_contrast(image, contrast=80.0, brightness=-180.0)
# if(args['enhance']):
# adjusted = imutils.adjust_brightness_contrast(adjusted, contrast=50.0, brightness=35.0)
# cv2.imshow("ad", adjusted)
# cv2.waitKey(0)
# cv2.imshow("original", image)
# cv2.waitKey(0)
# cv2.imshow("image_res", image_res)
# cv2.waitKey(0)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
gray = cv2.GaussianBlur(gray, (7, 7), 1) # 对原始图像中的一些小的边界进行模糊操作
# gray = cv2.medianBlur(gray, 5) # 对原始图像中的一些小的边界进行模糊操作
cv2.imshow("gray", gray)
cv2.waitKey(0)
cv2.imwrite('./gray_images/gray_jiazhuangxian.jpg', gray)
# 使用Canny进行相应的边缘检测
edged = cv2.Canny(gray, 70, 150)
# edged = imutils.auto_canny(gray, sigma=0.8)
# cv2.imshow("edged1", edged)
# cv2.waitKey(0)
edged = cv2.dilate(edged, (5, 5), iterations=1)
edged = cv2.erode(edged, (5, 5), iterations=1)
# cv2.imshow("edged2", edged)
# cv2.waitKey(0)
edged = cv2.GaussianBlur(edged, (7, 7), 0) # 对边缘进行模糊操作使得边界的粒度更大一些,而不是仅仅关心于局部的边界情况
# edged = cv2.medianBlur(edged, 5) # 对边缘进行模糊操作使得边界的粒度更大一些,而不是仅仅关心于局部的边界情况
cv2.imshow("edged", edged)
cv2.waitKey(0)
# 在经过边缘检测的图像中进行轮廓特征的提取
cnts = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL,
cv2.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if imutils.is_cv2() else cnts[1] # 不同版本的opencv对应的contours的位置是不一致的
# 对轮廓按照从左到右的顺序进行排序操作
(cnts, _) = contours.sort_contours(cnts) # 对轮廓按照从左到右的顺序进行相应的排序操作
# 用于存储每个像素点对应真实值的比例
pixelsPerMetric = None
# 对每一个轮廓进行单独的处理
for c in cnts:
final = np.zeros(image.shape, np.uint8) # 建立对应的最后颜色提取图
mask = np.zeros(gray.shape, np.uint8) # 提取对应轮廓的蒙皮
# 如果轮廓的周长或者面积是不足够大的情况下不予考虑
if abs(cv2.arcLength(c, closed=True)) < 200:
continue
# 计算与轮廓相切的最小的外切矩形
orig = image.copy()
box = cv2.minAreaRect(c)
box = cv2.cv.BoxPoints(box) if imutils.is_cv2() else cv2.boxPoints(box)
box = np.array(box, dtype="int")
# order the points in the contour such that they appear
# in top-left, top-right, bottom-right, and bottom-left
# order, then draw the outline of the rotated bounding
# box
box = perspective.order_points(box)
cv2.drawContours(orig, [box.astype("int")], -1, (0, 255, 0), 2)
# loop over the original points and draw them
for (x, y) in box:
cv2.circle(orig, (int(x), int(y)), 5, (0, 0, 255), -1)
# 计算顶边和底边的两个中心点
(tl, tr, br, bl) = box
(tltrX, tltrY) = midpoint(tl, tr)
(blbrX, blbrY) = midpoint(bl, br)
# 计算左边和右边的两个中心点
(tlblX, tlblY) = midpoint(tl, bl)
(trbrX, trbrY) = midpoint(tr, br)
# 计算外切矩形正中心位置处颜色的值
points = []
(middleX, middleY) = midpoint([tlblX, tlblY], [trbrX, trbrY])
point1 = midpoint([tlblX, tlblY], [middleX, middleY])
point2 = midpoint([tltrX, tltrY], [middleX, middleY])
point3 = midpoint([trbrX, trbrY], [middleX, middleY])
point4 = midpoint([blbrX, blbrY], [middleX, middleY])
points.append(point1)
points.append(point2)
points.append(point3)
points.append(point4)
points = np.array(points)
points = np.expand_dims(points, 1)
print("points.shape: ", points.shape)
color = orig[int(middleY), int(middleX)]
color = color.tolist()
print(color)
# 绘制每一条边的中心点
cv2.circle(orig, (int(tltrX), int(tltrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(blbrX), int(blbrY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(tlblX), int(tlblY)), 5, (255, 0, 0), -1)
cv2.circle(orig, (int(trbrX), int(trbrY)), 5, (255, 0, 0), -1)
# 绘制中点之间的线段
cv2.line(orig, (int(tltrX), int(tltrY)), (int(blbrX), int(blbrY)),
(255, 0, 255), 2)
cv2.line(orig, (int(tlblX), int(tlblY)), (int(trbrX), int(trbrY)),
(255, 0, 255), 2)
# 对应中点的连线来作为对应的长和宽的值来使用
dA = dist.euclidean((tltrX, tltrY), (blbrX, blbrY))
dB = dist.euclidean((tlblX, tlblY), (trbrX, trbrY))
sample_dist = dist.euclidean(sample_A, sample_B)
print("dA: {:.2f}, dB: {:.2f}".format(dA, dB))
pixelsPerMetric = sample_dist / args["width"]
print("pixelsPerMetric: ", pixelsPerMetric)
# 获取相应参考物的比例尺
if pixelsPerMetric is None:
pixelsPerMetric = dB / args["width"] # 计算对应的比例尺
print(pixelsPerMetric)
# 计算物体的实际尺寸
dimA = dA / pixelsPerMetric
dimB = dB / pixelsPerMetric
res = cv2.arcLength(c, True) # 计算对应的轮廓的弧长
approx = cv2.approxPolyDP(c, 0.001 * res, True)
ret = cv2.drawContours(orig, [c], -1, (50, 0, 212), 5)
area = cv2.contourArea(approx) # 计算对应的轮廓的面积
# 绘制对应外接矩形的长和宽在相应的图像上
cv2.putText(orig, "height: {:.2f}cm".format(dimA),
(int(tltrX - 15), int(tltrY - 10)), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (255, 0, 0), 2)
cv2.putText(orig, "width: {:.2f}cm".format(dimB),
(int(trbrX + 10), int(trbrY)), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (255, 0, 0), 2)
# 提取对应轮廓的颜色均值
mask[...] = 0
cv2.drawContours(mask, [c], -1, 255, -1)
cv2.drawContours(final, [c], -1, cv2.mean(orig, mask), -1)
mean_color = cv2.mean(orig, mask)
# 在图像上绘制对应的关键文本信息
cv2.putText(orig, "arcLength:{:.1f}mm".format(res), (int(trbrX + 30), int(trbrY + 20)), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (122, 255, 255), 2)
cv2.putText(orig, "area:{:.2f}".format(dimA * dimB), (int(trbrX + 30), int(trbrY + 40)), cv2.FONT_HERSHEY_SIMPLEX,
0.65, (122, 255, 255), 2)
cv2.putText(orig, "color: B{}, G{}, R{}".format(color[0], color[1], color[2]), (int(trbrX + 30), int(trbrY + 60)),
cv2.FONT_HERSHEY_SIMPLEX, 0.5, (122, 255, 255), 2)
cv2.rectangle(orig, (0, 0), (40, 40), (mean_color[0], mean_color[1], mean_color[2]), -1)
cv2.putText(final, "color: B{}, G{}, R{}".format(int(mean_color[0]), int(mean_color[1]), int(mean_color[2])),
(int(trbrX + 30), int(trbrY + 60)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (122, 255, 255), 2)
# 展示对应的图像
cv2.imshow("Image", orig)
cv2.imshow("mask", mask)
cv2.imshow("final", final)
cv2.waitKey(0)
| [
"lingtaner@163.com"
] | lingtaner@163.com |
39a838b6a8b40df53319fc2da1706fdb2d4dc906 | 737cb18e9ccb67da8b3eef4cafae86bc64bca76d | /PyCharmProj/RadioWave.py | 538c32fb0f685b61ff13d77ea0a3dab4af89d624 | [
"LicenseRef-scancode-public-domain"
] | permissive | stephanieeechang/PythonGameProg | 556f7c38a356313bd07a632680dc6b8dea31b1d8 | a2cc472ec95844bbdc6d31347684db80615a997c | refs/heads/master | 2020-12-04T18:53:34.139257 | 2017-05-25T01:24:45 | 2017-05-25T01:24:45 | 66,228,833 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,912 | py | import sys
from PyQt5 import QtGui, QtWidgets, QtCore
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QDockWidget, QHBoxLayout
import pyqtgraphdev.pyqtgraph as pt
from pyqtgraphdev.pyqtgraph import PlotWidget
import numpy as np
import time
class CosGraph(QDockWidget):
def __init__(self, parent=None):
super(CosGraph, self).__init__(parent=parent)
self.setContextMenuPolicy(Qt.NoContextMenu)
# container for cosine graph
self.host = QtWidgets.QWidget(self)
self.host.setObjectName("hostCosine")
self.host.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
self.host.setMinimumSize(200, 400)
# define the graph
self.graph = PlotWidget(self)
# horizontal, vertical lines, thickness
self.graph.plotItem.showGrid(True, True, 0.7)
self.setObjectName("cosplot")
self.graph.raise_()
# add graph container to layout
self.horizontalLayout = QHBoxLayout(self.host)
self.horizontalLayout.addWidget(self.graph)
def plot(self, data):
"""
:param data: dictionary with x, y data
:return: none
"""
X = data['X']
Y = data['Y']
colorLine = data['pen']
self.graph.plot(X, Y, pen=colorLine, clear=True)
def resizeEvent(self, e):
self.host.setGeometry(10, 10, e.size().width(), e.size().height())
self.graph.setGeometry(10, 10, e.size().width(), 0.9*e.size().height())
class SinGraph(QDockWidget):
def __init__(self, parent=None):
super(SinGraph, self).__init__(parent=parent)
self.setContextMenuPolicy(Qt.NoContextMenu)
# container for sine graph
self.host = QtWidgets.QWidget(self)
self.host.setObjectName("hostSine")
self.host.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
self.host.setMinimumSize(200, 400)
# define the graph
self.graph = PlotWidget(self)
# horizontal, vertical lines, thickness
self.graph.plotItem.showGrid(True, True, 0.7)
self.setObjectName("sinplot")
self.graph.raise_()
# add graph container to layout
self.horizontalLayout = QHBoxLayout(self.host)
self.horizontalLayout.addWidget(self.graph)
def plot(self, data):
"""
:param data: dictionary with x, y data
:return: none
"""
X = data['X']
Y = data['Y']
colorLine = data['pen']
self.graph.plot(X, Y, pen=colorLine, clear=True)
def resizeEvent(self, e):
self.host.setGeometry(10, 10, e.size().width(), e.size().height())
self.graph.setGeometry(10, 10, e.size().width(), 0.9*e.size().height())
class RadioGraph(QDockWidget):
def __init__(self, parent=None):
super(RadioGraph, self).__init__(parent=parent)
self.setContextMenuPolicy(Qt.NoContextMenu)
# container for cosine graph
self.host = QtWidgets.QWidget(self)
self.host.setObjectName("hostRadio")
self.host.setSizePolicy(QtGui.QSizePolicy.Maximum, QtGui.QSizePolicy.Maximum)
self.host.setMinimumSize(200, 400)
# define the graph
self.graph = PlotWidget(self)
# horizontal, vertical lines, thickness
self.graph.plotItem.showGrid(True, True, 0.7)
self.setObjectName("radioplot")
self.graph.raise_()
# add graph container to layout
self.horizontalLayout = QHBoxLayout(self.host)
self.horizontalLayout.addWidget(self.graph)
def plot(self, data):
"""
:param data: dictionary with x, y data
:return: none
"""
X = data['X']
Y = data['Y']
colorLine = data['pen']
self.graph.plot(X, Y, pen=colorLine, clear=True)
def resizeEvent(self, e):
self.host.setGeometry(10, 10, e.size().width(), e.size().height())
self.graph.setGeometry(10, 10, e.size().width(), 0.9*e.size().height())
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
super(MainWindow, self).__init__(parent=parent)
self.setDockOptions(QtWidgets.QMainWindow.AllowNestedDocks | QtWidgets.QMainWindow.AnimatedDocks)
# create docks
self.dockCos = CosGraph()
self.dockSin = SinGraph()
self.dockRadio = RadioGraph()
self.addDockWidget(Qt.LeftDockWidgetArea, self.dockCos)
self.addDockWidget(Qt.LeftDockWidgetArea, self.dockSin)
self.addDockWidget(Qt.RightDockWidgetArea, self.dockRadio)
self.update()
def update(self):
Npoints = 2000
f_m = 0.1
f_c = 1
A = 5
M = 0.8
X = np.arange(0, 20, 20/Npoints)
Ycos = np.cos(2*np.pi*f_m*X)
Ysin = A*np.sin(2*np.pi*f_c*X)
Yradio = (1+M*np.cos(2*np.pi*f_m*X))*A*np.sin(2*np.pi*f_c*X)
# change color every second
c = pt.hsvColor(time.time() / 5%1, alpha=.5)
pen = pt.mkPen(color=c, width=2)
cosdata = {'X':X, 'Y':Ycos, 'pen':pen}
sindata = {'X':X, 'Y':Ysin, 'pen':pen}
radiodata = {'X':X, 'Y':Yradio, 'pen':pen}
self.dockCos.plot(cosdata)
self.dockSin.plot(sindata)
self.dockRadio.plot(radiodata)
# refresh
QtCore.QTimer.singleShot(100, self.update)
class SandBoxApp(QtWidgets.QApplication):
    # define class constructor
    def __init__(self, *args, **kwargs):
        # NOTE(review): **kwargs is accepted but not forwarded to
        # QApplication (only *args is) — confirm this is intentional.
        super(SandBoxApp, self).__init__(*args)
        # Build and show the main window as soon as the application exists.
        self.mainwindow = MainWindow()
        self.mainwindow.setGeometry(50, 100, 1200, 750)
        self.mainwindow.show()
        # disable context menu (the menu that appears when user right clicks mouse)
        self.mainwindow.setContextMenuPolicy(Qt.NoContextMenu)
# entry point for Python program
def main():
    # Build the Qt application (its constructor creates and shows the main
    # window) and block in the Qt event loop until the window closes.
    app = SandBoxApp(sys.argv)
    sys.exit(app.exec_())
if __name__ == "__main__":
main()
| [
"stephaniechang@StephanieMacBookAir.local"
] | stephaniechang@StephanieMacBookAir.local |
92fa4c4cee9bfd46c826875c760c60269d31c06f | fcdbf22231739b61bbd4da531fcacf0c6f08c4bf | /services/lasso_regression/prod.config.py | 0ba5c9b0f0b2531fae1cba7e4b5ac173809e1241 | [] | no_license | revotechUET/wi-uservice | 2a3f169a4a9735bb3167d8bef75baceab11b2a04 | 33f2ad2f0a329fc1f54153b0ebb775f2bd74d631 | refs/heads/master | 2023-04-15T14:58:36.183521 | 2023-04-07T09:39:48 | 2023-04-07T09:39:48 | 179,446,650 | 0 | 1 | null | 2022-12-08T01:51:59 | 2019-04-04T07:39:07 | Python | UTF-8 | Python | false | false | 1,939 | py | import os
import multiprocessing
# --- Gunicorn configuration for the lasso_regression service ---

# Listen on all interfaces, port 80; allow up to 2048 pending connections.
bind = '0.0.0.0:80'
backlog = 2048
# One worker per CPU core.
workers = multiprocessing.cpu_count()
# FIX: the original `'sync' or os.environ.get("WORKER_CLASS")` always
# evaluated to 'sync', silently ignoring the WORKER_CLASS environment
# variable.  Use the env var with 'sync' as the fallback instead.
worker_class = os.environ.get("WORKER_CLASS", "sync")
worker_connections = 1000
timeout = 30
keepalive = 2
# Development/debug switches — all off for production.
reload = False
spew = False
daemon = False
# Environment injected into each worker; DB settings default to local MongoDB.
raw_env = [
    "DB_HOST="+os.environ.get("DB_HOST", "127.0.0.1"),
    "DB_PORT="+os.environ.get("DB_PORT", "27017"),
    "DB_NAME="+os.environ.get("DB_NAME", "wi_regression"),
    "MODEL_DIR="+os.path.join(os.getcwd(), "static"),
]
# Database credentials are only injected when DB_USER is configured.
if os.environ.get("DB_USER"):
    raw_env.append("DB_USER="+os.environ.get("DB_USER"))
    raw_env.append("DB_PASS="+os.environ.get("DB_PASS"))
pidfile = "/tmp/lasso.pid"
umask = 0
user = None
group = None
tmp_upload_dir = None
# Logging: errors to stderr, access log to a file.
errorlog = '-'
loglevel = 'error'
accesslog = '/var/log/wi_regression.access.log'
access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"'
proc_name = None
def post_fork(server, worker):
    # Gunicorn hook called just after a worker has been forked.
    server.log.info("Worker spawned (pid: %s)", worker.pid)
def pre_fork(server, worker):
    # Gunicorn hook called just before a worker is forked; intentionally a no-op.
    pass
def pre_exec(server):
    # Gunicorn hook called just before the master re-executes itself
    # (e.g. during a binary upgrade).
    server.log.info("Forked child, re-executing.")
def when_ready(server):
    # Gunicorn hook called once the server has finished starting up.
    server.log.info("Server is ready. Spawning workers")
def worker_int(worker):
    """Gunicorn hook for INT/QUIT: log a stack trace of every live thread."""
    worker.log.info("worker received INT or QUIT signal")
    ## get traceback info
    import threading, sys, traceback
    names = {thread.ident: thread.name for thread in threading.enumerate()}
    lines = []
    for ident, frame in sys._current_frames().items():
        lines.append("\n# Thread: %s(%d)" % (names.get(ident, ""), ident))
        for fname, lineno, func, src in traceback.extract_stack(frame):
            lines.append('File: "%s", line %d, in %s' % (fname, lineno, func))
            if src:
                lines.append(" %s" % (src.strip()))
    worker.log.debug("\n".join(lines))
def worker_abort(worker):
    # Gunicorn hook called when a worker receives SIGABRT (e.g. on timeout).
    worker.log.info("Worker received SIGABRT signal")
| [
"thinhlevan2015@gmail.com"
] | thinhlevan2015@gmail.com |
21720ee88c56a4728c35c27fa31a8279e9637b6d | aa3087a895f2a8f6ed489713c9f2edb6e5ae78d6 | /Docker/AI/strategies.py | 6a97396ee8e0d5974f9af6b0169f694bf8e2e23e | [] | no_license | yangboz/2017-2018-computing-thinking | 463ca863c5637008f2210b347a7c71393b5277a2 | ae0154b5785ba68c06240e0d9de6891642803089 | refs/heads/master | 2021-06-16T22:34:26.731562 | 2020-11-13T01:16:30 | 2020-11-13T01:16:30 | 93,296,144 | 11 | 6 | null | 2017-06-11T06:54:42 | 2017-06-04T07:02:59 | Python | UTF-8 | Python | false | false | 11,387 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import copy
import math
import random
import sys
import time

import numpy as np

import gtp
import go
import utils
def sorted_moves(probability_array):
    """Return every board coordinate, ordered from most to least probable."""
    board_coords = []
    for row in range(go.N):
        for col in range(go.N):
            board_coords.append((row, col))
    board_coords.sort(key=lambda coord: probability_array[coord], reverse=True)
    return board_coords
def translate_gtp_colors(gtp_color):
    """Map a pygtp color constant to the corresponding go-module color."""
    if gtp_color == gtp.BLACK:
        return go.BLACK
    elif gtp_color == gtp.WHITE:
        return go.WHITE
    else:
        return go.EMPTY
def is_move_reasonable(position, move):
    # Legal, and not an own-eye fill: go.is_eyeish presumably returns the
    # color owning the eye at `move` (compared against the side to play) —
    # confirm against the go module.
    return position.is_move_legal(move) and go.is_eyeish(position.board, move) != position.to_play
def select_most_likely(position, move_probabilities):
    """Pick the highest-probability move that is reasonable to play.

    Returns None when no reasonable move exists (i.e. pass).
    """
    candidates = (move for move in sorted_moves(move_probabilities)
                  if is_move_reasonable(position, move))
    return next(candidates, None)
def select_weighted_random(position, move_probabilities):
    """Sample a move from the policy distribution, falling back to argmax.

    Draws a move at random weighted by `move_probabilities`; if the drawn
    move is unreasonable (or no move was drawn), falls back to the most
    likely reasonable move.
    """
    selection = random.random()
    selected_move = None
    current_probability = 0
    # technically, don't have to sort in order to correctly simulate a random
    # draw, but it cuts down on how many additions we do.
    for move, move_prob in np.ndenumerate(move_probabilities):
        current_probability += move_prob
        if current_probability > selection:
            selected_move = move
            break
    # FIX: if the probabilities sum to slightly less than `selection`
    # (floating-point rounding), no move is drawn and `selected_move` stays
    # None; the original passed None into is_move_reasonable().  Guard it
    # and use the argmax fallback instead.
    if selected_move is not None and is_move_reasonable(position, selected_move):
        return selected_move
    # fallback in case the selected move was illegal (or never drawn)
    return select_most_likely(position, move_probabilities)
class GtpInterface(object):
    """Base GTP controller: owns the board state and adapts pygtp calls.

    Subclasses implement suggest_move() and may extend clear() to load
    their networks.
    """
    def __init__(self):
        self.size = 9
        self.position = None
        self.komi = 6.5
        # FIX: this used to call `super(PolicyNetworkBestMovePlayer,
        # self).clear()`, which raises TypeError for any instance that is
        # not a PolicyNetworkBestMovePlayer (e.g. RandomPlayer) and skipped
        # subclass reset logic otherwise.  Dispatch through self.clear()
        # as the commented-out original intent suggested; debug prints
        # were removed as well.
        self.clear()
    def set_size(self, n):
        # Resize the (module-global) board and start a fresh position.
        self.size = n
        go.set_board_size(n)
        self.clear()
    def set_komi(self, komi):
        self.komi = komi
        self.position.komi = komi
    def clear(self):
        # Fresh empty position with the current komi.
        self.position = go.Position(komi=self.komi)
    def accomodate_out_of_turn(self, color):
        # If asked to act for the other color, flip the side to move
        # instead of rejecting the request.
        if not translate_gtp_colors(color) == self.position.to_play:
            self.position.flip_playerturn(mutate=True)
    def make_move(self, color, vertex):
        coords = utils.parse_pygtp_coords(vertex)
        self.accomodate_out_of_turn(color)
        self.position = self.position.play_move(coords, color=translate_gtp_colors(color))
        # play_move presumably yields None for an illegal move — hence the
        # truth test below.
        return self.position is not None
    def get_move(self, color):
        self.accomodate_out_of_turn(color)
        move = self.suggest_move(self.position)
        return utils.unparse_pygtp_coords(move)
    def suggest_move(self, position):
        raise NotImplementedError
class RandomPlayer(GtpInterface):
    """Baseline player: plays a uniformly random reasonable move."""
    def suggest_move(self, position):
        # Copy the coordinate list before shuffling so the module-level
        # go.ALL_COORDS is left untouched.
        possible_moves = go.ALL_COORDS[:]
        random.shuffle(possible_moves)
        for move in possible_moves:
            if is_move_reasonable(position, move):
                return move
        # No reasonable move available: pass.
        return None
class PolicyNetworkBestMovePlayer(GtpInterface):
    """GTP player that always plays the policy network's top-ranked move."""
    def __init__(self, policy_network, read_file):
        # Assign the network and checkpoint path before the base
        # constructor runs, since base initialization may trigger clear()
        # and hence refresh_network(), which read them.
        self.policy_network = policy_network
        self.read_file = read_file
        # Cleanup: removed debug prints and the py2-style explicit
        # GtpInterface.__init__(self) call; use super() for consistency
        # with PolicyNetworkRandomMovePlayer.
        super().__init__()
    def clear(self):
        super().clear()
        self.refresh_network()
    def refresh_network(self):
        # Ensure that the player is using the latest version of the network
        # so that the network can be continually trained even as it's playing.
        self.policy_network.initialize_variables(self.read_file)
    def suggest_move(self, position):
        if position.recent and position.n > 100 and position.recent[-1].move is None:
            # Pass if the opponent passes
            return None
        move_probabilities = self.policy_network.run(position)
        return select_most_likely(position, move_probabilities)
class PolicyNetworkRandomMovePlayer(GtpInterface):
    """GTP player that samples moves from the policy network's distribution."""
    def __init__(self, policy_network, read_file):
        # Must be set before base initialization, which may reset state via
        # clear() and hence refresh_network().
        self.policy_network = policy_network
        self.read_file = read_file
        super().__init__()
    def clear(self):
        super().clear()
        self.refresh_network()
    def refresh_network(self):
        # Ensure that the player is using the latest version of the network
        # so that the network can be continually trained even as it's playing.
        self.policy_network.initialize_variables(self.read_file)
    def suggest_move(self, position):
        if position.recent and position.n > 100 and position.recent[-1].move == None:
            # Pass if the opponent passes
            return None
        move_probabilities = self.policy_network.run(position)
        return select_weighted_random(position, move_probabilities)
# Exploration constant
c_PUCT = 5
class MCTSNode():
    '''
    A MCTSNode has two states: plain, and expanded.
    An plain MCTSNode merely knows its Q + U values, so that a decision
    can be made about which MCTS node to expand during the selection phase.
    When expanded, a MCTSNode also knows the actual position at that node,
    as well as followup moves/probabilities via the policy network.
    Each of these followup moves is instantiated as a plain MCTSNode.
    '''
    @staticmethod
    def root_node(position, move_probabilities):
        # Build an already-expanded node with no parent, move, or prior.
        node = MCTSNode(None, None, 0)
        node.position = position
        node.expand(move_probabilities)
        return node
    def __init__(self, parent, move, prior):
        self.parent = parent # pointer to another MCTSNode
        self.move = move # the move that led to this node
        self.prior = prior
        self.position = None # lazily computed upon expansion
        self.children = {} # map of moves to resulting MCTSNode
        self.Q = self.parent.Q if self.parent is not None else 0 # average of all outcomes involving this node
        self.U = prior # monte carlo exploration bonus
        self.N = 0 # number of times node was visited
    def __repr__(self):
        return "<MCTSNode move=%s prior=%s score=%s is_expanded=%s>" % (
            self.move, self.prior, self.action_score, self.is_expanded())
    @property
    def action_score(self):
        # Note to self: after adding value network, must calculate
        # self.Q = weighted_average(avg(values), avg(rollouts)),
        # as opposed to avg(map(weighted_average, values, rollouts))
        return self.Q + self.U
    def is_expanded(self):
        return self.position is not None
    def compute_position(self):
        # Materialize this node's position from its parent; the caller
        # treats a None result as "the move was illegal".
        self.position = self.parent.position.play_move(self.move)
        return self.position
    def expand(self, move_probabilities):
        # One child per board coordinate, carrying the network prior.
        self.children = {move: MCTSNode(self, move, prob)
            for move, prob in np.ndenumerate(move_probabilities)}
        # Pass should always be an option! Say, for example, seki.
        self.children[None] = MCTSNode(self, None, 0)
    def backup_value(self, value):
        # Propagate a rollout result up the tree: bump the visit count,
        # update the running-average Q, and recompute the PUCT bonus U.
        self.N += 1
        if self.parent is None:
            # No point in updating Q / U values for root, since they are
            # used to decide between children nodes.
            return
        self.Q, self.U = (
            self.Q + (value - self.Q) / self.N,
            c_PUCT * math.sqrt(self.parent.N) * self.prior / self.N,
        )
        # must invert, because alternate layers have opposite desires
        self.parent.backup_value(-value)
    def select_leaf(self):
        # Walk down the tree, always taking the child with the highest
        # Q + U action score, until an unexpanded node is reached.
        current = self
        while current.is_expanded():
            current = max(current.children.values(), key=lambda node: node.action_score)
        return current
class MCTS(GtpInterface):
def __init__(self, policy_network, read_file, seconds_per_move=5):
self.policy_network = policy_network
self.seconds_per_move = seconds_per_move
self.max_rollout_depth = go.N * go.N * 3
self.read_file = read_file
super().__init__()
def clear(self):
super().clear()
self.refresh_network()
def refresh_network(self):
# Ensure that the player is using the latest version of the network
# so that the network can be continually trained even as it's playing.
self.policy_network.initialize_variables(self.read_file)
def suggest_move(self, position):
if position.caps[0] + 50 < position.caps[1]:
return gtp.RESIGN
start = time.time()
move_probs = self.policy_network.run(position)
root = MCTSNode.root_node(position, move_probs)
while time.time() - start < self.seconds_per_move:
self.tree_search(root)
# there's a theoretical bug here: if you refuse to pass, this AI will
# eventually start filling in its own eyes.
return max(root.children.keys(), key=lambda move, root=root: root.children[move].N)
def tree_search(self, root):
#print("tree search", file=sys.stderr)
print("tree search")
# selection
chosen_leaf = root.select_leaf()
# expansion
position = chosen_leaf.compute_position()
if position is None:
#print("illegal move!", file=sys.stderr)
print("illegal move!")
# See go.Position.play_move for notes on detecting legality
del chosen_leaf.parent.children[chosen_leaf.move]
return
#print("Investigating following position:\n%s" % (chosen_leaf.position,), file=sys.stderr)
print("Investigating following position:\n%s" % (chosen_leaf.position,))
move_probs = self.policy_network.run(position)
chosen_leaf.expand(move_probs)
# evaluation
value = self.estimate_value(root, chosen_leaf)
# backup
#print("value: %s" % value, file=sys.stderr)
print("value: %s" % value)
chosen_leaf.backup_value(value)
def estimate_value(self, root, chosen_leaf):
    # Estimate value of position using rollout only (for now).
    # (TODO: Value network; average the value estimations from rollout + value network)
    """Estimate the leaf's value with a policy-guided rollout.

    The rollout ends on two consecutive passes or when the move count
    reaches self.max_rollout_depth; the final score is signed from the
    perspective of the player to move at the root.
    """
    leaf_position = chosen_leaf.position
    current = copy.deepcopy(leaf_position)
    while current.n < self.max_rollout_depth:
        move_probs = self.policy_network.run(current)
        current = self.play_valid_move(current, move_probs)
        # Two consecutive passes mean the game is over.
        if len(current.recent) > 2 and current.recent[-1].move == current.recent[-2].move == None:
            break
    else:
        # while/else: only reached when the depth limit expired without a break.
        #print("max rollout depth exceeded!", file=sys.stderr)
        print("max rollout depth exceeded!")
    # +1 when the leaf's player to move matches the root's, -1 otherwise.
    perspective = 1 if leaf_position.to_play == root.position.to_play else -1
    return current.score() * perspective
def play_valid_move(self, position, move_probs):
    """Play the highest-ranked legal, non-eye-filling move; pass if none works."""
    for candidate in sorted_moves(move_probs):
        # Never fill in what looks like one of our own eyes.
        if go.is_eyeish(position.board, candidate):
            continue
        try:
            return position.play_move(candidate, mutate=True)
        except go.IllegalMove:
            pass
    # Every candidate was illegal or eye-filling: pass instead.
    return position.pass_move(mutate=True)
| [
"YoungWelle@gmail.com"
] | YoungWelle@gmail.com |
02c4056393f75166172b697deb56dd962071d06d | b17befe76d07da3df9c7928e9c6c9b617740e46d | /online_Service/application/migrations/0005_auto_20200515_1125.py | 4e594c556b5bcb47a55e41102057385c999071fa | [] | no_license | Gunashekar0619/e-commerce_website_backend | 4ed4340ce8d46389123dbcb6fb47c44ec6d203ed | 59bdd7c54c4f751d87d89a1f4412ee6281f38ad4 | refs/heads/master | 2022-12-21T11:20:15.088326 | 2020-06-06T10:41:39 | 2020-06-06T10:41:39 | 269,946,202 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 634 | py | # Generated by Django 3.0.6 on 2020-05-15 05:55
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Repoint UserProfile.user at AUTH_USER_MODEL with related_name 'guna'."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('application', '0004_userprofile'),
    ]

    operations = [
        migrations.AlterField(
            model_name='userprofile',
            name='user',
            # NOTE(review): ForeignKey(..., unique=True) is the pattern Django
            # suggests replacing with OneToOneField — but do not edit this
            # historical migration; add a new migration if a change is needed.
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='guna', to=settings.AUTH_USER_MODEL, unique=True),
        ),
    ]
| [
"57127076+Gunashekar.c12@users.noreply.github.com"
] | 57127076+Gunashekar.c12@users.noreply.github.com |
a2466edecd6249559575a93468bc0b3ec0d08ad9 | 4c1b4dcc8328e8d792646ed6ed0b8d50f796c6be | /ahkl/ahkl/settings.py | 94f2e05e2c2c666f32b531b7185af51ae6fa1bbb | [] | no_license | DocNotes/docnotes-web | 124540fa33a137caccaf1926b933ae933649d1a4 | 720e9f079067ae3466debe80d12f15948e71daa4 | refs/heads/master | 2020-02-26T14:32:18.954746 | 2016-06-04T22:35:10 | 2016-06-04T22:35:10 | 60,390,799 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | """
Django settings for ahkl project.
Generated by 'django-admin startproject' using Django 1.9.6.
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control; rotate it and load
# it from the environment before any real deployment.
SECRET_KEY = 'cc93z4@vt-2sr46l7v77c4)p2f8j7^6mj^cv82=at@)hctsbsh'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

ALLOWED_HOSTS = []


# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    # Project and third-party apps.
    'docnotes',
    'rest_framework',
    'watson',
]

# Django REST framework: admin-only API access, 10 items per page.
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
    'PAGE_SIZE': 10
}

MIDDLEWARE_CLASSES = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'ahkl.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'ahkl.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}


# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]


# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/

STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, "static")

# NOTE(review): serving media from '/' with MEDIA_ROOT equal to BASE_DIR
# exposes the whole project directory; confirm this is intentional.
MEDIA_URL = '/'
MEDIA_ROOT = os.path.join(BASE_DIR)
"juliankoh13@gmail.com"
] | juliankoh13@gmail.com |
7e65e72ba93d3e951244f0e0c14637a59052a9f4 | 6a7cf44a3cdce674bd0659f81f830826caac34e1 | /Playground/playground 2.py | 559b022857c6e7657af36f253418b250309d8dae | [] | no_license | Ethansu/Random-Python | 9f1b6198968091cd3f356ad2962d0efdc455c76a | 4b1b18e1cb6c04f1195082c5d0899f476e234a55 | refs/heads/master | 2021-05-06T02:10:18.163866 | 2017-12-17T00:53:14 | 2017-12-17T00:53:14 | 114,498,985 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 685 | py | import random
def math(a=None):
    """Print and return the numbers 1-50 divisible by 2, 3 or 5.

    The parameter is accepted only for backward compatibility: the
    original implementation immediately overwrote it, so it is ignored.
    NOTE: the name shadows the stdlib `math` module inside this script.
    """
    result = [n for n in range(1, 51) if n % 2 == 0 or n % 3 == 0 or n % 5 == 0]
    print(result)
    return result


# Bug fix: the original call `math(a)` raised NameError because `a` was
# never defined at module level.
math()
def add(b=None):
    """Draw a random number in [1, 101] and compare it to sum(range(1, 10)).

    Prints and returns 'c wins' when 45 > draw, otherwise 'b wins'.
    The parameter is ignored (the original overwrote it immediately); it
    is kept with a default for backward compatibility.
    """
    b = random.randint(1, 101)
    c = range(1, 10)
    winner = "c wins" if sum(c) > b else "b wins"
    print(winner)
    return winner


# Bug fix: the original call `add(b)` raised NameError because `b` was
# never defined at module level.
add()
def tara_chews_leaves(d=None):
    """Print and return a fixed chat transcript.

    The parameter is ignored (the original overwrote it immediately); it
    is kept with a default for backward compatibility.
    """
    lines = [
        "| ETHAN: Hi",
        " | JOSH: hi",
        " | TARA: GO AWAY",
        " | NOAH: no chating in class",
        " | ETHAN: bye idiots",
        " | TARA: wtf",
        " | JOSH: so long dum dums",
        " | NOAH: block them",
        " | Ethan su renamed the chat to fu*k",
    ]
    transcript = "".join(lines)
    print(transcript)
    return transcript


# Bug fix: the original call `tara_chews_leaves(a)` raised NameError
# because `a` was never defined at module level.
tara_chews_leaves()
"jingchunsumacc@gmail.com"
] | jingchunsumacc@gmail.com |
751bbc8dc6462be795093bfde9343b74f10c4f7e | d63a6811f8ca329d09ab388e9df570858693d60e | /starwars/external/swapi.py | 7aef708503d765b19f4bdef577f3706448fa9ac5 | [] | no_license | kobuz/sw_character_explorer | 25676bfb036c9dae3acaba1c46e112b14d02f934 | 88938c952c3f35d0ff86f42ce58fe93ee6d45643 | refs/heads/master | 2022-12-28T08:36:08.676514 | 2020-09-08T07:05:05 | 2020-09-08T07:05:05 | 301,735,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 794 | py | from typing import Iterator, Dict
import requests
from django.conf import settings
class Client:
    """Minimal HTTP client for the SWAPI REST endpoints."""

    def __init__(self, api_url):
        """Remember the API root and open a reusable HTTP session."""
        self.api_url = api_url
        self.session = requests.Session()

    def get_single(self, url):
        """Fetch one URL through the shared session and return the response."""
        return self.session.get(url)

    def get_paginated_results(self, path):
        """Yield every item of a paginated listing, following `next` links."""
        page_url = f"{self.api_url}{path}"
        while page_url:
            payload = self.get_single(page_url).json()
            for item in payload["results"]:
                yield item
            page_url = payload["next"]

    def characters(self) -> Iterator[Dict]:
        """Iterate over all character records."""
        yield from self.get_paginated_results("people/")

    def planets(self) -> Iterator[Dict]:
        """Iterate over all planet records."""
        yield from self.get_paginated_results("planets/")
def make_client() -> Client:
    """Build a Client pointed at the API root from Django settings (SW_API)."""
    return Client(settings.SW_API)
| [
"marcin.sokol@amsterdam-standard.pl"
] | marcin.sokol@amsterdam-standard.pl |
437161a142bd229ffdd2c81c506024a62c472dfb | b512f28468a041d42532234adc0d3316461a6f48 | /Problems/Lightbulb/main.py | 2062190169e38075bc73289978bc51a361ff30d9 | [] | no_license | skyheat47295/CoffeeMachine | c1f50941958f95fd6589b6d73b0d5a02bc1aa5ae | cc18197b0235d4b99e19be996705e2cbfcfbf271 | refs/heads/master | 2023-02-01T04:34:48.470177 | 2020-12-17T05:28:42 | 2020-12-17T05:28:42 | 293,728,980 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | class Lightbulb:
def __init__(self):
self.state = "off"
# create method change_state here
def change_state(self):
if self.state == 'off':
self.state = 'on'
print('Turning the light on')
elif self.state == 'on':
self.state = 'off'
print('Turning the light off')
| [
"70936076+skyheatfet@users.noreply.github.com"
] | 70936076+skyheatfet@users.noreply.github.com |
d27f485cf41cde49c338f72a26820ce51fe441b1 | 5ea006b0a5ddead889755199fd5dfd0c42c4b3fb | /algorithms/web/web_server.py | bda92daeebbc0a896dfbe87037e11ce7f44743a9 | [] | no_license | wylswz/twitter_pride_vanity | e9629749af6a385fcb3dc962df8aedb2f8e2d81d | 06990527ed392fedab44bda7628b8b0656f759d0 | refs/heads/master | 2022-11-28T23:37:17.729835 | 2019-06-11T06:24:33 | 2019-06-11T06:24:33 | 176,043,124 | 1 | 1 | null | 2022-11-22T03:33:04 | 2019-03-17T01:29:56 | Python | UTF-8 | Python | false | false | 5,268 | py | """
Maintainer: Yunlu Wen <yunluw@student.unimelb.edu.au>
Web service for machine learning models.
- Load models by instanciating ModelWrapper class
- Do predictions and return result
"""
from flask import Flask, request, jsonify
from face_detection_wrapper.wrapper import FaceDetectionWrapper, SSDWrapper
from siamese_wrapper.SiameseWrapper import FaceVerification
import os
import traceback
import requests
import codecs
import uuid
import numpy as np
'''
export FLASK_APP=face_detection_wrapper/web_server.py
flask run
'''
# Directory for temporary files used by the service.
TEMP_PATH = '/var/flask_temp/'

app = Flask(__name__)

# Model checkpoint locations must come from the environment; the service
# cannot run without them, so fail fast at import time.
try:
    model_path = os.environ['DETECTION_MODEL_PATH']
    ssd_model_path = os.environ['SSD_MODEL_PATH']
    comparison_model_path = os.environ['COMPARISON_MODEL_PATH']
except KeyError:
    traceback.print_exc()
    exit(1)

# Load every model once at import time so all requests share warm instances.
fdw = FaceDetectionWrapper(model_path)
fvw = FaceVerification(comparison_model_path)
ssdw = SSDWrapper(ssd_model_path)
ssdw.load()
fdw.load()
fvw.load()

if not os.path.isdir(TEMP_PATH):
    os.makedirs(TEMP_PATH)
class Models:
    """String identifiers for the selectable detection model backends."""
    # Faster R-CNN with a ResNet-101 backbone.
    FASTER_RCNN_RESNET_101 = "FASTER_RCNN_RESNET_101"
    # SSD with a MobileNet v2 backbone.
    SSD_MOBILENET_V2 = "SSD_MOBILENET_V2"
@app.route('/api/v1/face_comparison', methods=['POST'])
def comparison():
    """Compare two faces and return a similarity score as JSON.

    Accepts either two multipart uploads (`face_1`, `face_2`) or two URLs
    (`face_1_url`, `face_2_url`). Any failure is reported as {"error": ...}.
    """
    try:
        res = {}
        if request.files.get('face_1') is not None and request.files.get('face_2') is not None:
            # Both faces supplied as multipart file uploads.
            face_1 = request.files['face_1']
            face_2 = request.files['face_2']
            res = fvw.predict([face_1, face_2])
        elif request.form.get('face_1_url') is not None and request.form.get('face_2_url') is not None:
            # Both faces supplied as URLs: download each into a uniquely
            # named temp file before prediction.
            file_1 = request.form['face_1_url']
            resp = requests.get(file_1)
            affix = file_1.split('.')[-1]
            temp_filename_1 = str(uuid.uuid4()) + '.' + affix
            with open(temp_filename_1, 'wb') as fp:
                fp.write(resp.content)
            file_2 = request.form['face_2_url']
            resp = requests.get(file_2)
            affix = file_2.split('.')[-1]
            temp_filename_2 = str(uuid.uuid4()) + '.' + affix
            with open(temp_filename_2, 'wb') as fp:
                fp.write(resp.content)
            # NOTE(review): these two handles are never closed, and the temp
            # files are created in the CWD rather than TEMP_PATH — confirm intent.
            res = fvw.predict([open(temp_filename_1, 'rb'), open(temp_filename_2, 'rb')])
            os.remove(temp_filename_1)
            os.remove(temp_filename_2)
        # res = fvw.predict([face_1, face_2])
        print(res)
        return jsonify({
            "similarity": float(res[0][0]),
            "Version": "Keras Application 1.0.7 @ Tensorflow 1.13 Backend",
            "Model": {
                "Info": "Siamese Network with Inception ResNet @ Iteration 4313 Epoch 4",
                "url": "https://github.com/wylswz/twitter_pride_vanity/tree/master/algorithms/siamese_net",
                "dataset": "vggface2",
            }
        })
    except Exception as e:
        traceback.print_exc()
        return jsonify({
            'error': str(e),
        })
@app.route('/api/v1/face_detection', methods=['POST'])
def detection():
    """Detect faces in an image and return boxes plus confidence scores.

    The image comes either as a multipart upload (`image_file`) or a URL
    (`image_url`); `model` selects the backend (default Faster R-CNN) and
    `limit` optionally truncates the result list.
    NOTE(review): a non-POST request falls through and returns None — the
    route only allows POST, so this branch should be unreachable; confirm.
    """
    if request.method == 'POST':
        model = request.form.get('model')
        if model is None:
            model = Models.FASTER_RCNN_RESNET_101
        try:
            # Default: empty result when neither input source is supplied.
            ml_res = {
                'detection_boxes': np.array([]),
                'detection_scores': np.array([])
            }
            if request.files.get('image_file') is not None:
                # NOTE(review): `file` shadows a historical builtin name.
                file = request.files['image_file']
                if model == Models.FASTER_RCNN_RESNET_101:
                    ml_res = fdw.predict(file)
                elif model == Models.SSD_MOBILENET_V2:
                    ml_res = ssdw.predict(file)
            elif request.form.get('image_url') is not None:
                # URL input: download to a uniquely named temp file first.
                file = request.form['image_url']
                resp = requests.get(file)
                affix = file.split('.')[-1]
                temp_filename = str(uuid.uuid4()) + '.' + affix
                with open(temp_filename, 'wb') as fp:
                    fp.write(resp.content)
                if model == Models.FASTER_RCNN_RESNET_101:
                    ml_res = fdw.predict(open(temp_filename, 'rb'))
                elif model == Models.SSD_MOBILENET_V2:
                    ml_res = ssdw.predict(open(temp_filename, 'rb'))
                os.remove(temp_filename)
            limit = request.form.get('limit')
            faces = ml_res['detection_boxes'].tolist()
            scores = ml_res['detection_scores'].tolist()
            if limit is not None:
                limit = int(limit)
                faces = faces[:limit]
                scores = scores[:limit]
            return jsonify({
                'faces': faces,
                'scores': scores,
                'format': ['ymin', 'xmin', 'ymax', 'xmax'],
                'version': 'Tensorflow 1.13.1',
                'model': {
                    "url": "https://github.com/tensorflow/models/tree/master/research/object_detection",
                    "info": "{0} Object-Detection".format(model),
                    "dataset": "WIDERFace"
                }
            })
        except Exception as e:
            traceback.print_exc()
            return jsonify({
                'error': str(e)
            })
if __name__ == "__main__":
    # Listen on all interfaces when launched directly (not via `flask run`).
    app.run(host='0.0.0.0', port=8000)
| [
"wylswz@icloud.com"
] | wylswz@icloud.com |
9b8c1f144c99095882b7063ecfbd996ea85ad76d | d2c4934325f5ddd567963e7bd2bdc0673f92bc40 | /tests/model_control/detailed/transf_Anscombe/model_control_one_enabled_Anscombe_PolyTrend_Seasonal_Second_MLP.py | 5f171286f0c82166bee8bda1463b423ed0a90ed9 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | jmabry/pyaf | 797acdd585842474ff4ae1d9db5606877252d9b8 | afbc15a851a2445a7824bf255af612dc429265af | refs/heads/master | 2020-03-20T02:14:12.597970 | 2018-12-17T22:08:11 | 2018-12-17T22:08:11 | 137,104,552 | 0 | 0 | BSD-3-Clause | 2018-12-17T22:08:12 | 2018-06-12T17:15:43 | Python | UTF-8 | Python | false | false | 162 | py | import pyaf.tests.model_control.test_ozone_custom_models_enabled as testmod
testmod.build_model( ['Anscombe'] , ['PolyTrend'] , ['Seasonal_Second'] , ['MLP'] ); | [
"antoine.carme@laposte.net"
] | antoine.carme@laposte.net |
0e298ed4aa62e6c117213f2b4ca3599c7a53721e | ed45e9d9d40a77e580fce6969d6d749d5dceecdb | /cond.4.py | bf2b25502284fb3e0dbfb3663b2309c02c0499e9 | [] | no_license | shlomi77/shlomi | ffb53380f7dcbfa2400cbc7bcc585579e2fbc390 | bf21ea0202b2fed85e35a9c2296472c7fdd61dca | refs/heads/master | 2022-11-28T10:51:42.886751 | 2020-07-14T12:01:12 | 2020-07-14T12:01:12 | 279,573,093 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 153 | py | num1= int (input("enter first number: "))
num2= int (input("enter second number: "))
num3 = num2-num1
if num3<0:
print(num3*-1)
else:
print(num3) | [
"60435684+shlomi77@users.noreply.github.com"
] | 60435684+shlomi77@users.noreply.github.com |
7d92713f9040d395e3c6e1e41acc82924c19a240 | 00a4a83470844856735efdf8d5249fce6e3f0cfb | /tools/evaluate1.py | ddc5e14f9577f3d8da9f055ff2d7d009237355cf | [
"MIT"
] | permissive | caiyancheng/DATA130050.01-Computer-Vision | 2f0296c70c2ee0fc9a8f81b606b7d4bbd0166e12 | 6ebc597c4ed96b3c59425c24a2dc81f49e49c5b0 | refs/heads/main | 2023-04-12T11:27:01.037078 | 2021-04-30T13:29:20 | 2021-04-30T13:29:20 | 358,606,293 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 9,475 | py | #!/usr/bin/python
# -*- encoding: utf-8 -*-
import sys
sys.path.insert(0, '.')
import os
import os.path as osp
import logging
import argparse
import math
from tabulate import tabulate
from tqdm import tqdm
import numpy as np
import cv2
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
from lib.models import model_factory
from configs import cfg_factory
from lib.logger import setup_logger
from lib.cityscapes_cv2 import get_data_loader
class MscEvalV0(object):
    """Whole-image multi-scale (optionally flipped) mIoU evaluator.

    Averages softmax probabilities over the configured scales (and
    horizontal flips), accumulates a confusion matrix, and returns the
    mean intersection-over-union. Works under torch.distributed: partial
    histograms are all-reduced before the final IoU computation.
    """

    def __init__(self, scales=(0.5, ), flip=False, ignore_label=255):
        self.scales = scales
        self.flip = flip
        self.ignore_label = ignore_label

    def __call__(self, net, dl, n_classes):
        ## evaluate
        # n_classes x n_classes confusion matrix, accumulated on GPU.
        hist = torch.zeros(n_classes, n_classes).cuda().detach()
        # Only rank 0 shows a progress bar in distributed runs.
        if dist.is_initialized() and dist.get_rank() != 0:
            diter = enumerate(dl)
        else:
            diter = enumerate(tqdm(dl))
        for i, (imgs, label) in diter:
            # print(label.shape)
            N, _, H, W = label.shape
            label = label.squeeze(1).cuda()
            size = label.size()[-2:]
            probs = torch.zeros(
                (N, n_classes, H, W), dtype=torch.float32).cuda().detach()
            for scale in self.scales:
                # Resize input to this scale, predict, then upsample logits
                # back to label resolution before averaging.
                sH, sW = int(scale * H), int(scale * W)
                im_sc = F.interpolate(imgs, size=(sH, sW),
                        mode='bilinear', align_corners=True)
                im_sc = im_sc.cuda()
                logits = net(im_sc)[0]
                logits = F.interpolate(logits, size=size,
                        mode='bilinear', align_corners=True)
                probs += torch.softmax(logits, dim=1)
                if self.flip:
                    # Average in the horizontally flipped prediction too.
                    im_sc = torch.flip(im_sc, dims=(3, ))
                    logits = net(im_sc)[0]
                    logits = torch.flip(logits, dims=(3, ))
                    logits = F.interpolate(logits, size=size,
                            mode='bilinear', align_corners=True)
                    probs += torch.softmax(logits, dim=1)
            preds = torch.argmax(probs, dim=1)
            # Exclude ignore-label pixels from the confusion matrix.
            keep = label != self.ignore_label
            hist += torch.bincount(
                label[keep] * n_classes + preds[keep],
                minlength=n_classes ** 2
            ).view(n_classes, n_classes)
        if dist.is_initialized():
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
        miou = ious.mean()
        return miou.item()
class MscEvalCrop(object):
    """Sliding-crop multi-scale mIoU evaluator.

    Pads each image up to the crop size when needed, evaluates overlapping
    crops on a stride grid, sums probabilities over crops, scales and
    (optionally) flips, then computes mean IoU from a confusion matrix.
    """

    def __init__(
        self,
        cropsize=1024,
        cropstride=2./3,
        flip=True,
        scales=[0.5, 0.75, 1, 1.25, 1.5, 1.75],
        lb_ignore=255,
    ):
        self.scales = scales
        self.ignore_label = lb_ignore
        self.flip = flip
        self.distributed = dist.is_initialized()

        # Accept a single int or an (H, W) pair for the crop size.
        self.cropsize = cropsize if isinstance(cropsize, (list, tuple)) else (cropsize, cropsize)
        self.cropstride = cropstride

    def pad_tensor(self, inten):
        """Center-pad to at least the crop size; return tensor and crop indices."""
        N, C, H, W = inten.size()
        cropH, cropW = self.cropsize
        # No padding needed: the whole image is the valid region.
        if cropH < H and cropW < W: return inten, [0, H, 0, W]
        padH, padW = max(cropH, H), max(cropW, W)
        outten = torch.zeros(N, C, padH, padW).cuda()
        outten.requires_grad_(False)
        marginH, marginW = padH - H, padW - W
        hst, hed = marginH // 2, marginH // 2 + H
        wst, wed = marginW // 2, marginW // 2 + W
        outten[:, :, hst:hed, wst:wed] = inten
        return outten, [hst, hed, wst, wed]

    def eval_chip(self, net, crop):
        """Predict class probabilities for one crop (plus optional flip).

        NOTE(review): torch.exp is applied on top of softmax output here —
        confirm this is intentional rather than a leftover from log-probs.
        """
        prob = net(crop)[0].softmax(dim=1)
        if self.flip:
            crop = torch.flip(crop, dims=(3,))
            prob += net(crop)[0].flip(dims=(3,)).softmax(dim=1)
        prob = torch.exp(prob)
        return prob

    def crop_eval(self, net, im, n_classes):
        """Slide the crop window over the (padded) image and sum chip probs."""
        cropH, cropW = self.cropsize
        stride_rate = self.cropstride
        im, indices = self.pad_tensor(im)
        N, C, H, W = im.size()

        strdH = math.ceil(cropH * stride_rate)
        strdW = math.ceil(cropW * stride_rate)
        n_h = math.ceil((H - cropH) / strdH) + 1
        n_w = math.ceil((W - cropW) / strdW) + 1
        prob = torch.zeros(N, n_classes, H, W).cuda()
        prob.requires_grad_(False)
        for i in range(n_h):
            for j in range(n_w):
                # Clamp the last crop to the image border.
                stH, stW = strdH * i, strdW * j
                endH, endW = min(H, stH + cropH), min(W, stW + cropW)
                stH, stW = endH - cropH, endW - cropW
                chip = im[:, :, stH:endH, stW:endW]
                prob[:, :, stH:endH, stW:endW] += self.eval_chip(net, chip)
        hst, hed, wst, wed = indices
        # Strip the padding back off.
        prob = prob[:, :, hst:hed, wst:wed]
        return prob

    def scale_crop_eval(self, net, im, scale, n_classes):
        """Crop-evaluate at one scale; return probs at the original size."""
        N, C, H, W = im.size()
        new_hw = [int(H * scale), int(W * scale)]
        im = F.interpolate(im, new_hw, mode='bilinear', align_corners=True)
        prob = self.crop_eval(net, im, n_classes)
        prob = F.interpolate(prob, (H, W), mode='bilinear', align_corners=True)
        return prob

    @torch.no_grad()
    def __call__(self, net, dl, n_classes):
        """Evaluate the model over the dataloader; return mean IoU."""
        # Progress bar only on rank 0 in distributed runs.
        dloader = dl if self.distributed and not dist.get_rank() == 0 else tqdm(dl)

        hist = torch.zeros(n_classes, n_classes).cuda().detach()
        hist.requires_grad_(False)
        for i, (imgs, label) in enumerate(dloader):
            imgs = imgs.cuda()
            label = label.squeeze(1).cuda()
            N, H, W = label.shape
            probs = torch.zeros((N, n_classes, H, W)).cuda()
            probs.requires_grad_(False)
            for sc in self.scales:
                probs += self.scale_crop_eval(net, imgs, sc, n_classes)
            torch.cuda.empty_cache()
            preds = torch.argmax(probs, dim=1)

            # Accumulate the confusion matrix over non-ignored pixels.
            keep = label != self.ignore_label
            hist += torch.bincount(
                label[keep] * n_classes + preds[keep],
                minlength=n_classes ** 2
            ).view(n_classes, n_classes)
        if self.distributed:
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
        miou = ious.mean()
        return miou.item()
@torch.no_grad()
def eval_model(net, ims_per_gpu, im_root, im_anns):
    """Evaluate `net` on the validation split under four protocols.

    Protocols: single-scale, single-scale with crops, multi-scale + flip,
    and multi-scale + flip with crops. The class count is hard-coded to 19
    (presumably Cityscapes — the data loader comes from lib.cityscapes_cv2).

    Returns:
        (heads, mious): protocol names and the matching mIoU values.
    """
    is_dist = dist.is_initialized()
    dl = get_data_loader(im_root, im_anns, ims_per_gpu, None,
                         None, mode='val', distributed=is_dist)
    net.eval()

    heads, mious = [], []
    logger = logging.getLogger()

    single_scale = MscEvalV0((1., ), False)
    mIOU = single_scale(net, dl, 19)
    heads.append('single_scale')
    mious.append(mIOU)
    logger.info('single mIOU is: %s\n', mIOU)

    single_crop = MscEvalCrop(
        cropsize=1024,
        cropstride=2. / 3,
        flip=False,
        scales=(1., ),
        lb_ignore=255,
    )
    mIOU = single_crop(net, dl, 19)
    heads.append('single_scale_crop')
    mious.append(mIOU)
    logger.info('single scale crop mIOU is: %s\n', mIOU)

    ms_flip = MscEvalV0((0.5, 0.75, 1, 1.25, 1.5, 1.75), True)
    mIOU = ms_flip(net, dl, 19)
    heads.append('ms_flip')
    mious.append(mIOU)
    logger.info('ms flip mIOU is: %s\n', mIOU)

    ms_flip_crop = MscEvalCrop(
        cropsize=1024,
        cropstride=2. / 3,
        flip=True,
        scales=(0.5, 0.75, 1.0, 1.25, 1.5, 1.75),
        lb_ignore=255,
    )
    mIOU = ms_flip_crop(net, dl, 19)
    heads.append('ms_flip_crop')
    mious.append(mIOU)
    logger.info('ms crop mIOU is: %s\n', mIOU)
    return heads, mious
def evaluate(cfg, weight_pth):
    """Build the model from cfg, load weights, run eval_model and log a table."""
    logger = logging.getLogger()

    ## model
    logger.info('setup and restore model')
    net = model_factory[cfg.model_type](19)
    # net = BiSeNetV2(19)
    net.load_state_dict(torch.load(weight_pth))
    net.cuda()

    # Wrap in DDP when launched under torch.distributed.
    is_dist = dist.is_initialized()
    if is_dist:
        local_rank = dist.get_rank()
        net = nn.parallel.DistributedDataParallel(
            net,
            device_ids=[local_rank, ],
            output_device=local_rank
        )

    ## evaluator
    heads, mious = eval_model(net, 2, cfg.im_root, cfg.val_im_anns)
    logger.info(tabulate([mious, ], headers=heads, tablefmt='orgtbl'))
def parse_args():
    """Parse the command-line options for evaluation."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--local_rank', dest='local_rank', type=int, default=-1)
    parser.add_argument(
        '--weight-path', dest='weight_pth', type=str,
        default='/remote-home/source/42/cyc19307140030/BisenetV1_new/tools/res/noSp_v1_2021419.pth')
    parser.add_argument('--port', dest='port', type=int, default=44553)
    parser.add_argument('--model', dest='model', type=str, default='bisenetv1')
    return parser.parse_args()
def main():
    """Entry point: init distributed (if requested), set up logging, evaluate."""
    args = parse_args()
    cfg = cfg_factory[args.model]
    # local_rank != -1 means we were launched by a distributed launcher.
    if not args.local_rank == -1:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl',
                init_method='tcp://127.0.0.1:{}'.format(args.port),
                world_size=torch.cuda.device_count(),
                rank=args.local_rank
                )
    if not osp.exists(cfg.respth): os.makedirs(cfg.respth)
    setup_logger('{}-eval'.format(cfg.model_type), cfg.respth)
    evaluate(cfg, args.weight_pth)


if __name__ == "__main__":
    main()
"19307140030@fudan.edu.cn"
] | 19307140030@fudan.edu.cn |
72b84609b62e01d63202d91ce2a4cecf241a1404 | e34ef37a8091ec1e09e56ca64f05137eff7ab5d5 | /blog/views.py | abec40abc377d0b8ee66aa1dba6cef3fdcda7a62 | [] | no_license | zyx124/mysite | 24d502750e98abd96316bbc2988a393437603552 | c86ef700838e9b93175ee47f4d426a4855c6e41a | refs/heads/master | 2020-06-24T07:25:08.560096 | 2019-12-18T15:48:56 | 2019-12-18T15:48:56 | 198,895,260 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,765 | py | from django.shortcuts import render, get_object_or_404
from django.http import HttpResponse
from django.utils.text import slugify
from markdown.extensions.toc import TocExtension
from .models import Post, Category, Tag
import markdown
import re
def index(request):
    """Render the home page listing all posts, newest first."""
    post_list = Post.objects.all().order_by('-created_time')
    return render(request, 'blog/index.html', context={
        'post_list': post_list
    })
def archive(request, year, month):
    """Render the index filtered to posts created in the given year/month."""
    post_list = Post.objects.filter(created_time__year=year,
                                    created_time__month=month
                                    ).order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': post_list})
def category(request, pk):
    """Render the index filtered to posts in the category with primary key pk."""
    cate = get_object_or_404(Category, pk=pk)
    post_list = Post.objects.filter(category=cate).order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': post_list})
def tag(request, pk):
    """Render the index filtered to posts carrying the tag with primary key pk."""
    t = get_object_or_404(Tag, pk=pk)
    post_list = Post.objects.filter(tags=t).order_by('-created_time')
    return render(request, 'blog/index.html', context={'post_list': post_list})
def detail(request, pk):
    """Render one post: bump its view counter and convert its Markdown body."""
    post = get_object_or_404(Post, pk=pk)
    # number of views
    post.increase_views()
    md = markdown.Markdown(extensions=[
        'markdown.extensions.extra',
        'markdown.extensions.codehilite',
        TocExtension(slugify=slugify),
    ])
    post.body = md.convert(post.body)
    # Keep only the inner <ul> of the generated table of contents; fall
    # back to '' when the post produced no headings.
    m = re.search(r'<div class="toc">\s*<ul>(.*)</ul>\s*</div>', md.toc, re.S)
    post.toc = m.group(1) if m is not None else ''
    return render(request, 'blog/detail.html', context={'post': post})
def contact(request):
    """Render the static contact page."""
    return render(request, 'blog/contact.html')
"zhaoyuxin124@gmail.com"
] | zhaoyuxin124@gmail.com |
436ccf2a85485e30cb63483d4ecbf2bbc4ceaa34 | 5c3ecd6974ebd714c42869081313614ba4f602a7 | /chapter_08/8-11_archived_messages.py | 892cc43cb0240b5730aca49379f7ac8ac7b5569e | [] | no_license | iloverugs/pcc_2e_student | fd491cb4d3c0546718554073a98386d8d7a499ec | 1c3ad17467c3d8deea9f3566b5eb66cbc6358a6d | refs/heads/main | 2023-04-13T01:17:21.164991 | 2021-04-23T01:09:23 | 2021-04-23T01:09:23 | 354,342,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 621 | py | # List containing series of short text messages
txt_msg_list = ['blahblahblah', 'imnotagossipbut', 'chatterchatterchatter']
snt_msgs = []
def show_messages(txt_msgs):
    """Print every text message in the list, first to last."""
    for message in txt_msgs:
        print(message)
def send_messages(txt_msgs):
    """Print each message (last to first), archive it in snt_msgs, empty the input.

    Matches the original pop()-based loop: messages are emitted and
    archived in reverse order, and the caller's list ends up empty.
    """
    for message in reversed(txt_msgs):
        print(message)
        snt_msgs.append(message)
    txt_msgs.clear()
# Call send_messages() with copy of list of messages.
send_messages(txt_msg_list[:])
# print both lists
print(txt_msg_list)
print(snt_msgs) | [
"81831508+iloverugs@users.noreply.github.com"
] | 81831508+iloverugs@users.noreply.github.com |
ff74906e1d238386e472c15bcbccc2be7332b80d | 07c6d3055eda7b1ddb16ce9444166ed311ce3219 | /clioinfra/api/api.py | 10ccb10f6b4e9681ba2e04d6ad25cbc7680efb5d | [] | no_license | IISH/dpe | 4df9b0576b5419e543c61ce9ef14380ddc4b5c03 | 6509b06aa03242f450766d4cb5d8984f14146b11 | refs/heads/master | 2021-01-10T17:52:54.775316 | 2016-05-04T09:50:46 | 2016-05-04T09:50:46 | 42,994,984 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 41,817 | py | # Copyright (C) 2015-2016 International Institute of Social History.
# @author Vyacheslav Tykhonov <vty@iisg.nl>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License, version 3,
# as published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# As a special exception, the copyright holders give permission to link the
# code of portions of this program with the OpenSSL library under certain
# conditions as described in each individual source file and distribute
# linked combinations including the program with the OpenSSL library. You
# must comply with the GNU Affero General Public License in all respects
# for all of the code used other than as permitted herein. If you modify
# file(s) with this exception, you may extend this exception to your
# version of the file(s), but you are not obligated to do so. If you do not
# wish to do so, delete this exception statement from your version. If you
# delete this exception statement from all source files in the program,
# then also delete it in the license file.
from flask import Flask, redirect, make_response, Response, render_template, request, send_from_directory
from twisted.web import http
import webbrowser
import json
import simplejson
import urllib2
import glob
import csv
import xlwt
import os
import shutil
import sys
import pprint
import collections
import ast
import getopt
import numpy as np
import stat
import pandas as pd
import random
import ConfigParser
from subprocess import Popen, PIPE, STDOUT
from random import randint
import brewer2mpl
import string
import pylab as plt
import re
from urllib import urlopen
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '../../modules')))
from advancedstatistics import loadpanel, statistics2table, handle2statistics, data2statistics, read_measure, statistics_tojson, advpanel2dict
from search import dataset_search, getindicators, dataverse_search, loadjson
import random, string
from download import get_papers, dataset2zip, compile2zip
from tabulardata import loadcodes, dataset_to_csv, load_api_data, countryset, json_dict, createframe, combinedata, data2panel, moderncodes, data2json
from config import configuration, dataverse2indicators, load_dataverse, findpid, load_metadata, load_fullmetadata, pidfrompanel
import matplotlib as mpl
from palettable.colorbrewer.sequential import Greys_8
from data2excel import panel2excel, individual_dataset
from historical import load_historical, histo
from scales import getcolors, showwarning, buildcategories, getscales, floattodec, combinerange, webscales, value2scale
from storage import *
from paneldata import build_panel, paneldatafilter, panel2dict, panel2csv
from datasets import *
from datacompiler import dataframe_compiler
from data2excel import panel2excel
from cliocore.configutils import Configuration, Utils, DataFilter
from cliocore.datasets import Dataset
# Function to create json from dict
def json_generator(c, jsondataname, data):
    """Serialize DB rows into a pretty-printed JSON string.

    Args:
        c: DB-API cursor; c.description supplies the column names.
        jsondataname: top-level key under which the row list is stored.
        data: iterable of row tuples aligned with c.description.

    Returns:
        JSON string of {jsondataname: [ {col: value, ...}, ... ]} with
        sorted keys and 4-space indentation.
    """
    sqlnames = [desc[0] for desc in c.description]
    jsonlist = []
    jsonhash = {}
    for valuestr in data:
        datakeys = {}
        for i in range(len(valuestr)):
            name = sqlnames[i]
            value = valuestr[i]
            datakeys[name] = value
        jsonlist.append(datakeys)
    jsonhash[jsondataname] = jsonlist
    # Bug fix: json.dumps() lost its Python 2-only `encoding` keyword in
    # Python 3 (it was a no-op utf-8 default anyway); passing it raises
    # TypeError on every modern interpreter.
    json_string = json.dumps(jsonhash, sort_keys=True, indent=4)
    return json_string
def randomword(length):
    """Return a random string of `length` lowercase ASCII letters."""
    # Bug fix: string.lowercase existed only on Python 2;
    # string.ascii_lowercase is available on both Python 2 and 3.
    return ''.join(random.choice(string.ascii_lowercase) for _ in range(length))
# Check if param is location
def is_location(param):
    """Return the leading 'c[<digits>]' match of `param`, or 0 when absent."""
    match = re.match(r"c\[\d+\]", param)
    return match.group(0) if match else 0
# Download ZIP archive
def downloadzip(pid):
    """Build a downloadable ZIP archive for the dataset(s) behind *pid*.

    Creates per-request temp and output directories, exports the data to
    Excel (panel export when *pid* resolves to multiple handles,
    per-handle export otherwise), fetches the accompanying papers from
    Dataverse, zips everything, and returns the archive file name inside
    the static tmp area.
    """
    DEBUG = 0
    (fullpath) = ('')
    fullmetadata = {}
    logscale = 0
    config = configuration()
    config['remote'] = 'on'
    API_TOKEN = config['key']
    HOSTNAME = config['dataverseroot']
    # SWORD statement endpoint prefix consumed by get_papers() below.
    cmd = "--insecure -u " + API_TOKEN + ": " + HOSTNAME + "/dvn/api/data-deposit/v1.1/swordv2/statement/study/"
    tmpdir = config['tmpdir']
    # Random name isolates concurrent download requests from each other.
    filerandom = randomword(10)
    #filerandom = '12345'
    arc = "data" + filerandom + ".zip"
    filename = filerandom
    finaldir = config['path'] + '/static/tmp'
    # ToDO
    if filename:
        finaldir = str(finaldir) + '/' + str(filename)
        tmpdir = str(tmpdir) + '/' + str(filename)
    try:
        os.mkdir(tmpdir)
        os.mkdir(finaldir)
    except:
        # Directories may already exist; mkdir failures are ignored.
        donothing = 'ok'
    customyear = ''
    fromyear = request.args.get('y[min]')
    toyear = request.args.get('y[max]')
    historical = request.args.get('type[0]')
    (handles, pidslist) = pidfrompanel(pid)
    try:
        if pidslist:
            fullmetadata = load_fullmetadata(pidslist)
    except:
        showwarning = 1
    # Log scales switch
    if request.args.get('logscale'):
        logscale = 1
    # Collect country selections from the c[N] query parameters into a
    # comma-separated code list.
    customcountrycodes = ''
    f = request.args
    for key in f.keys():
        if is_location(key):
            for value in sorted(f.getlist(key)):
                customcountrycodes = str(customcountrycodes) + str(value) + ','
    if customcountrycodes:
        customcountrycodes = customcountrycodes[:-1]
    if handles:
        # Panel export: one Excel file combining all handles.
        if historical:
            api = config['apiroot'] + "/collabs/static/data/historical.json"
            (regions, countries, ctr2reg, webmapper, geocoder) = histo(api, '')
            hist = countries
        else:
            hist = ''
        (classification, geodataset, title, units) = content2dataframe(config, config['geocoderhandle'])
        #geocoder = buildgeocoder(dataset, config)
        # NOTE(review): `dataset` is never assigned in this function
        # before this call — this line looks like it raises NameError
        # when reached; confirm against loadgeocoder()'s signature.
        (modern, historical) = loadgeocoder(config, dataset, 'geocoder')
        for handle in handles:
            #if remote:
            #    (class1, dataset) = loaddataset_fromurl(config, handle)
            #else:
            #    dataset = loaddataset(handles)
            #(cfilter, notint) = selectint(activeindex.values)
            #(moderndata, historicaldata) = loadgeocoder(dataset, '')
            # CHANGE
            #return str(dataset.index)
            # NOTE(review): data2panel() receives the full `handles`
            # list, so each loop pass repeats the same panel export and
            # appends another '.xls' to filename — verify intent.
            (header, panelcells, codes, datahub, data, handle2ind, unit2ind, original) = data2panel(handles, customcountrycodes, fromyear, toyear, customyear, hist, logscale)
            filename = filename + '.xls'
            fullpath = panel2excel(finaldir, filename, header, panelcells, fullmetadata)
    else:
        # Clio format download: one Excel file per handle.
        zipfile = get_papers(HOSTNAME, API_TOKEN, cmd, pid, tmpdir, arc, finaldir)
        (alonepid, revid, cliohandle, clearpid) = findpid(pid)
        if alonepid:
            handles = [ clearpid ]
        for pid in handles:
            if historical:
                api = config['apiroot'] + "/collabs/static/data/historical.json"
                (regions, countries, ctr2reg, webmapper, geocoder) = histo(api, '')
                hist = countries
            else:
                hist = ''
            filename = filename + '.xls'
            # 2DEBUG
            (header, panelcells, codes, datahub, data, handle2ind, unit2ind, originalvalues) = data2panel(handles, customcountrycodes, fromyear, toyear, customyear, hist, logscale)
            #codes = hist
            #return str(fullmetadata)
            metadata = fullmetadata
            result = individual_dataset(finaldir, filename, handle2ind[pid], unit2ind[pid], datahub, data[pid], codes, metadata)
    try:
        for everypid in handles:
            # Download papers
            zipfile = get_papers(HOSTNAME, API_TOKEN, cmd, everypid, tmpdir, arc, finaldir)
    except:
        nopapers = 1
    compile2zip(finaldir, arc)
    filename = arc
    return filename
def tableapi():
    """Build CSV table data for the current request's filters.

    Reads query parameters (loc, handle, dataset, hist, ctrlist,
    yearmin, yearmax, aggr, logscale), filters the requested dataset,
    and returns a ``(csvdata, aggrdata)`` tuple; when ``aggr`` is set
    the aggregated frame is returned as the main result too.
    """
    # years in filter
    config = configuration()
    switch = 'modern'
    datafilter = {}
    datafilter['ctrlist'] = ''
    customyear = ''
    fromyear = '1500'
    datafilter['startyear'] = fromyear
    toyear = '2012'
    datafilter['endyear'] = toyear
    customcountrycodes = ''
    (aggr, logscale, dataset, handles) = ('','','',[])
    # Select countries from repeated ?loc= parameters.
    f = request.args
    for key in f.keys():
        if key == 'loc':
            for value in sorted(f.getlist(key)):
                if value:
                    customcountrycodes = str(customcountrycodes) + str(value) + ','
    if customcountrycodes:
        customcountrycodes = customcountrycodes[:-1]
    #handle = "F16UDU"
    # HANDLE
    if request.args.get('handle'):
        handledataset = request.args.get('handle')
        try:
            # A Panel[...] expression resolves to its first handle.
            (pids, pidslist) = pidfrompanel(handledataset)
            handles.append(pids[0])
        except:
            handles.append(handledataset)
            nopanel = 'yes'
    if request.args.get('dataset'):
        dataset = request.args.get('dataset')
    if request.args.get('hist'):
        switch = 'historical'
    if request.args.get('ctrlist'):
        # Normalise the comma list (drops empty entries).
        customcountrycodes = ''
        tmpcustomcountrycodes = request.args.get('ctrlist')
        c = tmpcustomcountrycodes.split(',')
        for ids in sorted(c):
            if ids:
                customcountrycodes = str(customcountrycodes) + str(ids) + ','
        customcountrycodes = customcountrycodes[:-1]
        datafilter['ctrlist'] = customcountrycodes
        # 528 = fallback country code when the list ends up empty.
        if not customcountrycodes:
            customcountrycodes = '528'
    if request.args.get('yearmin'):
        fromyear = request.args.get('yearmin')
        datafilter['startyear'] = fromyear
    if request.args.get('yearmax'):
        toyear = request.args.get('yearmax')
        datafilter['endyear'] = toyear
    if request.args.get('aggr'):
        aggr = request.args.get('aggr')
    # Log scales switch
    if request.args.get('logscale'):
        logscale = request.args.get('logscale')
    DEBUG = 0
    # `old` is hard-wired off: the legacy JSON-file code path below is
    # kept for reference only.
    old = ''
    if old:
        apifile = str(dataset) + ".json"
        jsonapi = config['apiroot'] + "/collabs/static/data/" + apifile
        dataframe = load_api_data(jsonapi, '')
        loccodes = loadcodes(dataframe)
        (ctr, header) = countryset(customcountrycodes, loccodes)
        indicator = ''
        (frame, years, values, dates, original) = createframe(indicator, loccodes, dataframe, customyear, fromyear, toyear, ctr, logscale, DEBUG)
        names = ['indicator', 'm', 'ctrcode', 'country', 'year', 'intcode', 'value', 'id']
        (csvdata, aggrdata) = combinedata(ctr, frame, loccodes)
    # New version is fast
    else:
        (geocoder, geolist, oecd2webmapper, modern, historical) = request_geocoder(config, '')
        (origdata, maindata, metadata) = request_datasets(config, switch, modern, historical, handles, geolist)
        (subsets, panel) = ({}, [])
        for handle in handles:
            (datasubset, ctrlist) = datasetfilter(maindata[handle], datafilter)
            if not datasubset.empty:
                datasubset = datasubset.dropna(how='all')
                panel.append(datasubset)
                subsets[handle] = datasubset
        classification = modern
        if switch == 'historical':
            classification = historical
        # Only the first handle is exported to CSV.
        (csvdata, aggrdata) = dataset_to_csv(config, subsets[handles[0]], classification)
    if aggr:
        csvdata = aggrdata
    return (csvdata, aggrdata)
# Search API
def simplesearch(root, qurl, apiroot):
    """Resolve the indicators named in *qurl* against the topics index
    and return the matching datasets."""
    # Load topics from the datasets endpoint.
    #topicurl = apiroot + "/collabs/static/data/dataframe100_0.json"
    topic_url = apiroot + "/api/datasets?handle=Panel[%27hdl:10622/0PCZX5%27]"
    for entry in loadjson(topic_url):
        # As before, the 'data' payload of the last entry wins.
        topics = entry['data']
    # Indicator IDs requested by the caller's URL.
    indicator_ids = getindicators(qurl)
    return dataset_search(root, indicator_ids, topics)
def load_indicators(filename):
    """Load a tab-separated indicator table from the static data area
    and return it serialized as a JSON list of row objects."""
    config = configuration()
    csvfile = config['clearance'] + "/collabs/static/data/" + filename
    remote = urlopen(csvfile)
    frame = pd.read_csv(remote, delimiter='\t')
    # One dict per row, keyed by column name.
    records = []
    for row in frame.values:
        records.append(dict(zip(frame.columns, row)))
    return json.dumps(records)
def load_api_data1(apiurl, fileID):
    """Fetch *apiurl* and return the decoded JSON payload.

    *fileID* is accepted for signature compatibility but unused.
    """
    http_request = urllib2.Request(apiurl)
    response = urllib2.build_opener().open(http_request)
    return simplejson.load(response, "utf-8")
def searchdata(query):
    """Run *query* against the Dataverse search API and return the raw
    result serialized as JSON text."""
    config = configuration()
    searchapi = config['dataverseroot'] + "/api/search?q=" + query + "&key=" + config['key']
    results = load_api_data(searchapi, '')
    return json.dumps(results)
def medianlimits(dataframe):
    """Split the series around its median and return the five class
    boundaries (min, lower-half median, median, upper-half median, max)
    used to build map colour scales."""
    midpoint = dataframe.median()
    lower_half = [v for v in dataframe if v <= midpoint]
    upper_half = [v for v in dataframe if v > midpoint]
    lower_mid = pd.DataFrame(lower_half).median()
    upper_mid = pd.DataFrame(upper_half).median()
    return (dataframe.min(), int(lower_mid), int(midpoint), int(upper_mid), dataframe.max())
def meanlimits(dataframe):
    """Split the series around its mean and return the five class
    boundaries (min, lower-half mean, mean, upper-half mean, max) used
    to build map colour scales."""
    midpoint = dataframe.mean()
    lower_half = [v for v in dataframe if v <= midpoint]
    upper_half = [v for v in dataframe if v > midpoint]
    lower_mid = pd.DataFrame(lower_half).mean()
    upper_mid = pd.DataFrame(upper_half).mean()
    return (dataframe.min(), int(lower_mid), int(midpoint), int(upper_mid), dataframe.max())
def round_it(x):
    """Round *x* for display; requests for LCI* index codes keep five
    decimal places instead."""
    result = round(x)
    code = request.args.get('code')
    if code and re.match(r'LCI', code):
        result = float("{0:.5f}".format(x))
    return result
# Flask application object; every @app.route below registers on it.
app = Flask(__name__)
@app.route('/')
def test():
    """Root endpoint: short plain-text/HTML description of the API."""
    return 'nlgis2 API Service v.0.1<br>/api/maps (map polygons)<br>/api/data (data services)<br>'
@app.route('/demo')
def demo():
    """Demo endpoint: show the effect of sqlfilter() on a base query."""
    base_query = "select * from datasets.topics where 1=1"
    return sqlfilter(base_query)
@app.route('/progress')
def open():
    """Render the download-progress page with no download URL yet."""
    # NOTE(review): this view function shadows the builtin open() at
    # module level; renaming it would change the endpoint name Flask
    # derives from the function, so it is only flagged here.
    root = ''
    resp = make_response(render_template('progress.html', download=root))
    return resp
@app.route('/treemap')
def treemapweb():
    """Return treemap JSON for the requested dataset.

    Query parameters: handle / face (dataset handles), year, action
    (``showyears`` lists the available years instead of data), ctrlist,
    historical. The selected dataset is filtered and handed to
    buildtreemap().
    """
    (thisyear, datafilter, yearmin, lastyear, handles) = (0, {}, 1500, 2010, [])
    (action, switch, geodataset) = ('', 'modern', '')
    config = configuration()
    datafilter['startyear'] = yearmin
    datafilter['endyear'] = lastyear
    datafilter['ctrlist'] = ''
    handle = ''
    if request.args.get('handle'):
        handledataset = request.args.get('handle')
        try:
            # Panel[...] expressions resolve to their first handle.
            (pids, pidslist) = pidfrompanel(handledataset)
            handle = pids[0]
            handles.append(handle)
        except:
            handles.append(handledataset)
            nopanel = 'yes'
    if request.args.get('face'):
        handle = request.args.get('face')
        handles.append(handle)
    if request.args.get('year'):
        thisyear = request.args.get('year')
    if request.args.get('action'):
        action = request.args.get('action')
    if request.args.get('ctrlist'):
        datafilter['ctrlist'] = request.args.get('ctrlist')
    # An explicit year narrows the filter window to that single year.
    if int(thisyear) > 0:
        datafilter['startyear'] = int(thisyear)
        datafilter['endyear'] = int(thisyear)
    if request.args.get('historical'):
        switch = 'historical'
    # Geocoder
    (classification, geodataset, title, units) = content2dataframe(config, config['geocoderhandle'])
    #(modern, historical) = loadgeocoder(config, geodataset, 'geocoder')
    (geocoder, geolist, oecd2webmapper, modern, historical) = request_geocoder(config, '')
    if switch == 'modern':
        activeindex = modern.index
        coder = modern
        class1 = switch
    else:
        activeindex = historical.index
        coder = historical
        class1 = switch
    # Loading dataset in dataframe
    try:
        (class1, dataset, title, units) = content2dataframe(config, handle)
    except:
        return 'No dataset ' + handle
    (cfilter, notint) = selectint(activeindex.values)
    (origdata, maindata, metadata) = request_datasets(config, switch, modern, historical, handles, geolist)
    (subsets, panel) = ({}, [])
    # Show only available years
    if action == 'showyears':
        years = []
        datafilter['startyear'] = yearmin
        datafilter['endyear'] = lastyear
        (datasubset, ctrlist) = datasetfilter(maindata[handles[0]], datafilter)
        # Remove years without any values
        if not datafilter['ctrlist']:
            if np.nan in datasubset.index:
                datasubset = datasubset.drop(np.nan, axis=0)
        for colyear in datasubset.columns:
            if datasubset[colyear].count() == 0:
                datasubset = datasubset.drop(colyear, axis=1)
        (years, notyears) = selectint(datasubset.columns)
        # YEARS
        return Response(json.dumps(years), mimetype='application/json')
    # Process all indicators
    for handle in handles:
        (datasubset, ctrlist) = datasetfilter(maindata[handle], datafilter)
        if not datasubset.empty:
            #datasubset = datasubset.dropna(how='all')
            if not datafilter['ctrlist']:
                if np.nan in datasubset.index:
                    datasubset = datasubset.drop(np.nan, axis=0)
            panel.append(datasubset)
            subsets[handle] = datasubset
    # Only the first handle feeds the treemap.
    maindata = subsets[handles[0]]
    treemapdata = buildtreemap(config, maindata, switch, cfilter, coder)
    return Response(treemapdata, mimetype='application/json')
# Panel data
@app.route('/panel')
def panel():
    """Panel-data endpoint.

    Combines the requested handles into one panel, then either returns
    a CSV-like country-by-indicator table for the chosen (or best
    available) year, a JSON list of available years (``showyears``), or
    falls through to a plain-text response.
    """
    (thisyear, datafilter, handle, yearmin, yearmax, thisyear, ctrlist, lastyear, logscale) = (0, {}, '', '1500', '2020', 1950, '', 2010, '')
    handles = []
    config = configuration()
    datafilter['startyear'] = yearmin
    datafilter['endyear'] = lastyear
    datafilter['ctrlist'] = config['ctrlist']
    #modern = moderncodes(config['modernnames'], config['apiroot'])
    if request.args.get('handle'):
        # Strip spaces/quotes so Panel['hdl:...'] strings parse cleanly.
        handle = str(request.args.get('handle'))
        handle = handle.replace(" ", "")
        handle = handle.replace("'", "")
        try:
            (pids, pidslist) = pidfrompanel(handle)
            handles = pids
        except:
            nopanel = 'yes'
            handles.append(handle)
    if request.args.get('face'):
        facehandle = request.args.get('face')
        if facehandle not in handles:
            handles.append(facehandle)
    if request.args.get('dataset'):
        dataset = request.args.get('dataset')
    if request.args.get('ctrlist'):
        customcountrycodes = ''
        ctrlist = request.args.get('ctrlist')
        datafilter['ctrlist'] = ctrlist
    if request.args.get('logscale'):
        logscale = request.args.get('logscale')
    if request.args.get('year'):
        thisyear = request.args.get('year')
        datafilter['startyear'] = int(thisyear)
        datafilter['endyear'] = int(thisyear)
    if request.args.get('yearmin'):
        fromyear = request.args.get('yearmin')
        datafilter['startyear'] = fromyear
    if request.args.get('yearmax'):
        toyear = request.args.get('yearmax')
        datafilter['endyear'] = toyear
    if request.args.get('hist'):
        switch = 'historical'
        # Historical mode falls back to the configured country list.
        if datafilter['ctrlist'] == '':
            datafilter['ctrlist'] = config['histctrlist']
    else:
        switch = 'modern'
    (geocoder, geolist, oecd2webmapper, modern, historical) = request_geocoder(config, '')
    (origdata, maindata, metadata) = request_datasets(config, switch, modern, historical, handles, geolist)
    (subsets, subsetyears, panel) = ({}, [], [])
    for handle in handles:
        (datasubset, ctrlist) = datasetfilter(maindata[handle], datafilter)
        # Tag every row with its source handle for the per-handle
        # lookups further down.
        datasubset['handle'] = handle
        if not datasubset.empty:
            datasubset = datasubset.dropna(how='all')
            try:
                if np.nan in datasubset.index:
                    datasubset = datasubset.drop(np.nan, axis=0)
            except:
                skip = 'yes'
            # Drop year columns that carry no values at all.
            for year in datasubset:
                if datasubset[year].count() == 0:
                    datasubset = datasubset.drop(year, axis=1)
            (datayears, notyears) = selectint(datasubset.columns)
            panel.append(datasubset)
            subsets[handle] = datasubset
            subsetyears.append(datayears)
    dataframe = subsets
    ctrlimit = 10
    # Trying to find the best year with most filled data values
    try:
        bestyearlist = subsetyears[0]
        for i in range(1,len(subsetyears)):
            bestyearlist = list(set(bestyearlist) & set(subsetyears[i]))
        #bestyearlist = bestyearlist.sort()
        thisyear = bestyearlist[0]
    except:
        bestyearlist = []
    allcodes = {}
    panel = []
    names = {}
    for handle in dataframe:
        try:
            names[handle] = metadata[handle]['title']
        except:
            names[handle] = 'title'
        try:
            #(dataset, codes) = paneldatafilter(dataframe[handle], int(yearmin), int(yearmax), ctrlist, handle)
            dataset = dataframe[handle]
            if not dataset.empty:
                panel.append(dataset)
        except:
            nodata = 0
    if panel:
        totalpanel = pd.concat(panel)
        # NOTE(review): the dropna result is immediately overwritten, so
        # cleanedpanel is effectively totalpanel unchanged.
        cleanedpanel = totalpanel.dropna(axis=1, how='any')
        cleanedpanel = totalpanel
        #return str(cleanedpanel.to_html())
        totalpanel = cleanedpanel
        if int(thisyear) <= 0:
            thisyear = totalpanel.columns[-2]
        result = ''
        original = {}
        if thisyear:
            # NOTE(review): the if/if-else chain below means the
            # 'historical' assignment is always overridden by the
            # following if/else ('hist' is never a value of switch), so
            # geocoder ends up as `modern` — confirm intent.
            if switch == 'historical':
                geocoder = historical
            if switch == 'hist':
                geocoder = historical
            else:
                geocoder = modern
            # Build a CSV: one header row of titles, one row per country
            # code with one value column per handle.
            result = 'Country,'
            for handle in handles:
                result = result + str(metadata[handle]['title']) + ','
            result = result[:-1]
            known = {}
            for code in totalpanel.index:
                if str(code) not in known:
                    result = result + '\n' + str(geocoder.ix[int(code)][config['webmappercountry']])
                    for handle in handles:
                        tmpframe = totalpanel.loc[totalpanel['handle'] == handle]
                        try:
                            (thisval, original) = value2scale(tmpframe.ix[code][thisyear], logscale, original)
                        except:
                            thisval = 'NaN'
                        result = result + ',' + str(thisval)
                    known[str(code)] = code
            return Response(result, mimetype='text/plain')
    (allyears, notyears) = selectint(cleanedpanel.columns)
    (codes, notcodes) = selectint(cleanedpanel.index)
    cleanedpanel.index = codes
    (header, data, countries, handles, vhandles) = panel2dict(config, cleanedpanel, names)
    #return str(data)
    #thisyear = 1882
    #return str(countries)
    #return str(countries)
    years = []
    for year in sorted(data):
        try:
            years.append(int(year))
            lastyear = year
        except:
            skip = 1
    # Return only years
    if request.args.get('showyears'):
        yearsdata = {}
        yearsdata['years'] = years
        yearsdata['latestyear'] = lastyear
        #yearsdata['data'] = data
        yearsjson = json.dumps(yearsdata, ensure_ascii=False, sort_keys=True, indent=4)
        return Response(yearsjson, mimetype='application/json')
    return Response(result, mimetype='text/plain')
# Collabs
@app.route('/collabs')
def collabs():
    """Return project metadata for ?project=<uri> as JSON.

    Internal/sensitive fields are stripped from every record; as
    before, the JSON body holds the last matching record (or stays
    empty when no project was requested / found).
    """
    remove = ["date", "_id", "passwd"]
    (project, jsondata) = ('', '')
    data = {}
    if request.args.get('project'):
        project = request.args.get('project')
        data = readdata('projects', 'uri', project)
    for item in data:
        for r in remove:
            # Drop the field whenever it is present. The old truthiness
            # check (`if item[r]`) raised KeyError on absent keys and
            # leaked falsy values such as an empty password string.
            if r in item:
                del item[r]
        jsondata = json.dumps(item, encoding="utf-8", sort_keys=True, indent=4)
    return Response(jsondata, mimetype='application/json')
# Advanced statistiscs
@app.route('/advancedstats')
def advanced_statistics():
    """Render an HTML statistics table for the requested panel.

    Query parameters: handle (panel expression), dataset, yearmin,
    yearmax, ctrlist.
    """
    (yearmin, yearmax, ctrlist) = (1500, 2020, '')
    config = configuration()
    handles = []
    if request.args.get('handle'):
        handledataset = request.args.get('handle')
        handledataset = handledataset.replace(" ", '')
    # NOTE(review): when no ?handle= is given, `handledataset` is never
    # assigned and the jsonapi line below raises NameError — confirm
    # whether callers always pass handle.
    if request.args.get('dataset'):
        dataset = request.args.get('dataset')
        handles.append(dataset)
    if request.args.get('yearmin'):
        yearmin = request.args.get('yearmin')
    if request.args.get('yearmax'):
        yearmax = request.args.get('yearmax')
    if request.args.get('ctrlist'):
        ctrlist = request.args.get('ctrlist')
    modern = moderncodes(config['modernnames'], config['apiroot'])
    jsonapi = config['apiroot'] + '/api/datasets?handle=' + str(handledataset)
    (panel, cleanedpanel, names) = loadpanel(jsonapi, yearmin, yearmax, ctrlist)
    # advpanel2dict rebinds `handles` to the handles actually present
    # in the cleaned panel.
    (header, data, countries, handles, vhandles) = advpanel2dict(cleanedpanel)
    ctrlimit = 200
    #result = panel2csv(header, data, thisyear, countries, handles, vhandles, ctrlimit, modern)
    #maindataframe = data2statistics(handles, cleanedpanel)
    #showhtml = statistics_tojson(maindataframe, modern)
    data = handle2statistics(handles, cleanedpanel)
    showhtml = statistics2table(data)
    return showhtml
# Dataverse API
@app.route('/download')
def download():
    """Dataverse download endpoint.

    Depending on the request, builds either a panel Excel archive
    (Panel[...] pid), a single-dataset archive with its papers, or a
    generic ZIP via downloadzip(), and redirects (301) to the archive
    in the static tmp area.
    """
    (classification, pid, root, switch, datafile) = ('modern', '', '', 'modern', '')
    handle = ''
    config = configuration()
    # SWORD statement endpoint prefix consumed by get_papers().
    cmd = "--insecure -u " + config['key'] + ": " + config['dataverseroot'] + "/dvn/api/data-deposit/v1.1/swordv2/statement/study/"
    config['remote'] = ''
    datafilter = {}
    datafilter['startyear'] = '1500'
    datafilter['endyear'] = '2010'
    datafilter['ctrlist'] = ''
    tmpdir = config['tmpdir']
    # Random name isolates concurrent download requests.
    filerandom = randomword(10)
    #filerandom = '12345'
    arc = "data" + filerandom + ".zip"
    filename = filerandom
    finaldir = config['path'] + '/static/tmp'
    # ToDO
    if filename:
        finaldir = str(finaldir) + '/' + str(filename)
        tmpdir = str(tmpdir) + '/' + str(filename)
    try:
        os.mkdir(tmpdir)
        os.mkdir(finaldir)
    except:
        # Directories may already exist; ignore mkdir failures.
        donothing = 'ok'
    if request.args.get('handle'):
        handle = request.args.get('handle')
    if request.args.get('type[0]') == 'historical':
        classification = request.args.get('type[0]')
        switch = classification
    if request.args.get('y[min]'):
        datafilter['startyear'] = request.args.get('y[min]')
    if request.args.get('y[max]'):
        datafilter['endyear'] = request.args.get('y[max]')
    # Select countries from the c[N] query parameters.
    customcountrycodes = ''
    f = request.args
    for key in f.keys():
        if is_location(key):
            for value in sorted(f.getlist(key)):
                customcountrycodes = str(customcountrycodes) + str(value) + ','
    if customcountrycodes:
        customcountrycodes = customcountrycodes[:-1]
        datafilter['ctrlist'] = customcountrycodes
    if request.args.get('ctrlist'):
        datafilter['ctrlist'] = request.args.get('ctrlist')
    if request.args.get('pid'):
        pid = request.args.get('pid')
    ispanel = ''
    try:
        (pids, pidslist) = pidfrompanel(pid)
        handles = pids
        handle = pids[0]
        match = re.match(r'Panel\[(.+)\]', pid)
        if match:
            ispanel = 'yes'
    except:
        # FIX: the original assigned `handles = pid` (a string) and
        # `handle = pids[0]`, but `pids` is unbound when pidfrompanel()
        # raises, so this branch crashed with NameError.
        handles = [pid]
        handle = pid
    if ispanel:
        # Panel download: papers for every handle plus one combined
        # Excel workbook.
        dirforzip = ''
        for handle in handles:
            dirforzip = get_papers(config['dataverseroot'], config['key'], cmd, handle, tmpdir, arc, finaldir)
        (header, panelcells, metadata, totalpanel) = build_panel(config, switch, handles, datafilter)
        filename = "paneldata.xlsx"
        metadata = []
        datadir = config['webtest']
        localoutfile = panel2excel(dirforzip, filename, header, panelcells, metadata)
        arc = 'dataarchive.zip'
        compile2zip(dirforzip, arc)
        root = config['apiroot'] + "/collabs/static/tmp/" + str(arc)
        return redirect(root, code=301)
    # classification is always 'modern' or 'historical' here, so the
    # else-branch below is only reachable if that ever changes.
    if classification:
        outfile = "clioinfra.xlsx"
        dirforzip = get_papers(config['dataverseroot'], config['key'], cmd, handle, tmpdir, arc, finaldir)
        #fullpath = config['webtest'] + "/" + str(outfile)
        fullpath = dirforzip
        # Check selection
        isselection = 'yes'
        if datafilter['startyear'] == '1500':
            if datafilter['ctrlist'] == '':
                isselection = 'yes'
        if isselection:
            (datafile, outfilefinal, finalsubset) = dataframe_compiler(config, fullpath, handle, classification, datafilter)
            #return datafile.to_html()
        else:
            # Copy original dataset
            source = os.listdir(tmpdir)
            for excelfile in source:
                shutil.copy(tmpdir + '/' + excelfile, dirforzip)
        #return outfilefinal
        arc = 'dataarchive.zip'
        if datafile:
            arc = "%s_%s.zip" % (datafile, switch)
        compile2zip(dirforzip, arc)
        root = config['apiroot'] + "/collabs/static/tmp/" + str(arc)
        #root = config['apiroot'] + "/collabs/static/tmp/" + str(outfile)
        return redirect(root, code=301)
    else:
        zipfile = downloadzip(pid)
        root = config['apiroot'] + "/collabs/static/tmp/" + zipfile
        return redirect(root, code=301)
@app.route('/webmappercodes')
def webmapper():
    """Return the historical webmapper code table as JSON."""
    config = configuration()
    api = config['apiroot'] + "/collabs/static/data/historical.json"
    # histo() is called with (api, cfilter) at every other call site in
    # this module; the one-argument call here raised TypeError.
    (regions, countries, ctr2reg, webmapper, geocoder) = histo(api, '')
    data = json.dumps(webmapper, encoding="utf-8", sort_keys=True, indent=4)
    return Response(data, mimetype='application/json')
@app.route('/geofilter')
def geofilter():
    """Return geocoder entries as JSON, optionally restricted to the
    countries that actually carry data in the selected datasets and to
    the requested year window."""
    pids = []
    ctrlist = {}
    ctrfilter = []
    settings = Configuration()
    clioinfra = Dataset()
    clioindex = clioinfra.clioindex()
    # Metadata columns stripped before summing the data values.
    columns = ['1', 'Webmapper code', 'Webmapper numeric code', 'ccode', 'country name', 'start year', 'end year']
    (classification, geodataset, title, units) = content2dataframe(settings.config, settings.config['geocoderhandle'])
    # Rebind settings to the request-driven filter object.
    settings = DataFilter(request.args)
    if settings.selected():
        pids = clioinfra.findhandles(settings.selected())
        datasets = clioinfra.retrievedatasets(pids)
        selection = []
        for item in datasets:
            dataset = datasets[item]
            # Row 1 carries the real column names.
            dataset.columns = dataset.ix[1]
            dataset = dataset.convert_objects(convert_numeric=True)
            dataset.index = dataset['Webmapper numeric code']
            if (settings.minyear()):
                dataset = dataset.loc[dataset['start year'] >= settings.minyear()]
                dataset = dataset.loc[dataset['start year'] <= settings.maxyear()]
            for col in columns:
                dataset = dataset.drop(col, axis=1)
            # Keep only countries with at least one data value.
            dataset['total'] = dataset.sum(axis=1)
            dataset = dataset.ix[dataset['total'] > 0]
            selection.append(dataset.index)
        # De-duplicate country IDs across the selected datasets.
        for row in selection:
            for countryID in row:
                if countryID not in ctrlist:
                    ctrlist[countryID] = countryID
                    ctrfilter.append(countryID)
    geodataset = geodataset.convert_objects(convert_numeric=True)
    geodataset = geodataset.loc[geodataset['start year'] >= settings.minyear()]
    geodataset = geodataset.loc[geodataset['start year'] <= settings.maxyear()]
    if settings.showframe():
        geodataset.index = geodataset['Webmapper numeric code']
        if ctrfilter:
            geodataset = geodataset.ix[ctrfilter]
    (geocoder, geolist, oecd) = buildgeocoder(geodataset, settings.config, settings.countryfilter())
    data = json.dumps(geocoder, encoding="utf-8", sort_keys=True, indent=4)
    return Response(data, mimetype='application/json')
@app.route('/geocoder')
def geocoder():
    """Return the geocoder country table (optionally filtered by
    ?name=) as JSON."""
    config = configuration()
    config['remote'] = ''
    remote = 'on'
    # Geocoder
    handle = config['geocoderhandle']
    (classification, geodataset, title, units) = content2dataframe(config, config['geocoderhandle'])
    fromyear = 1500
    toyear = 2016
    cfilter = ''
    # The original read the ?name= parameter twice with two identical
    # if-blocks; once is enough.
    if request.args.get('name'):
        cfilter = request.args.get('name')
    if fromyear:
        # Legacy JSON-file path is hard-wired off.
        historical = ''
        if historical == 'old':
            api = config['apiroot'] + "/collabs/static/data/historical.json"
            (regions, countries, ctr2reg, webmapper, geocoder) = histo(api, cfilter)
        else:
            (geocoder, geolist, oecd) = buildgeocoder(geodataset, config, cfilter)
    data = json.dumps(geocoder, encoding="utf-8", sort_keys=True, indent=4)
    return Response(data, mimetype='application/json')
@app.route('/datasets')
def datasets():
    """Return stored dataset contents for ?handle=<panel expression>.

    ``format=json`` (default) returns a combined JSON document,
    ``format=csv`` the raw csvframe of the last dataset; ``latest``
    short-circuits to the configured default handle.
    """
    config = configuration()
    (jsondata, pid) = ('', '')
    handles = []
    combineddataset = []
    resultdataset = ''
    datainfo = []
    outformat = 'json'
    if request.args.get('format'):
        outformat = request.args.get('format')
    if request.args.get('handle'):
        pid = request.args.get('handle')
    if request.args.get('latest'):
        dataset = config['defaulthandle']
        return dataset
    if pid:
        (handles, pidslist) = pidfrompanel(pid)
        hquery = formdatasetquery(handles,'')
        datainfo = readdatasets('datasets', json.loads(hquery))
        #if not datainfo:
        #datainfo.append(pid)
    for dataset in datainfo:
        data = {}
        handle = dataset['handle']
        if outformat == 'json':
            # Stored payload is a Python-literal string; strip the
            # float artifacts and parse it safely.
            jsondata = str(dataset['data'])
            jsondata = jsondata.replace(".0,", ",")
            json_dict = ast.literal_eval(jsondata.strip())
            data['handle'] = handle
            try:
                data['title'] = dataset['title']
                data['units'] = dataset['units']
                data['datasetID'] = dataset['datasetID']
            except:
                # Fallback placeholders for records missing metadata.
                data['title'] = 'Title'
                data['units'] = 'Units'
                data['datasetID'] = 228
            data['data'] = json_dict
            combineddataset.append(data)
        elif outformat == 'csv':
            data['data'] = dataset['csvframe']
            resultdataset = data['data']
    if outformat == 'json':
        if combineddataset:
            finaldata = json.dumps(combineddataset, encoding="utf-8", sort_keys=True, indent=4)
            return Response(finaldata, mimetype='application/json')
    elif outformat == 'csv':
        return Response(resultdataset, mimetype='text/plain')
    # FIX: previously the view fell through and returned None (a Flask
    # 500) for unknown formats or an empty JSON result; return an empty
    # JSON document instead.
    return Response('[]', mimetype='application/json')
@app.route('/dialog')
def dialog():
    """Render the download dialog for ?pid=<handle>."""
    config = configuration()
    root = ''
    target = request.args.get('pid')
    if target:
        archive = downloadzip(target)
        root = config['clearance'] + "/collabs/static/tmp/" + archive
    return make_response(render_template('dialog.html', download=root))
@app.route('/dataverse')
def dataverse():
    """Search proxy: forward ?q= to the Dataverse search API, otherwise
    run the local simple search over the request URL."""
    config = configuration()
    root = config['dataverseroot']
    query = request.args.get('q') or ''
    apiurl = root + "/api/search?q=" + str(query) + "&key=" + config['key'] + "&type=dataset"
    url = request.url
    if query:
        rawdata = load_dataverse(apiurl)
    else:
        rawdata = simplesearch(root, url, config['apiroot'])
    #return rawdata
    try:
        payload = json.dumps(rawdata, encoding="utf-8", sort_keys=True, indent=4)
        return Response(payload, mimetype='application/json')
    except:
        return 'No data'
@app.route('/indicators')
def indicators():
    """Return the indicator topic index as JSON records, with each
    indicator linked to its stored dataset handle where one exists."""
    #data = load_indicators("indicators.csv")
    config = configuration()
    pid = config['topicindex']
    if pid:
        (handles, pidslist) = pidfrompanel(pid)
        hquery = formdatasetquery(handles,'')
        datainfo = readdatasets('datasets', json.loads(hquery))
        csvio = StringIO(str(datainfo[0]['csvframe']))
        data = pd.read_csv(csvio, sep='\t', dtype='unicode',quoting=csv.QUOTE_NONE)
        # Strip stray quotes from the column names.
        columns = []
        for item in data.columns:
            col = re.sub(r"\"", "", item)
            columns.append(col)
        data.columns = columns
        # Map indicator titles to stored dataset handles.
        storeddata = readdatasets('datasets', '')
        linking = {}
        for item in storeddata:
            try:
                linking[item['title']] = item['handle']
            except:
                skip = 'yes'
        data['handle'] = ''
        data = data.drop('ID', axis=1)
        for row in data.index:
            title = data.ix[row]['Name']
            # FIX: chained indexing (data.ix[row]['handle'] = ...)
            # assigns into a temporary copy and was silently lost; use
            # single-step .ix[row, 'handle'] indexing instead.
            try:
                data.ix[row, 'handle'] = linking[title]
            except:
                data.ix[row, 'handle'] = ''
        # FIX: DataFrame.to_csv() has no `orient` keyword and raised
        # TypeError; the JSON mimetype shows to_json(orient='records')
        # was intended.
        return Response(data.to_json(orient='records'), mimetype='application/json')
    else:
        return 'No data'
@app.route('/search')
def search():
    """Search endpoint: forward ?q= to the Dataverse search service."""
    results = searchdata(request.args.get('q'))
    return Response(results, mimetype='application/json')
def load_province_data(apiurl, province):
    """Fetch ``apiurl + province`` and return the decoded JSON
    document."""
    url = apiurl + province
    response = urllib2.build_opener().open(urllib2.Request(url))
    return simplejson.load(response)
def loadjson(url):
    """Fetch *url* and decode the response body as UTF-8 JSON."""
    opener = urllib2.build_opener()
    response = opener.open(urllib2.Request(url))
    return simplejson.load(response, "utf-8")
def get_iishvisitors_frame(thisyear):
    """Build a per-country visitor-totals DataFrame from the IISH
    visitors statistics service, one column per requested year.

    When *thisyear* is falsy, the 2015 and 2016 statistics are fetched.
    """
    years = [thisyear] if thisyear else [2015, 2016]
    finaldata = pd.DataFrame()
    for year in years:
        url = "http://visitors.collections.iisg.org/api/statistics/maps.php?year=" + str(year)
        payload = loadjson(url)
        frame = pd.DataFrame(payload['data'])
        frame = frame.convert_objects(convert_numeric=True)
        frame.columns = [u'coordinates', u'country', u'Webmapper numeric code', u'iso_code', u'total']
        frame.index = frame['Webmapper numeric code']
        if 'coordinates' in frame.columns:
            frame = frame.drop('coordinates', axis=1)
        # Keep only the totals, labelled by year.
        yearcolumn = pd.DataFrame(frame['total'])
        yearcolumn.columns = [year]
        finaldata = pd.concat([finaldata, yearcolumn], axis=1)
    return finaldata
@app.route('/dataapi')
def dataapi():
    """Map-data endpoint.

    Filters the requested dataset, converts it to map panel cells, and
    returns JSON: either the colour-scale legend (``getrange``) or the
    per-country dataset coloured into ``catmax`` categories.
    """
    (datafilter, handles) = ({}, [])
    datafilter['ctrlist'] = ''
    logscale = ''
    config = configuration()
    customyear = ''
    fromyear = '1500'
    toyear = '2012'
    categoriesMax = 6
    countriesNum = 200
    geocoder = ''
    (special, getrange, colormap, pallette, customcountrycodes, switch) = ('', '', '', '', '', 'modern')
    if request.args.get('special'):
        special = request.args.get('special')
    if request.args.get('logscale'):
        logscale = request.args.get('logscale')
    if request.args.get('year'):
        customyear = request.args.get('year')
        datafilter['startyear'] = customyear
        datafilter['endyear'] = customyear
    if request.args.get('catmax'):
        categoriesMax = int(request.args.get('catmax'))
    if request.args.get('getrange'):
        getrange = request.args.get('getrange')
    if request.args.get('colors'):
        pallette = request.args.get('colors')
    if request.args.get('colormap'):
        colormap = request.args.get('colormap')
    if request.args.get('geocoder'):
        switch = request.args.get('geocoder')
        # Legacy value 'on' means the modern geocoder.
        if switch == 'on':
            switch = 'modern'
    if request.args.get('handle'):
        handlestring = request.args.get('handle')
        # Extract a bare hdl:<prefix>/<id> from the parameter if present.
        ishandle = re.search(r'(hdl:\d+\/\w+)', handlestring)
        if ishandle:
            handle = ishandle.group(1)
            handle = handle.replace("'", "")
        else:
            handle = handlestring
        handles.append(handle)
    if request.args.get('ctrlist'):
        # Normalise the comma list (drops empty entries).
        customcountrycodes = ''
        tmpcustomcountrycodes = request.args.get('ctrlist')
        c = tmpcustomcountrycodes.split(',')
        for ids in sorted(c):
            if ids:
                customcountrycodes = str(customcountrycodes) + str(ids) + ','
        customcountrycodes = customcountrycodes[:-1]
        datafilter['ctrlist'] = tmpcustomcountrycodes
    hist = {}
    config = configuration()
    # Fewer selected countries than categories: shrink the category
    # count accordingly.
    try:
        if len(customcountrycodes):
            countriesNum = len(customcountrycodes.split(','))
            if countriesNum < categoriesMax:
                if countriesNum >= 1:
                    categoriesMax = countriesNum
    except:
        nothing = 1
    # Old version of panel data
    #(header, panelcells, codes, x1, x2, x3, x4, originalvalues) = data2panel(handles, customcountrycodes, fromyear, toyear, customyear, hist, logscale)
    panelcells = []
    # New version is fast
    if config:
        (geocoder, geolist, oecd2webmapper, modern, historical) = request_geocoder(config, '')
        (subsets, panel) = ({}, [])
        try:
            (origdata, maindata, metadata) = request_datasets(config, switch, modern, historical, handles, geolist)
            for handle in handles:
                (datasubset, ctrlist) = datasetfilter(maindata[handle], datafilter)
                if not datasubset.empty:
                    datasubset = datasubset.dropna(how='all')
                    panel.append(datasubset)
                    subsets[handle] = datasubset
        except:
            # Fallback: IISH visitor statistics for the requested year.
            subsets[handles[0]] = get_iishvisitors_frame(int(customyear))
        (panelcells, originalvalues) = dataset2panel(config, subsets[handles[0]], modern, logscale)
        #(header, panelcells, codes, x1, x2, x3, x4, originalvalues) = data2panel(handles, customcountrycodes, fromyear, toyear, customyear, hist, logscale)
        #modern = moderncodes(config['modernnames'], config['apiroot'])
        #jsondata = data2json(modern, codes, panelcells)
        #data = json.dumps(jsondata, ensure_ascii=False, sort_keys=True, indent=4)
    # SCALES
    if switch:
        if switch == 'historical':
            geocoder = historical
        else:
            geocoder = modern
    #geocoder = ''
    (defaultcolor, colors) = getcolors(categoriesMax, pallette, colormap)
    (catlimit, ranges, dataset) = getscales(config, panelcells, colors, categoriesMax, geocoder, originalvalues, switch, logscale)
    if getrange:
        # Legend request: return the colour ranges only.
        (showrange, tmprange) = combinerange(ranges)
        webscale = webscales(showrange, colors, defaultcolor)
        data = json.dumps(webscale, ensure_ascii=False, sort_keys=True, indent=4)
        return Response(data, mimetype='application/json')
    else:
        data = json.dumps(dataset, ensure_ascii=False, sort_keys=True, indent=4)
        return Response(data, mimetype='application/json')
@app.route('/tabledata')
def tabledata():
    """Plain-text view over the table API (main CSV result only)."""
    (csvdata, aggregated) = tableapi()
    return Response(csvdata, mimetype='text/plain')
# Run the Flask development server when executed directly.
if __name__ == '__main__':
    app.run()
| [
"4tikhonov@gmail.com"
] | 4tikhonov@gmail.com |
3b6b40f5005c7370ab0320d53609d897bbb2b6db | 80e4b743e12cafaa1aea581cd42a80e9a4b6367f | /infrastructure/application/presentateurs/presentateur_dict.py | ded1eef13eb0fab9a7cf274a92a15f3fcb1fa545 | [] | no_license | sroccaserra/kata-bitcoin-watcher | cb26aba87853c928d2f79826fba536763d7b77b5 | 064c3e6e9163f854d3685d6c4d57a015aae107e6 | refs/heads/master | 2021-01-01T19:12:57.915380 | 2017-10-25T17:55:20 | 2017-10-26T17:00:54 | 98,538,271 | 0 | 1 | null | 2017-10-25T17:55:38 | 2017-07-27T13:20:18 | Python | UTF-8 | Python | false | false | 392 | py | from typing import Dict, Any
from domaine.je_presente_la_reponse import JePresenteLaReponse
class PresentateurDict(JePresenteLaReponse):
    """Presenter that renders the broker's answer as a plain dict."""

    def __init__(self, courtier):
        # `courtier` (broker) is the domain collaborator queried below.
        self.courtier = courtier

    def est_ce_que_je_peux_acheter(self) -> Dict[str, Any]:
        """Ask the broker whether buying is possible and wrap the
        boolean answer in the API response shape."""
        return {'can_I_buy_bitcoins': self.courtier.est_ce_que_je_peux_acheter()}
| [
"sroccaserra@yahoo.com"
] | sroccaserra@yahoo.com |
c2d2b5d7a681a9da5144059d9d26d2b5fce68443 | c4b7399a10b7f963f625d8d15e0a8215ea35ef7d | /225.用队列实现栈.py | 30aed3284d9d2a250a13ade1686b9a592683c1e3 | [] | no_license | kangkang59812/LeetCode-python | a29a9788aa36689d1f3ed0e8b668f79d9ca43d42 | 276d2137a929e41120c2e8a3a8e4d09023a2abd5 | refs/heads/master | 2022-12-05T02:49:14.554893 | 2020-08-30T08:22:16 | 2020-08-30T08:22:16 | 266,042,478 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,823 | py | #
# @lc app=leetcode.cn id=225 lang=python3
#
# [225] 用队列实现栈
#
# https://leetcode-cn.com/problems/implement-stack-using-queues/description/
#
# algorithms
# Easy (64.30%)
# Likes: 158
# Dislikes: 0
# Total Accepted: 45.7K
# Total Submissions: 71.1K
# Testcase Example: '["MyStack","push","push","top","pop","empty"]\n[[],[1],[2],[],[],[]]'
#
# 使用队列实现栈的下列操作:
#
#
# push(x) -- 元素 x 入栈
# pop() -- 移除栈顶元素
# top() -- 获取栈顶元素
# empty() -- 返回栈是否为空
#
#
# 注意:
#
#
# 你只能使用队列的基本操作-- 也就是 push to back, peek/pop from front, size, 和 is empty
# 这些操作是合法的。
# 你所使用的语言也许不支持队列。 你可以使用 list 或者 deque(双端队列)来模拟一个队列 , 只要是标准的队列操作即可。
# 你可以假设所有操作都是有效的(例如, 对一个空的栈不会调用 pop 或者 top 操作)。
#
#
#
# @lc code=start
class MyStack:
    """LIFO stack built from a single FIFO queue (LeetCode 225).

    The original solution used a plain list as a stack, which sidesteps the
    exercise's constraint of using only queue operations (push to back,
    pop from front, size, is-empty).  This version keeps the same public
    interface but uses a deque strictly as a queue: each push re-rotates the
    queue so the newest element sits at the front, making pop/top O(1) and
    push O(n).
    """

    def __init__(self):
        """
        Initialize your data structure here.
        """
        from collections import deque  # local import keeps the block self-contained
        self.res = deque()

    def push(self, x: int) -> None:
        """
        Push element x onto stack.
        """
        self.res.append(x)
        # Rotate so the element just pushed becomes the queue's head.
        for _ in range(len(self.res) - 1):
            self.res.append(self.res.popleft())

    def pop(self) -> int:
        """
        Removes the element on top of the stack and returns that element.
        """
        return self.res.popleft()

    def top(self) -> int:
        """
        Get the top element.
        """
        return self.res[0]

    def empty(self) -> bool:
        """
        Returns whether the stack is empty.
        """
        return len(self.res) == 0
# Your MyStack object will be instantiated and called as such:
# obj = MyStack()
# obj.push(x)
# param_2 = obj.pop()
# param_3 = obj.top()
# param_4 = obj.empty()
# @lc code=end
| [
"596286458@qq.com"
] | 596286458@qq.com |
48cecd030f9a62127ce4d77f83fc4bd909564193 | 6afca65a7ada5b5eac8450977e0ff797819e1213 | /heapSort.py | e1ba2f06e2ea1bbaad80641a0f7427e6e8ec5ce6 | [] | no_license | dyqdzh/python-sorting-algorithm | 296df4e135e78acab0f848c11aa89d99740623fe | 718956e9ffa639d913da0db2675949ec2de74315 | refs/heads/master | 2023-03-10T21:00:57.904741 | 2021-02-20T04:07:27 | 2021-02-20T04:07:27 | 340,557,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 765 | py | from genRand import generateRandom
def buildMaxHeap(arr):
    """Turn ``arr`` into a max-heap by sifting down every internal node.

    NOTE(review): heapify() reads the module-level ``arrLen`` global, so this
    helper only works after ``arrLen`` has been set (heapSort does that);
    calling it standalone raises NameError — TODO confirm intended usage.
    """
    # Last internal node is at index len(arr)//2; walk back to the root.
    for i in range(len(arr)//2, -1, -1):
        heapify(arr, i)
def heapify(arr, i):
    """Sift the element at index ``i`` down until the subtree rooted there
    satisfies the max-heap property.

    The effective heap size is the module-level global ``arrLen`` (managed by
    heapSort), not ``len(arr)`` — this lets heapSort shrink the heap in place.
    """
    left = 2*i + 1    # left child index
    right = 2*i + 2   # right child index
    largest = i       # assume the subtree root is already the largest
    if left<arrLen and arr[left]>arr[largest]:
        largest = left
    if right<arrLen and arr[right]>arr[largest]:
        largest = right
    if largest != i:
        # Swap the larger child up and keep sifting down recursively.
        arr[i], arr[largest] = arr[largest], arr[i]
        heapify(arr, largest)
def heapSort(arr):
    """Sort ``arr`` in place (ascending) with heapsort and return it.

    Improvement over the original: the sort no longer depends on the
    module-level helpers that communicate through the mutable ``arrLen``
    global — a nested iterative sift-down takes the heap size explicitly,
    which also avoids recursion depth limits.  The ``arrLen`` global is
    still updated exactly as before so legacy callers of buildMaxHeap /
    heapify keep working.

    Handles empty and single-element lists trivially.
    """
    global arrLen
    arrLen = len(arr)

    def _sift_down(size, root):
        # Restore the max-heap property for the subtree at ``root``,
        # considering only the first ``size`` elements of ``arr``.
        while True:
            left, right, largest = 2*root + 1, 2*root + 2, root
            if left < size and arr[left] > arr[largest]:
                largest = left
            if right < size and arr[right] > arr[largest]:
                largest = right
            if largest == root:
                return
            arr[root], arr[largest] = arr[largest], arr[root]
            root = largest

    # Build the max-heap bottom-up.
    for i in range(len(arr)//2, -1, -1):
        _sift_down(len(arr), i)
    # Repeatedly move the current maximum to the end and shrink the heap.
    for end in range(len(arr)-1, 0, -1):
        arr[end], arr[0] = arr[0], arr[end]
        arrLen -= 1  # side effect preserved for backward compatibility
        _sift_down(end, 0)
    return arr
if __name__ == "__main__":
    # Demo: sort 15 random numbers (generateRandom comes from the local
    # genRand module) and show the list before and after sorting.
    arr = generateRandom(15)
    print(arr)
    print(heapSort(arr))
| [
"du@dyq.local"
] | du@dyq.local |
13c20c67cc5595d399851d61d3c799180d386400 | 5f3d65d155bbbe9bb789dfd503eec1d919c36975 | /MyGDAS/lib/dataset/GenDataset.py | efbfac5b9c57e210b97abdcbacca773a31bb57cc | [] | no_license | MartrixG/CODES | 1987d62e4a57b6f4c49eabc644f75c9cc1d0eac5 | c185f41b08df9305ca5d3e9b1f97aae7118f3281 | refs/heads/master | 2022-12-06T18:19:56.426454 | 2020-09-01T02:35:13 | 2020-09-01T02:35:13 | 202,117,564 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,676 | py | import torch
import random
from torch.utils.data import Dataset, DataLoader
from copy import deepcopy
class GenDataset(Dataset):
    """Paired dataset: each item couples one fixed training sample with a
    randomly drawn validation sample from a disjoint split.

    All source data and split index lists are deep-copied so that later
    mutation by the caller cannot leak into the dataset.
    """

    def __init__(self, name, src_data, train_split, valid_split, check=True):
        self.name = name
        self.feature = deepcopy(src_data[0])
        self.label = deepcopy(src_data[1])
        self.train_split = deepcopy(train_split)
        self.valid_split = deepcopy(valid_split)
        if check:
            # The two splits must not share any index.
            overlap = set(train_split).intersection(set(valid_split))
            assert len(overlap) == 0, 'the split train and validation sets should have no intersection'
        self.length = len(self.train_split)

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        assert 0 <= index < self.length, 'invalid index = {:}'.format(index)
        t_idx = self.train_split[index]
        v_idx = random.choice(self.valid_split)
        return (self.feature[t_idx], self.label[t_idx],
                self.feature[v_idx], self.label[v_idx])
class NormalDataset(Dataset):
    """Plain (feature, label) dataset over deep-copied source data."""

    def __init__(self, name, src_data):
        self.name = name
        # Deep copies decouple the dataset from the caller's objects.
        self.feature = deepcopy(src_data[0])
        self.label = deepcopy(src_data[1])
        self.length = len(self.feature)

    def __len__(self):
        return self.length

    def __getitem__(self, index):
        assert 0 <= index < self.length, 'invalid index = {:}'.format(index)
        return self.feature[index], self.label[index]
"44332912+1171000405@users.noreply.github.com"
] | 44332912+1171000405@users.noreply.github.com |
af55b3dc1dd23f8c60139a6b3f4f4d84a9010f7e | f1cd386753ca846781a7614dc39da560767ca41a | /test29.py | ed80da4ed45a60c9446b007ac74c60279266186e | [] | no_license | nyqtofovian/py_folder | 9a0d1371620a406d00fe6002827a58bbb4d0b7ab | bbd9b74fe286cc006e7212c3b24aa0fca2fbd3db | refs/heads/master | 2023-03-10T01:34:56.079713 | 2021-02-27T01:06:59 | 2021-02-27T01:06:59 | 342,740,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | def calc_tri(base = 0, height = 0):
return base * height * 0.5
res = calc_tri(50, 20)
print(res)
print(calc_tri()) | [
"neqrophobia@gmail.com"
] | neqrophobia@gmail.com |
5ae6ac367f0d37a90760c593c146b4bb56ce37ed | d8658a79dee03cf5a82ea22e627c281f6b0fe1ba | /Greendub_Flask_Project/flask_fp/posts/routes.py | 48cd9c88b2b5ea08e4cb0cb9dd4d7ae0bf65691b | [] | no_license | BiT-2/Citizen-Science-Web-App | f4a942fd8535d39b9b640655dae29052a03ce564 | 94589e746edb95ed67dd9aed5523fda010b64a24 | refs/heads/master | 2022-11-18T02:11:30.071316 | 2020-07-16T01:06:13 | 2020-07-16T01:06:13 | 280,016,892 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | from flask import render_template, url_for, flash, redirect, request, abort, Blueprint
from flask_login import current_user, login_required
from flask_fp import db
from flask_fp.models import Post, Image
from flask_fp.posts.forms import PostForm
from flask_fp.users.utils import save_picture_posted
posts = Blueprint('posts', __name__)
@posts.route("/post/new", methods = ['GET', 'POST'])
@login_required
def new_post():
    """Create a new post with an uploaded picture and its recognition labels.

    GET renders the empty form; a valid POST saves the picture, stores the
    Post plus one Image row per detected label, and redirects home.

    Fix: removed the unused ``current_post_id`` local — it read ``post.id``
    before the commit, so it was always None and never used.
    """
    form = PostForm()
    if form.validate_on_submit():
        # save_picture_posted returns (saved_filename, recognition_labels).
        picture_file, response = save_picture_posted(form.picture.data)
        post = Post(title=form.title.data, content=form.content.data,
                    author=current_user, image_post=picture_file)
        db.session.add(post)
        for label in response:
            # NOTE(review): post.id is still None here; presumably the
            # image_trans relationship fills in post_id at flush time —
            # TODO confirm against the models.
            image_recg = Image(label=label['Name'],
                               confidence=str(label['Confidence']),
                               image_trans=post, post_id=post.id)
            db.session.add(image_recg)
        db.session.commit()
        flash('Post Created', 'success')
        return redirect(url_for('main.home'))
    return render_template('create_post.html', title = 'New Post', form = form, legend ='New Post')
@posts.route("/post/<int:post_id>")
def post(post_id):
    """Render a single post together with its image-recognition labels."""
    found = Post.query.get_or_404(post_id)
    labels = Image.query.filter_by(post_id=post_id).all()
    return render_template('post.html', title=found.title,
                           post=found, image_trans=labels)
@posts.route("/post/<int:post_id>/update", methods = ['GET', 'POST'])
@login_required
def update_post(post_id):
    """Let the author edit a post's title, content and picture.

    Returns 403 when the current user is not the author.  On a valid POST
    the post is updated and the browser is redirected to the post page;
    on GET the form is pre-filled with the current values.
    """
    post = Post.query.get_or_404(post_id)
    if post.author != current_user:
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        post.title = form.title.data
        post.content = form.content.data
        # BUG FIX: save_picture_posted returns a (filename, labels) tuple
        # (see new_post); the original stored the whole tuple in image_post.
        picture_file, _ = save_picture_posted(form.picture.data)
        post.image_post = picture_file
        db.session.commit()
        flash('Updated successfully', 'success')
        return redirect(url_for('posts.post', post_id=post.id))
    elif request.method == 'GET':
        form.title.data = post.title
        form.content.data = post.content
    return render_template('create_post.html', title = 'Update Post', form = form, legend = 'Update Post')
@posts.route("/post/<int:post_id>/dalete", methods = ['POST'])
# NOTE(review): "dalete" above looks like a typo for "delete".  url_for()
# callers are unaffected (they address the endpoint name delete_post), but
# fixing the path would change the public URL, so it is only flagged here.
@login_required
def delete_post(post_id):
    """Delete one of the current user's posts, then redirect home."""
    # Only the author may delete; anyone else gets 403.
    post = Post.query.get_or_404(post_id)
    if post.author != current_user:
        abort(403)
    db.session.delete(post)
    db.session.commit()
    flash('Post successfully deleted', 'success')
    return redirect(url_for('main.home'))
| [
"vighnesh@uw.edi"
] | vighnesh@uw.edi |
9cc6e234709a6d622d8926ba526193a4ea03dac5 | 3ef70fe63acaa665e2b163f30f1abd0a592231c1 | /stackoverflow/venv/lib/python3.6/site-packages/twisted/conch/test/test_conch.py | dbbe3beb5646d0d4a32b80d2eb395ec44f5e24af | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | wistbean/learn_python3_spider | 14914b63691ac032955ba1adc29ad64976d80e15 | 40861791ec4ed3bbd14b07875af25cc740f76920 | refs/heads/master | 2023-08-16T05:42:27.208302 | 2023-03-30T17:03:58 | 2023-03-30T17:03:58 | 179,152,420 | 14,403 | 3,556 | MIT | 2022-05-20T14:08:34 | 2019-04-02T20:19:54 | Python | UTF-8 | Python | false | false | 25,140 | py | # -*- test-case-name: twisted.conch.test.test_conch -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
import os, sys, socket
import subprocess
from itertools import count
from zope.interface import implementer
from twisted.python.reflect import requireModule
cryptography = requireModule("cryptography")
from twisted.conch.error import ConchError
if cryptography:
from twisted.conch.avatar import ConchUser
from twisted.conch.ssh.session import ISession, SSHSession, wrapProtocol
else:
from twisted.conch.interfaces import ISession
class ConchUser: pass
from twisted.cred import portal
from twisted.internet import reactor, defer, protocol
from twisted.internet.error import ProcessExitedAlready
from twisted.internet.task import LoopingCall
from twisted.internet.utils import getProcessValue
from twisted.python import filepath, log, runtime
from twisted.python.compat import unicode
from twisted.trial import unittest
try:
from twisted.conch.scripts.conch import SSHSession as StdioInteractingSession
except ImportError as e:
StdioInteractingSession = None
_reason = str(e)
del e
from twisted.conch.test.test_ssh import ConchTestRealm
from twisted.python.procutils import which
from twisted.conch.test.keydata import publicRSA_openssh, privateRSA_openssh
from twisted.conch.test.keydata import publicDSA_openssh, privateDSA_openssh
try:
from twisted.conch.test.test_ssh import ConchTestServerFactory, \
conchTestPublicKeyChecker
except ImportError:
pass
try:
import cryptography
except ImportError:
cryptography = None
try:
import pyasn1
except ImportError:
pyasn1 = None
def _has_ipv6():
""" Returns True if the system can bind an IPv6 address."""
sock = None
has_ipv6 = False
try:
sock = socket.socket(socket.AF_INET6)
sock.bind(('::1', 0))
has_ipv6 = True
except socket.error:
pass
if sock:
sock.close()
return has_ipv6
HAS_IPV6 = _has_ipv6()
class FakeStdio(object):
    """
    A stand-in for the stdio transport used when testing
    L{twisted.conch.scripts.conch.SSHSession.eofReceived} and
    L{twisted.conch.scripts.cftp.SSHSession.eofReceived}.

    @ivar writeConnLost: C{True} once L{loseWriteConnection} has been called.
    """
    writeConnLost = False

    def loseWriteConnection(self):
        """
        Record that the write half of the connection was dropped.
        """
        self.writeConnLost = True
class StdioInteractingSessionTests(unittest.TestCase):
"""
Tests for L{twisted.conch.scripts.conch.SSHSession}.
"""
if StdioInteractingSession is None:
skip = _reason
def test_eofReceived(self):
"""
L{twisted.conch.scripts.conch.SSHSession.eofReceived} loses the
write half of its stdio connection.
"""
stdio = FakeStdio()
channel = StdioInteractingSession()
channel.stdio = stdio
channel.eofReceived()
self.assertTrue(stdio.writeConnLost)
class Echo(protocol.Protocol):
    """Protocol that echoes received data back and disconnects on newline."""

    def connectionMade(self):
        log.msg('ECHO CONNECTION MADE')

    def connectionLost(self, reason):
        log.msg('ECHO CONNECTION DONE')

    def dataReceived(self, data):
        # Echo everything back; a newline marks the end of the exchange.
        self.transport.write(data)
        if b'\n' in data:
            self.transport.loseConnection()
class EchoFactory(protocol.Factory):
    """Factory producing Echo protocol instances for the test servers."""
    protocol = Echo
class ConchTestOpenSSHProcess(protocol.ProcessProtocol):
    """
    Test protocol for launching an OpenSSH client process.

    @ivar deferred: Set by whatever uses this object.  Accessed using
        L{_getDeferred}, which destroys the value so the Deferred is not
        fired twice.  Fires when the process is terminated.
    """
    deferred = None
    buf = b''

    def _getDeferred(self):
        # Hand the Deferred out exactly once; later calls return None.
        d, self.deferred = self.deferred, None
        return d

    def outReceived(self, data):
        # Accumulate the child's stdout for inspection once it exits.
        self.buf += data

    def processEnded(self, reason):
        """
        Called when the process has ended.

        @param reason: a Failure giving the reason for the process' end.
        """
        if reason.value.exitCode != 0:
            self._getDeferred().errback(
                ConchError("exit code was not 0: {}".format(
                    reason.value.exitCode)))
        else:
            # Normalize CRLF so assertions can compare against '\n' text.
            buf = self.buf.replace(b'\r\n', b'\n')
            self._getDeferred().callback(buf)
class ConchTestForwardingProcess(protocol.ProcessProtocol):
"""
Manages a third-party process which launches a server.
Uses L{ConchTestForwardingPort} to connect to the third-party server.
Once L{ConchTestForwardingPort} has disconnected, kill the process and fire
a Deferred with the data received by the L{ConchTestForwardingPort}.
@ivar deferred: Set by whatever uses this object. Accessed using
L{_getDeferred}, which destroys the value so the Deferred is not
fired twice. Fires when the process is terminated.
"""
deferred = None
def __init__(self, port, data):
"""
@type port: L{int}
@param port: The port on which the third-party server is listening.
(it is assumed that the server is running on localhost).
@type data: L{str}
@param data: This is sent to the third-party server. Must end with '\n'
in order to trigger a disconnect.
"""
self.port = port
self.buffer = None
self.data = data
def _getDeferred(self):
d, self.deferred = self.deferred, None
return d
def connectionMade(self):
self._connect()
def _connect(self):
"""
Connect to the server, which is often a third-party process.
Tries to reconnect if it fails because we have no way of determining
exactly when the port becomes available for listening -- we can only
know when the process starts.
"""
cc = protocol.ClientCreator(reactor, ConchTestForwardingPort, self,
self.data)
d = cc.connectTCP('127.0.0.1', self.port)
d.addErrback(self._ebConnect)
return d
def _ebConnect(self, f):
reactor.callLater(.1, self._connect)
def forwardingPortDisconnected(self, buffer):
"""
The network connection has died; save the buffer of output
from the network and attempt to quit the process gracefully,
and then (after the reactor has spun) send it a KILL signal.
"""
self.buffer = buffer
self.transport.write(b'\x03')
self.transport.loseConnection()
reactor.callLater(0, self._reallyDie)
def _reallyDie(self):
try:
self.transport.signalProcess('KILL')
except ProcessExitedAlready:
pass
def processEnded(self, reason):
"""
Fire the Deferred at self.deferred with the data collected
from the L{ConchTestForwardingPort} connection, if any.
"""
self._getDeferred().callback(self.buffer)
class ConchTestForwardingPort(protocol.Protocol):
"""
Connects to server launched by a third-party process (managed by
L{ConchTestForwardingProcess}) sends data, then reports whatever it
received back to the L{ConchTestForwardingProcess} once the connection
is ended.
"""
def __init__(self, protocol, data):
"""
@type protocol: L{ConchTestForwardingProcess}
@param protocol: The L{ProcessProtocol} which made this connection.
@type data: str
@param data: The data to be sent to the third-party server.
"""
self.protocol = protocol
self.data = data
def connectionMade(self):
self.buffer = b''
self.transport.write(self.data)
def dataReceived(self, data):
self.buffer += data
def connectionLost(self, reason):
self.protocol.forwardingPortDisconnected(self.buffer)
def _makeArgs(args, mod="conch"):
start = [sys.executable, '-c'
"""
### Twisted Preamble
import sys, os
path = os.path.abspath(sys.argv[0])
while os.path.dirname(path) != path:
if os.path.basename(path).startswith('Twisted'):
sys.path.insert(0, path)
break
path = os.path.dirname(path)
from twisted.conch.scripts.%s import run
run()""" % mod]
madeArgs = []
for arg in start + list(args):
if isinstance(arg, unicode):
arg = arg.encode("utf-8")
madeArgs.append(arg)
return madeArgs
class ConchServerSetupMixin:
if not cryptography:
skip = "can't run without cryptography"
if not pyasn1:
skip = "Cannot run without PyASN1"
realmFactory = staticmethod(lambda: ConchTestRealm(b'testuser'))
def _createFiles(self):
for f in ['rsa_test','rsa_test.pub','dsa_test','dsa_test.pub',
'kh_test']:
if os.path.exists(f):
os.remove(f)
with open('rsa_test','wb') as f:
f.write(privateRSA_openssh)
with open('rsa_test.pub','wb') as f:
f.write(publicRSA_openssh)
with open('dsa_test.pub','wb') as f:
f.write(publicDSA_openssh)
with open('dsa_test','wb') as f:
f.write(privateDSA_openssh)
os.chmod('dsa_test', 33152)
os.chmod('rsa_test', 33152)
with open('kh_test','wb') as f:
f.write(b'127.0.0.1 '+publicRSA_openssh)
def _getFreePort(self):
s = socket.socket()
s.bind(('', 0))
port = s.getsockname()[1]
s.close()
return port
def _makeConchFactory(self):
"""
Make a L{ConchTestServerFactory}, which allows us to start a
L{ConchTestServer} -- i.e. an actually listening conch.
"""
realm = self.realmFactory()
p = portal.Portal(realm)
p.registerChecker(conchTestPublicKeyChecker())
factory = ConchTestServerFactory()
factory.portal = p
return factory
def setUp(self):
self._createFiles()
self.conchFactory = self._makeConchFactory()
self.conchFactory.expectedLoseConnection = 1
self.conchServer = reactor.listenTCP(0, self.conchFactory,
interface="127.0.0.1")
self.echoServer = reactor.listenTCP(0, EchoFactory())
self.echoPort = self.echoServer.getHost().port
if HAS_IPV6:
self.echoServerV6 = reactor.listenTCP(0, EchoFactory(), interface="::1")
self.echoPortV6 = self.echoServerV6.getHost().port
def tearDown(self):
try:
self.conchFactory.proto.done = 1
except AttributeError:
pass
else:
self.conchFactory.proto.transport.loseConnection()
deferreds = [
defer.maybeDeferred(self.conchServer.stopListening),
defer.maybeDeferred(self.echoServer.stopListening),
]
if HAS_IPV6:
deferreds.append(defer.maybeDeferred(self.echoServerV6.stopListening))
return defer.gatherResults(deferreds)
class ForwardingMixin(ConchServerSetupMixin):
"""
Template class for tests of the Conch server's ability to forward arbitrary
protocols over SSH.
These tests are integration tests, not unit tests. They launch a Conch
server, a custom TCP server (just an L{EchoProtocol}) and then call
L{execute}.
L{execute} is implemented by subclasses of L{ForwardingMixin}. It should
cause an SSH client to connect to the Conch server, asking it to forward
data to the custom TCP server.
"""
def test_exec(self):
"""
Test that we can use whatever client to send the command "echo goodbye"
to the Conch server. Make sure we receive "goodbye" back from the
server.
"""
d = self.execute('echo goodbye', ConchTestOpenSSHProcess())
return d.addCallback(self.assertEqual, b'goodbye\n')
def test_localToRemoteForwarding(self):
"""
Test that we can use whatever client to forward a local port to a
specified port on the server.
"""
localPort = self._getFreePort()
process = ConchTestForwardingProcess(localPort, b'test\n')
d = self.execute('', process,
sshArgs='-N -L%i:127.0.0.1:%i'
% (localPort, self.echoPort))
d.addCallback(self.assertEqual, b'test\n')
return d
def test_remoteToLocalForwarding(self):
"""
Test that we can use whatever client to forward a port from the server
to a port locally.
"""
localPort = self._getFreePort()
process = ConchTestForwardingProcess(localPort, b'test\n')
d = self.execute('', process,
sshArgs='-N -R %i:127.0.0.1:%i'
% (localPort, self.echoPort))
d.addCallback(self.assertEqual, b'test\n')
return d
# Conventionally there is a separate adapter object which provides ISession for
# the user, but making the user provide ISession directly works too. This isn't
# a full implementation of ISession though, just enough to make these tests
# pass.
@implementer(ISession)
class RekeyAvatar(ConchUser):
"""
This avatar implements a shell which sends 60 numbered lines to whatever
connects to it, then closes the session with a 0 exit status.
60 lines is selected as being enough to send more than 2kB of traffic, the
amount the client is configured to initiate a rekey after.
"""
def __init__(self):
ConchUser.__init__(self)
self.channelLookup[b'session'] = SSHSession
def openShell(self, transport):
"""
Write 60 lines of data to the transport, then exit.
"""
proto = protocol.Protocol()
proto.makeConnection(transport)
transport.makeConnection(wrapProtocol(proto))
# Send enough bytes to the connection so that a rekey is triggered in
# the client.
def write(counter):
i = next(counter)
if i == 60:
call.stop()
transport.session.conn.sendRequest(
transport.session, b'exit-status', b'\x00\x00\x00\x00')
transport.loseConnection()
else:
line = "line #%02d\n" % (i,)
line = line.encode("utf-8")
transport.write(line)
# The timing for this loop is an educated guess (and/or the result of
# experimentation) to exercise the case where a packet is generated
# mid-rekey. Since the other side of the connection is (so far) the
# OpenSSH command line client, there's no easy way to determine when the
# rekey has been initiated. If there were, then generating a packet
# immediately at that time would be a better way to test the
# functionality being tested here.
call = LoopingCall(write, count())
call.start(0.01)
def closed(self):
"""
Ignore the close of the session.
"""
class RekeyRealm:
"""
This realm gives out new L{RekeyAvatar} instances for any avatar request.
"""
def requestAvatar(self, avatarID, mind, *interfaces):
return interfaces[0], RekeyAvatar(), lambda: None
class RekeyTestsMixin(ConchServerSetupMixin):
"""
TestCase mixin which defines tests exercising L{SSHTransportBase}'s handling
of rekeying messages.
"""
realmFactory = RekeyRealm
def test_clientRekey(self):
"""
After a client-initiated rekey is completed, application data continues
to be passed over the SSH connection.
"""
process = ConchTestOpenSSHProcess()
d = self.execute("", process, '-o RekeyLimit=2K')
def finished(result):
expectedResult = '\n'.join(['line #%02d' % (i,) for i in range(60)]) + '\n'
expectedResult = expectedResult.encode("utf-8")
self.assertEqual(result, expectedResult)
d.addCallback(finished)
return d
class OpenSSHClientMixin:
if not which('ssh'):
skip = "no ssh command-line client available"
def execute(self, remoteCommand, process, sshArgs=''):
"""
Connects to the SSH server started in L{ConchServerSetupMixin.setUp} by
running the 'ssh' command line tool.
@type remoteCommand: str
@param remoteCommand: The command (with arguments) to run on the
remote end.
@type process: L{ConchTestOpenSSHProcess}
@type sshArgs: str
@param sshArgs: Arguments to pass to the 'ssh' process.
@return: L{defer.Deferred}
"""
# PubkeyAcceptedKeyTypes does not exist prior to OpenSSH 7.0 so we
# first need to check if we can set it. If we can, -V will just print
# the version without doing anything else; if we can't, we will get a
# configuration error.
d = getProcessValue(
which('ssh')[0], ('-o', 'PubkeyAcceptedKeyTypes=ssh-dss', '-V'))
def hasPAKT(status):
if status == 0:
opts = '-oPubkeyAcceptedKeyTypes=ssh-dss '
else:
opts = ''
process.deferred = defer.Deferred()
# Pass -F /dev/null to avoid the user's configuration file from
# being loaded, as it may contain settings that cause our tests to
# fail or hang.
cmdline = ('ssh -2 -l testuser -p %i '
'-F /dev/null '
'-oUserKnownHostsFile=kh_test '
'-oPasswordAuthentication=no '
# Always use the RSA key, since that's the one in kh_test.
'-oHostKeyAlgorithms=ssh-rsa '
'-a '
'-i dsa_test ') + opts + sshArgs + \
' 127.0.0.1 ' + remoteCommand
port = self.conchServer.getHost().port
cmds = (cmdline % port).split()
encodedCmds = []
for cmd in cmds:
if isinstance(cmd, unicode):
cmd = cmd.encode("utf-8")
encodedCmds.append(cmd)
reactor.spawnProcess(process, which('ssh')[0], encodedCmds)
return process.deferred
return d.addCallback(hasPAKT)
class OpenSSHKeyExchangeTests(ConchServerSetupMixin, OpenSSHClientMixin,
unittest.TestCase):
"""
Tests L{SSHTransportBase}'s key exchange algorithm compatibility with
OpenSSH.
"""
def assertExecuteWithKexAlgorithm(self, keyExchangeAlgo):
"""
Call execute() method of L{OpenSSHClientMixin} with an ssh option that
forces the exclusive use of the key exchange algorithm specified by
keyExchangeAlgo
@type keyExchangeAlgo: L{str}
@param keyExchangeAlgo: The key exchange algorithm to use
@return: L{defer.Deferred}
"""
kexAlgorithms = []
try:
output = subprocess.check_output([which('ssh')[0], '-Q', 'kex'],
stderr=subprocess.STDOUT)
if not isinstance(output, str):
output = output.decode("utf-8")
kexAlgorithms = output.split()
except:
pass
if keyExchangeAlgo not in kexAlgorithms:
raise unittest.SkipTest(
"{} not supported by ssh client".format(
keyExchangeAlgo))
d = self.execute('echo hello', ConchTestOpenSSHProcess(),
'-oKexAlgorithms=' + keyExchangeAlgo)
return d.addCallback(self.assertEqual, b'hello\n')
def test_ECDHSHA256(self):
"""
The ecdh-sha2-nistp256 key exchange algorithm is compatible with
OpenSSH
"""
return self.assertExecuteWithKexAlgorithm(
'ecdh-sha2-nistp256')
def test_ECDHSHA384(self):
"""
The ecdh-sha2-nistp384 key exchange algorithm is compatible with
OpenSSH
"""
return self.assertExecuteWithKexAlgorithm(
'ecdh-sha2-nistp384')
def test_ECDHSHA521(self):
"""
The ecdh-sha2-nistp521 key exchange algorithm is compatible with
OpenSSH
"""
return self.assertExecuteWithKexAlgorithm(
'ecdh-sha2-nistp521')
def test_DH_GROUP14(self):
"""
The diffie-hellman-group14-sha1 key exchange algorithm is compatible
with OpenSSH.
"""
return self.assertExecuteWithKexAlgorithm(
'diffie-hellman-group14-sha1')
def test_DH_GROUP_EXCHANGE_SHA1(self):
"""
The diffie-hellman-group-exchange-sha1 key exchange algorithm is
compatible with OpenSSH.
"""
return self.assertExecuteWithKexAlgorithm(
'diffie-hellman-group-exchange-sha1')
def test_DH_GROUP_EXCHANGE_SHA256(self):
"""
The diffie-hellman-group-exchange-sha256 key exchange algorithm is
compatible with OpenSSH.
"""
return self.assertExecuteWithKexAlgorithm(
'diffie-hellman-group-exchange-sha256')
def test_unsupported_algorithm(self):
"""
The list of key exchange algorithms supported
by OpenSSH client is obtained with C{ssh -Q kex}.
"""
self.assertRaises(unittest.SkipTest,
self.assertExecuteWithKexAlgorithm,
'unsupported-algorithm')
class OpenSSHClientForwardingTests(ForwardingMixin, OpenSSHClientMixin,
unittest.TestCase):
"""
Connection forwarding tests run against the OpenSSL command line client.
"""
def test_localToRemoteForwardingV6(self):
"""
Forwarding of arbitrary IPv6 TCP connections via SSH.
"""
localPort = self._getFreePort()
process = ConchTestForwardingProcess(localPort, b'test\n')
d = self.execute('', process,
sshArgs='-N -L%i:[::1]:%i'
% (localPort, self.echoPortV6))
d.addCallback(self.assertEqual, b'test\n')
return d
if not HAS_IPV6:
test_localToRemoteForwardingV6.skip = "Requires IPv6 support"
class OpenSSHClientRekeyTests(RekeyTestsMixin, OpenSSHClientMixin,
unittest.TestCase):
"""
Rekeying tests run against the OpenSSL command line client.
"""
class CmdLineClientTests(ForwardingMixin, unittest.TestCase):
"""
Connection forwarding tests run against the Conch command line client.
"""
if runtime.platformType == 'win32':
skip = "can't run cmdline client on win32"
def execute(self, remoteCommand, process, sshArgs='', conchArgs=None):
"""
As for L{OpenSSHClientTestCase.execute}, except it runs the 'conch'
command line tool, not 'ssh'.
"""
if conchArgs is None:
conchArgs = []
process.deferred = defer.Deferred()
port = self.conchServer.getHost().port
cmd = ('-p {} -l testuser '
'--known-hosts kh_test '
'--user-authentications publickey '
'-a '
'-i dsa_test '
'-v '.format(port) + sshArgs +
' 127.0.0.1 ' + remoteCommand)
cmds = _makeArgs(conchArgs + cmd.split())
env = os.environ.copy()
env['PYTHONPATH'] = os.pathsep.join(sys.path)
encodedCmds = []
encodedEnv = {}
for cmd in cmds:
if isinstance(cmd, unicode):
cmd = cmd.encode("utf-8")
encodedCmds.append(cmd)
for var in env:
val = env[var]
if isinstance(var, unicode):
var = var.encode("utf-8")
if isinstance(val, unicode):
val = val.encode("utf-8")
encodedEnv[var] = val
reactor.spawnProcess(process, sys.executable, encodedCmds, env=encodedEnv)
return process.deferred
def test_runWithLogFile(self):
"""
It can store logs to a local file.
"""
def cb_check_log(result):
logContent = logPath.getContent()
self.assertIn(b'Log opened.', logContent)
logPath = filepath.FilePath(self.mktemp())
d = self.execute(
remoteCommand='echo goodbye',
process=ConchTestOpenSSHProcess(),
conchArgs=['--log', '--logfile', logPath.path,
'--host-key-algorithms', 'ssh-rsa']
)
d.addCallback(self.assertEqual, b'goodbye\n')
d.addCallback(cb_check_log)
return d
def test_runWithNoHostAlgorithmsSpecified(self):
"""
Do not use --host-key-algorithms flag on command line.
"""
d = self.execute(
remoteCommand='echo goodbye',
process=ConchTestOpenSSHProcess()
)
d.addCallback(self.assertEqual, b'goodbye\n')
return d
| [
"354142480@qq.com"
] | 354142480@qq.com |
ce4a1131984c113d06e94fc1e1632be08486513b | c496d925ac4ee6440503ece7d1ff7258079d9f6b | /examples/15_module_re/ex04_sh_cdp_n.py | 88e3424eced0bef7bfe5c654fa090ada4bf2a7f1 | [
"BSD-2-Clause"
] | permissive | arturiuslim/arthur_storage | 40ad1dd8885e103169ee829ac452a8910dfaada7 | ff621f1dfdb5f6513f6fe6dfa2c1a8bb6b5748fb | refs/heads/master | 2023-05-29T03:54:18.328376 | 2023-05-05T07:11:05 | 2023-05-05T07:11:05 | 233,453,413 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 256 | py | from pprint import pprint
import re
# Earlier attempt, kept for reference:
#regex = r"^(\S+) +(\S+ [\d/]+) +.+ (\S+ [\d/]+)$"
# Capture (device name, local interface, neighbor port) from "show cdp
# neighbors" output; the device name may butt up against the interface,
# hence the ' *' separator and the leading capital on the interface token.
regex = r"^(\S+) *([A-Z]\S+ [\d/]+) +.+ (\S+ [\d/]+)$"

with open("sh_cdp_n_sw1.txt") as f:
    for line in f:
        m = re.search(regex, line)
        if m:
            # Print the three captured fields for each neighbor entry.
            pprint(m.groups())
| [
"arturiuslim@gmail.com"
] | arturiuslim@gmail.com |
5b3070b16354d8c0a398b356f50a55c9905eb3c5 | 88278388281957247f6be9f46a3b556ca8f94e26 | /Problems/Film/main.py | 291bf1435a604415f688ace9aad9bf13a98de311 | [] | no_license | guozhe001/Loan-Calculator | 2b2ad0218bea600935a0da9c10024fa161f6fcb4 | 9b83a4a21c9b7b0c71611dfacb09576d68ece9bb | refs/heads/main | 2023-01-01T19:16:38.638546 | 2020-10-30T07:39:47 | 2020-10-30T07:39:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 222 | py | movie_title = input()
director_name = input()
release_year = input()
# Formatted string output (f-string interpolation)
print(f"{movie_title} (dir. {director_name}) came out in {release_year}")
# Example: Fight Club (dir. David Fincher) came out in 1999
| [
"guozhe@renmaitech.com"
] | guozhe@renmaitech.com |
190f56c195b069a039efb139396d924886c96986 | dc047381dca5b0bce1e1848edfc6a63235dc4887 | /objectTracker.py | 929b24788862efa19c008b4328d9538b3323e5be | [] | no_license | phd-jaybie/opencv-projects | 8f2446ea18c5efdff2c8ab1f78616c53d40e497f | cc88008dbed219ae1e2752fe8df27bab35bc9404 | refs/heads/master | 2021-09-05T00:54:56.688213 | 2018-01-23T06:21:04 | 2018-01-23T06:21:04 | 104,173,966 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,791 | py | import cv2
import sys
(major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# Real-time single-object tracking demo: select a region of interest on the
# first webcam frame, then follow it with one of OpenCV's built-in trackers,
# drawing the tracked box and the frame rate on each frame.
if __name__ == '__main__' :

    # Set up tracker.
    # Instead of MIL, you can also use any of the other algorithms below
    # by changing the index into tracker_types.
    tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[1]  # MIL is selected here

    # OpenCV < 3.3 exposes a single string-based factory; newer versions
    # have one constructor per algorithm.
    if int(minor_ver) < 3:
        tracker = cv2.Tracker_create(tracker_type)
    else:
        if tracker_type == 'BOOSTING':
            tracker = cv2.TrackerBoosting_create()
        if tracker_type == 'MIL':
            tracker = cv2.TrackerMIL_create()
        if tracker_type == 'KCF':
            tracker = cv2.TrackerKCF_create()
        if tracker_type == 'TLD':
            tracker = cv2.TrackerTLD_create()
        if tracker_type == 'MEDIANFLOW':
            tracker = cv2.TrackerMedianFlow_create()
        if tracker_type == 'GOTURN':
            tracker = cv2.TrackerGOTURN_create()

    print("Tracker type : " + tracker_type)

    # Read video from the default camera (device 0).
    cap = cv2.VideoCapture(0)

    # Exit if video not opened.
    if not cap.isOpened():
        print ("Could not open video")
        sys.exit()

    # Read first frame.
    ok, frame = cap.read()
    if not ok:
        print("Cannot read video file")
        sys.exit()

    # Define an initial bounding box (x, y, width, height).
    bbox = (287, 23, 86, 320)

    # Interactive ROI selection; this overrides the default box above.
    bbox = cv2.selectROI(frame, False)

    # Initialize tracker with first frame and bounding box.
    ok = tracker.init(frame, bbox)

    while True:
        # Read a new frame
        ok, frame = cap.read()
        if not ok:
            break

        # Start timer
        timer = cv2.getTickCount()

        # Update tracker: ok is False when the target is lost.
        ok, bbox = tracker.update(frame)

        # Calculate Frames per second (FPS)
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);

        # Draw bounding box
        if ok:
            # Tracking success: draw the box around the target.
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
        else :
            # Tracking failure
            cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)

        # Display tracker type on frame
        cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);

        # Display FPS on frame
        cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);

        # Display result
        cv2.imshow("Tracking", frame)

        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27 : break

    cap.release()
    cv2.destroyAllWindows()
| [
"j.deguzman@student.unsw.edu.au"
] | j.deguzman@student.unsw.edu.au |
3e08d654f048917a2b0e628dff9d3407f41d5863 | d7d9cc992a15572460da83b7bf0336f4d65bee05 | /ZoneKiller.py | b97508058ad3d77ebe4013199bbcd6eee14296e2 | [] | no_license | toustyj/XAttacker-3.x | 462c7e19e7b1064712185342b695657d63296ceb | 576e9f0097e9c609e41ae7411b85cad001288026 | refs/heads/main | 2023-05-01T02:25:29.710971 | 2021-05-17T20:05:15 | 2021-05-17T20:05:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,802 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Ne touche pas le script -_-
#Don't Edit Logo -_-
import requests, httplib, urllib
import socket
from platform import system
import os
import sys, time
import re
import threading
from multiprocessing.dummy import Pool
from colorama import Fore
from colorama import Style
from colorama import init
init(autoreset=True)
fr = Fore.RED
fh = Fore.RED
fc = Fore.CYAN
fo = Fore.MAGENTA
fw = Fore.WHITE
fy = Fore.YELLOW
fbl = Fore.BLUE
fg = Fore.GREEN
sd = Style.DIM
fb = Fore.RESET
sn = Style.NORMAL
sb = Style.BRIGHT
user = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:57.0) Gecko/20100101 Firefox/57.0"}
url = "http://www.zone-h.org/archive/notifier="
urll = "http://zone-h.org/archive/published=0"
url2 = "http://www.defacers.org/onhold!"
url4 = "http://www.defacers.org/gold!"
my_cook = {
"ZHE" : "58594bbe3162e99794eaf4696215e7e8",
"PHPSESSID" : "gklckq837dpm9qt8jj07a88a65"
}
def zonehh():
print("""
|---| Grabb Sites From Zone-h |--|
\033[91m[1] \033[95mGrabb Sites By Notifier
\033[91m[2] \033[95mGrabb Sites By Onhold
""")
sec = int(raw_input("Choose Section: "))
if sec == 1:
notf = raw_input("\033[95mEntre notifier: \033[92m")
for i in range(1, 51):
dz = requests.get(url + notf +"/page=" + str(i), cookies=my_cook)
dzz = dz.content
print(url + notf +"/page=" + str(i))
if '<html><body>-<script type="text/javascript"' in dzz:
print("Change Cookies Please")
sys.exit()
elif '<input type="text" name="captcha" value=""><input type="submit">' in dzz:
print("Entre Captcha In Zone-h From Ur Browser :/")
sys.exit()
else:
Hunt_urls = re.findall('<td>(.*)\n </td>', dzz)
if '/mirror/id/' in dzz:
for xx in Hunt_urls:
qqq = xx.replace('...','')
print ' [' + '*' + '] ' + qqq.split('/')[0]
with open( notf + '.txt', 'a') as rr:
rr.write("http://" + qqq.split('/')[0] + '\n')
else:
print("Grabb Sites Completed Starting XAttacker !!")
time.sleep(4)
os.system('perl XAttacker.pl')
sys.exit()
elif sec == 2:
print(":* __Grabb Sites After Done Please Open XAttacker.pl")
for qwd in range(1, 51):
rb = requests.get(urll + "/page=" + str(qwd) , cookies=my_cook)
dzq = rb.content
if '<html><body>-<script type="text/javascript"' in dzq:
print("Change Cookies Plz")
sys.exit()
elif "captcha" in dzq:
print("Entre captcha In Your Browser Of Site [zone-h.org]")
else:
Hunt_urlss = re.findall('<td>(.*)\n </td>', dzq)
for xxx in Hunt_urlss:
qqqq = xxx.replace('...','')
print ' [' + '*' + '] ' + qqqq.split('/')[0]
with open('onhold_zone.txt', 'a') as rrr:
rrr.write("http://" + qqqq.split('/')[0] + '\n')
else:
print("Fuck You Men")
def defacers():
print("""
|---| Grabb Sites From Defacers.org |--|
\033[91m[1] \033[95mGrabb Sites By Onhold
\033[91m[2] \033[95mGrabb Sites By Archive
""")
sec = int(raw_input("Choose Section: "))
if sec == 1:
for i in range(1, 380):
print("Page: "), str(i) + "\033[91m Waiting Grabbed Sites ..... <3"
rb = requests.get(url2 + str(i),headers=user)
okbb = rb.content
domains = re.findall(r'title=".*" tar.?', okbb)
for iii in domains:
iii = iii.replace('" target="_blank" reel="nofollow">', "")
iii = iii.replace('title="', "")
iii = iii.replace('" targ', "")
print("\033[95mhttp://" + iii + "/")
with open("Onhold_defacer.txt", "a") as by:
by.writelines("http://" + iii + "/")
by.writelines("\n")
print ("\t\t[+] Page Saved_"),str(i) +(" done [+]\n")
elif sec == 2:
for i in range(1, 25):
print("Page: "), str(i) + " \033[91mWaiting Grabbed Sites Governement ..... <3"
rb = requests.get(url4 + str(i),headers=user)
okbb = rb.content
domains = re.findall(r'title=".*" tar.?', okbb)
for iii in domains:
iii = iii.replace('" target="_blank" reel="nofollow">', "")
iii = iii.replace('title="', "")
iii = iii.replace('" targ', "")
print("\033[95mhttp://" + iii + "/")
with open("govSites_defacer.txt", "a") as by:
by.writelines("http://" + iii + "/")
by.writelines("\n")
print ("\t\t[+] Page Saved_"),str(i) +(" done [+]\n")
else:
print("Fuck You Men 2")
def mirroirh():
print("""
|---| Grabb Sites From Mirror-h.org |--|
\033[91m[1] \033[95mGrabb Sites By Onhold
\033[91m[2] \033[95mGrabb Sites By Auto_Notifier
""")
sec = int(raw_input("Choose Section: "))
if sec == 1:
url = "https://mirror-h.org/archive/page/"
try:
for pp in range(1, 40254):
dz = requests.get(url + str(pp))
dzz = dz.content
qwd = re.findall(r'/zone/(.*)</a></td>', dzz)
print(" \033[91m[*] Please Wait To Grabb Sites ...... Page: "), pp
for ii in qwd:
ii = ii.replace('<i class="icon-search"></i>', "")
ii = ii.replace(ii[:10], "")
ii = ii.replace("\r\n\r\n", "\r\n")
ii = ii.strip()
#iio = ii.replace('<i class="icon-search"></i>', "hhhhhhhhhhhhh")
print("\033[95m" + ii)
with open( 'onzeb_mirror.txt', 'a') as rr:
rr.write(ii + '\n')
except:
pass
elif sec == 2:
url = "https://mirror-h.org/search/hacker/"
try:
for ha in range(1, 2000):
print("\033[91mWait To Grabb From Hacker: "), ha
dz = requests.get(url + str(ha) + "/pages/1")
dzz = dz.content
qwd = re.findall(r'/pages/\d" title="Last"', dzz)
for i in qwd:
i = i.rstrip()
sss = i.replace("/pages/","")
ss = sss.replace('" title="Last"',"")
ssf = int(ss) + 1
for ii in range(1, ssf):
print(" \033[91m[*] Please Wait To Grabb Sites ...... Page: "), ii
dd = requests.get(url + str(ha) + "/pages/"+ str(ii))
op = dd.content
qwdd = re.findall(r'/zone/(.*)</a></td>', op)
for idi in qwdd:
idi = idi.replace('<i class="icon-search"></i>', "")
idi = idi.replace(idi[:10], "")
idi = idi.replace("\r\n\r\n", "\r\n")
idi = idi.strip()
#iio = ii.replace('<i class="icon-search"></i>', "hhhhhhhhhhhhh")
print("\033[95m" + idi)
with open( 'top_mirror.txt', 'a') as rr:
rr.write(idi + '\n')
except:
pass
def overflowzone():
print("""
|---| Grabb Sites From overflowzone.com |--|
\033[91m[1] \033[95mGrabb Sites By Onhold
\033[91m[2] \033[95mGrabb Sites By AutoNotifier
""")
sec = int(raw_input("Choose Section: "))
if sec == 1:
url = "http://attacker.work/onhold/onhold/page/"
dz = requests.get(url + "1")
dzz = dz.content
tn = re.findall(r'<a href="/onhold/page/(.*)" title="Last">', dzz)
for ii in tn:
qwd = ii.split('/')[-1]
for ok in range(1, int(qwd)):
okk = requests.get(url + str(ok))
print("`\t\t\t" + url + str(ok))
fel = okk.content
okkk = re.findall(r'">http://(.*)</a></td>', fel)
for iii in okkk:
iii = iii.rstrip()
print("\033[95mhttp://" + iii.split('/')[0])
with open( 'onhold_attackerwork.txt', 'a') as rr:
rr.write("http://" + iii.split('/')[0] + '\n')
elif sec == 2:
url = "http://attacker.work/archive/page/"
dz = requests.get(url + "1")
dzz = dz.content
tn = re.findall(r'<a href="/archive/page/(.*)" title="Last">', dzz)
for ii in tn:
qwd = ii.split('/')[-1]
for ok in range(1, int(qwd)):
okk = requests.get(url + str(ok))
print("`\t\t\t" + url + str(ok))
fel = okk.content
okkk = re.findall(r'">http://(.*)</a></td>', fel)
for iii in okkk:
iii = iii.rstrip()
print("\033[95mhttp://" + iii.split('/')[0])
with open( 'archive_attackerwork.txt', 'a') as rr:
rr.write("http://" + iii.split('/')[0] + '\n')
else:
print("hhhhhhhh tnkt")
def bYPAS():
exploit = ["/member/","/admin/login.php","/admin/panel.php","/admin/","/login.php","/admin.html","/admin.php","/admin-login.php"]
try:
q = raw_input('\033[96m Entre Liste Site: \033[90m ')
q = open(q, 'r')
except:
print("\033[91mEntre List Sites -_- #Noob ")
sys.exit()
for lst in q:
lst = lst.rstrip()
print("\033[94m Wait Scaning ....... \033[94m"), lst
for exploits in exploit:
exploits.rstrip()
try:
if lst[:7] == "http://":
lst = lst.replace("http://","")
if lst[:8] == "https://":
lst = lst.replace("https://", "")
if lst[-1] == "/":
lst = lst.replace("/","")
socket.setdefaulttimeout(5)
conn = httplib.HTTPConnection(lst)
conn.request("POST", exploits)
conn = conn.getresponse()
htmlconn = conn.read()
if conn.status == 200 and ('type="password"') in htmlconn:
print("\033[92m [+] Admin Panel [+] ======\033[96m=======> \033[96m ") , lst + exploits
with open("admin_panels.txt", "a") as by:
by.writelines(lst + exploits + "\n")
else:
print("\033[91m [-] Not Found : [-]"),lst + exploits
except:
pass
def add_http():
dz = raw_input("Entre List Site: ")
dz = open(dz, "r")
for i in dz:
i = i.rstrip()
print("http://"+i)
with open( 'aziz.txt', 'a') as rr:
rr.write("http://" + i + '\n')
print("Text Saved !!")
def binger():
qwd = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:57.0) Gecko/20100101 Firefox/57.0"}
print("""
\033[91m[1] \033[95mGrabb Sites By Ip List
\033[91m[1] \033[95mGrabb Sites Fox_Contact And Bypass By Ip List
""")
o = int(raw_input("Choose Section: "))
if o == 1:
gr = raw_input('Give me List Ip: ')
gr = open(gr,'r')
for done in gr:
remo = []
page = 1
while page < 251:
bing = "http://www.bing.com/search?q=ip%3A"+done+"+&count=50&first="+str(page)
opene = requests.get(bing,verify=False,headers=qwd)
read = opene.content
findwebs = re.findall('<h2><a href="(.*?)"', read)
for i in findwebs:
o = i.split('/')
if (o[0]+'//'+o[2]) in remo:
pass
else:
remo.append(o[0]+'//'+o[2])
print '{}[XxX] '.format(fg,sb),(o[0]+'//'+o[2])
with open('Grabbed.txt','a') as s:
s.writelines((o[0]+'//'+o[2])+'\n')
page = page+5
elif o == 2:
qwd = {"User-Agent": "Mozilla/5.0 (Windows NT 6.1; rv:57.0) Gecko/20100101 Firefox/57.0"}
gr = raw_input('Give me List Ip: ')
gr = open(gr,'r')
for done in gr:
remo = []
page = 1
print("Wait Grabb Sites From iP: "), done
while page < 251:
bing = "http://www.bing.com/search?q=ip%3A"+done + " powered by fox_contact"+"+&count=50&first="+str(page)
opene = requests.get(bing,verify=False,headers=qwd)
read = opene.content
findwebs = re.findall('<h2><a href="(.*?)"', read)
for i in findwebs:
o = i.split('/')
if (o[0]+'//'+o[2]) in remo:
pass
else:
remo.append(o[0]+'//'+o[2])
print '[XxX] ' + (o[0]+'//'+o[2])
with open('foxcontact.txt','a') as s:
s.writelines((o[0]+'//'+o[2])+'\n')
page = page+5
bing = "http://www.bing.com/search?q=ip%3A"+done + " admin/login.php"+"+&count=50&first="+str(page)
opene = requests.get(bing,verify=False,headers=qwd)
read = opene.content
findwebs = re.findall('<h2><a href="(.*?)"', read)
for i in findwebs:
o = i.split('/')
if (o[0]+'//'+o[2]) in remo:
pass
else:
remo.append(o[0]+'//'+o[2])
dd = requests.get(o[0]+'//'+o[2] + "/admin/login.php")
ddd = dd.content
if 'type="password"' in ddd:
print("\033[92mAdmin_Panel Site: >>>>>>\033[91m"),o[0]+'//'+o[2] + "/admin/login.php"
with open('admin panel.txt','a') as s:
s.writelines((o[0]+'//'+o[2])+'\n')
page = page+5
else:
print("dir numero azbi nooooooob")
def cms_detected():
lst = raw_input("Entre List Site: ")
lst = open(lst, 'r')
for i in lst:
i = i.rstrip()
print("\033[91m[+] \033[95mPlease Waiting To Scaning ... "), "\033[94m" + i + " \033[91m[+]"
try:
dz = requests.get(i)
ok = dz.content
#-------------WP---------------------------
if "wp-content" in ok:
print("\033[92mWp Site : >>>>>>>>>>>>>>\033[91m"), i + "/wp-login.php"
with open("wp sites.txt", "a") as wpp:
wpp.writelines(i + "/wp-login.php"+ "\n")
#-------JM--------------------------
elif "com_content" in ok:
print("\033[92mJm Site: >>>>>>>>>>>>>>\033[91m"), i + "/administrator/"
with open("joomla sites.txt", "a") as jmm:
jmm.writelines(i + "/administrator/"+ "\n")
#---------OPENCARTE-----------------------
elif "index.php?route" in ok:
print("\033[92mOpenCart Site: >>>>>>>>>>>>>>\033[91m"), i + "/admin/"
with open("OpenCart sites.txt", "a") as opncrt:
opncrt.writelines(i + "/admin/"+ "\n")
#---------------------------------
elif "/node/" in ok:
print("\033[92mDrupal Site: >>>>>>>>>>>>>>\033[91m"), i + "/user/login"
with open("Drupal sites.txt", "a") as drbl:
drbl.writelines(i + "/user/login"+ "\n")
else:
bypass = ["/admin/login.php","/admin/","/login.php","/admin.html","/admin.php","/member/"]
for byp in bypass:
byp = byp.rstrip()
dd = requests.get(i + byp)
ddd = dd.content
if 'type="password"' in ddd:
print("\033[92mAdmin_Panel Site: >>>>>>\033[91m"),i + byp
with open("Admin Sites.txt", "a") as by:
by.writelines(i + byp + "\n")
else:
pass
print("\033[91m[-] Not Found Cms: [-]"), "\033[91m" + i
except:
pass
def spotii():
#url = "https://www.spotify.com"
rl = "http://www.spotify.com/us/xhr/json/isEmailAvailable.php?signup_form[email]="
try:
ok = raw_input("{}root@kil3r~# Entre List Email: ".format(fy,sn))
okd = open(ok, 'r')
except:
print("{}zebi entre list email -_- nooob").format(fh,sn)
for i in okd:
i = i.rstrip()
qwd = url + i + "&email=" + i
dz = requests.get(qwd, headers=user)
dzz = dz.content
if 'false' in dzz:
print("{} [LIVE] ".format(fg,sn)), "{}".format(fg,sn) + i
with open("spotify checked.txt", "a") as zebi:
zebi.writelines(i + '\n')
else:
print("{} [DEAD] ").format(fh,sn), "{}".format(fh,sn) + i
def clearscrn():
    """Clear the terminal screen (and set green text on Windows)."""
    platform_name = system()
    if platform_name == 'Linux':
        os.system('clear')
    elif platform_name == 'Windows':
        os.system('cls')
        os.system('color a')
clearscrn()
def slowprint(s):
    """Print *s* one character at a time for a typewriter effect.

    Each character is written and flushed individually, followed by a
    40 ms pause; a trailing newline is appended at the end.
    """
    delay = 4. / 100  # seconds per character
    for character in s + '\n':
        sys.stdout.write(character)
        sys.stdout.flush()
        time.sleep(delay)
def helper4():
clearscrn()
banner = """\033[94m
_ _ _ ___ _ _____
███╗░░██╗░█████╗░███╗░░██╗░█████╗░░██████╗███████╗░█████╗░
████╗░██║██╔══██╗████╗░██║██╔══██╗██╔════╝██╔════╝██╔══██╗
██╔██╗██║███████║██╔██╗██║██║░░██║╚█████╗░█████╗░░██║░░╚═╝
██║╚████║██╔══██║██║╚████║██║░░██║░╚═══██╗██╔══╝░░██║░░██╗
██║░╚███║██║░░██║██║░╚███║╚█████╔╝██████╔╝███████╗╚█████╔╝
╚═╝░░╚══╝╚═╝░░╚═╝╚═╝░░╚══╝░╚════╝░╚═════╝░╚══════╝░╚════╝░
"""
print("""\033[95m
_ _ _ ___ _ _____
███╗░░██╗░█████╗░███╗░░██╗░█████╗░░██████╗███████╗░█████╗░
████╗░██║██╔══██╗████╗░██║██╔══██╗██╔════╝██╔════╝██╔══██╗
██╔██╗██║███████║██╔██╗██║██║░░██║╚█████╗░█████╗░░██║░░╚═╝
██║╚████║██╔══██║██║╚████║██║░░██║░╚═══██╗██╔══╝░░██║░░██╗
██║░╚███║██║░░██║██║░╚███║╚█████╔╝██████╔╝███████╗╚█████╔╝
╚═╝░░╚══╝╚═╝░░╚═╝╚═╝░░╚══╝░╚════╝░╚═════╝░╚══════╝░╚════╝░
Greetz To : \033[93mNano\033[92m Hackers \033[91m|D\033[92mz| \033[91 XAttacker \033[92mHackers
""")
slowprint("\n\t\t\t\t\tPowered By : N.." + "\n\t\t\t\t\t\tDiscord : N..#1337 ICQ : 748166881")
print("")
print("""
\033[91m[1] \033[95mGrabb Sites \033[92m From Zone-h.org | \033[91m[3] \033[95mGrabb Sites \033[92m From mirror-h.org |
\033[91m[2] \033[95mGrabb Sites \033[92m From Defacers.org | \033[91m[4] \033[95mGrabb Sites \033[92m From overflowzone.com |
\033[91m[5] \033[95mGet Sites bypass With List [Bypass Finder]
\033[91m[6] \033[95mMass Add (http://) To List ^_^
\033[91m[7] \033[95mGrabber Sites From Bing :D
\033[91m[8] \033[95mCms Filter
\033[91m[9] \033[95mEmail Valid Spotify
#######################################################
# Love 4 USA |\033[91m| Live 4 USA #
#######################################################
""")
try:
qq = int(raw_input("\033[91m[-] \033[90mroot@kil3r~# \033[92mChoose Section !!\033[95m : \033[90m"))
if qq == 1:
clearscrn()
print(banner)
zonehh()
if qq == 2:
clearscrn()
print(banner)
defacers()
if qq == 3:
clearscrn()
print(banner)
mirroirh()
if qq == 4:
clearscrn()
print(banner)
overflowzone()
if qq == 5:
clearscrn()
print(banner)
bYPAS()
if qq == 6:
clearscrn()
print(banner)
add_http()
if qq == 7:
clearscrn()
print(banner)
binger()
if qq == 8:
clearscrn()
print(banner)
cms_detected()
if qq == 9:
clearscrn()
print(banner)
spotii()
except:
pass
helper4()
| [
"50776038+chuxuantinh@users.noreply.github.com"
] | 50776038+chuxuantinh@users.noreply.github.com |
06dd6b5555c649a9e05af4e1aba0d16b729d5704 | 04164e028417ff8472b9f2bfec0ec45b0888f743 | /development/interface-pyqt/huge_0d/huge_0d.py | 73f9c7d148eb66cc4f4c236bcc06da783dbc9417 | [] | no_license | Huaguiyuan/quantum-honeycomp | c2b810ff5f5e25d41b1f0c1c1ff7ae500b04dc31 | 50deb0e59fffe4031f05094572552ca5be59e741 | refs/heads/master | 2020-03-22T19:09:58.148862 | 2018-07-08T19:51:58 | 2018-07-08T19:51:58 | 140,510,217 | 1 | 2 | null | 2018-07-11T02:20:32 | 2018-07-11T02:20:32 | null | UTF-8 | Python | false | false | 15,274 | py | #!/usr/bin/python
from __future__ import print_function
import sys
import os
import time
qhroot = os.environ["QHROOT"] # root path
sys.path.append(qhroot+"/interface-pyqt/qtwrap")
sys.path.append(qhroot+"/pysrc/") # python libraries
import qtwrap # import the library with simple wrappaers to qt4
get = qtwrap.get # get the value of a certain variable
getbox = qtwrap.getbox # get the value of a certain variable
window = qtwrap.main() # this is the main interface
modify = qtwrap.modify
from qh_interface import * # import all the libraries needed
getactive = qtwrap.is_checked
def getfile(name):
    """Return the file path selected in the file-chooser widget *name*.

    NOTE(review): `builder` is not defined anywhere in this module (this
    looks like a leftover from a GTK version of the interface) -- confirm
    this lookup actually works before relying on it.
    """
    return builder.get_object(name).get_filename()
def get_vacancies():
    """Return the list of atom indices typed in the "vacancies" text box.

    Accepts comma- and/or whitespace-separated numbers; each entry is
    converted to an integer index (via float, so "3.0" is accepted).

    NOTE(review): `builder` is not defined in this module (leftover from a
    GTK version of the interface) -- confirm this widget lookup works.
    """
    name = "vacancies" # name of the text-entry widget
    ats = builder.get_object(name).get_text()
    ats = ats.replace(","," ") # substitute comma by space
    ats = ats.split() # split on whitespace
#  print "Remove atoms = ",ats
    ats = [int(float(a)) for a in ats] # convert each entry to int
    return ats # return list of indices
def get_numbers(name):
    """Return the integers typed in the interface field *name*.

    The field may contain comma- and/or whitespace-separated numbers;
    each token is parsed through float first, so entries like "3.0" are
    accepted as well.
    """
    raw = get(name, string=True)
    tokens = raw.replace(",", " ").split()
    return [int(float(token)) for token in tokens]
def get_geometry0d(second_call=False):
    """Create a 0d island geometry from the interface settings.

    Reads the lattice type, the island shape mode (explicit positions file,
    polygonal recipe, or image mask) and the size from the Qt form, and
    returns a geometry object.  When "target_diameter" is active the size
    is adjusted once so the island diameter matches the requested value
    within 1%; *second_call* guards against repeating that adjustment.
    """
    t0 = time.clock() # initial time, for the timing report below
    lattice_name = getbox("lattice")
    # pick the 2d unit-cell builder matching the selected lattice
    if lattice_name=="Honeycomb":
        geometry_builder = geometry.honeycomb_lattice
    elif lattice_name=="Square":
        geometry_builder = geometry.square_lattice
    elif lattice_name=="Kagome":
        geometry_builder = geometry.kagome_lattice
    elif lattice_name=="Lieb":
        geometry_builder = geometry.lieb_lattice
    elif lattice_name=="Triangular":
        geometry_builder = geometry.triangular_lattice
    else: raise
    # first create a raw unit cell
    gbulk = geometry_builder() # build a 2d unit cell
    # now sculpt the geometry
    nf = 1+get("size") # get the desired size, in float
    ####################################
    ####################################
    if getbox("geometry_mode") == "Positions": # read positions from a file
        os.system("cp "+getfile("positions_file")+" POSITIONS.OUT")
        g = geometry.read()
        g.center()
        return g
    elif getbox("geometry_mode") == "Recipe": # generate a polygonal island
        nedges = int(get("nedges")) # number of edges
        angle = get("rotation")*2.*np.pi/360 # rotation angle in radians
        g = islands.get_geometry(geo=gbulk,n=nf,nedges=nedges,
                                 rot=angle,clean=False)
    elif getbox("geometry_mode") == "Image": # generate from an image mask
        print("Direction",getfile("image_path"))
        g = sculpt.image2island(getfile("image_path"),gbulk,size=int(nf),color="black")
    else: raise
    ####################################
    #############################################
    # if a precise diameter is wanted, only for the first call
    if getactive("target_diameter") and not second_call:
        diameter = get("desired_diameter")
        ratio = diameter/g.get_diameter() # ratio between wanted and obtained
        print("\nChecking that it has the desired size",ratio)
        if not 0.99<ratio<1.01: # outside the 1% tolerance: rescale and retry once
            newsize = round(ratio*float(get("size"))) # new size
            modify("size",newsize) # write the new size back to the form
            print("Recalling the geometry with size",newsize)
            return get_geometry0d(second_call = True)
        else: pass
    ####################################
    # clean the island
    g.center() # center the geometry at the origin
    ############################################3
    # g = modify_geometry(g) # modify the geometry in several ways
    print("Total number of atoms =",len(g.r))
    print("Time spent in creating the geometry =",time.clock() - t0)
    if getactive("clean_island"): # optionally remove dangling atoms
        g = sculpt.remove_unibonded(g,iterative=True) # remove single bonded atoms
    return g
def modify_geometry(g):
    """Modify the geometry according to the interface.

    Depending on the "modify_geometry" combo box, either return *g*
    unchanged, remove the atom indices typed in the vacancies field, or
    remove the atoms previously picked interactively (indices read from
    REMOVE_ATOMS.INFO).  On any read/parse failure the geometry is
    returned unmodified.
    """
    mtype = getbox("modify_geometry")
    print("Modifying geometry according to",mtype)
    if mtype == "None": return g # do nothing
    elif mtype == "Index":
        return sculpt.remove(g,get_vacancies()) # remove the listed atoms
    elif mtype=="Choose atoms": # atoms picked in the structure viewer
        print("Removing as chosen\n")
        try:
            inds = np.genfromtxt("REMOVE_ATOMS.INFO") # selected atoms
            print("Removed indexes",inds)
        except: return g # no selection file: keep geometry as is
        try:
            inds = [int(i) for i in inds] # as integers
        except: inds = [int(inds)] # a single selection yields a 0-d array
        try: return sculpt.remove(g,inds) # remove the selected atoms
        except: return g # removal failed: keep geometry as is
def initialize():
    """Build the Hamiltonian for the current island and save it to disk.

    Reads the model parameters (Zeeman field, Rashba and Kane-Mele SOC,
    AF order, Fermi shift, sublattice imbalance, magnetic flux, Haldane
    coupling, edge potential) from the interface.  Note has_spin is
    hard-coded to False below, so only the spinless branch executes; the
    spinful branch is kept for completeness.
    """
    t0 = time.clock()
    os.system("rm SELECTED_ATOMS.INFO") # reset any previous DOS atom selection
    g = get_geometry0d() # get the geometry
    h = hamiltonians.hamiltonian(g) # get the hamiltonian
    h.has_spin = False # spin treatment (hard-coded to spinless)
    if h.has_spin: # spinful hamiltonian (unreachable with the line above)
        print("Spinful hamiltonian, DO NOT USE VERY LARGE ISLANDS!!!")
        h.is_sparse = False
        h.first_neighbors() # first neighbor hopping
        h.add_zeeman([get("Bx"),get("By"),get("Bz")]) # Zeeman fields
        if abs(get("rashba")) > 0.0: h.add_rashba(get("rashba")) # Rashba field
        h.add_antiferromagnetism(get("mAF")) # AF order
        if abs(get("kanemele"))>0.0: h.add_kane_mele(get("kanemele")) # intrinsic SOC
        h.shift_fermi(get("fermi")) # shift fermi energy
        h.turn_sparse() # turn it sparse
    else: # spinless treatment
        h.is_sparse = True
        print("Spinless hamiltonian")
        h.first_neighbors() # first neighbor hopping
        h.add_sublattice_imbalance(get("mAB")) # sublattice imbalance
        h.add_peierls(get("peierls")) # add magnetic field
        if get("haldane")!=0.0:
            h.add_haldane(get("haldane")) # add Haldane coupling
    if get("edge_potential")!=0.0: # if there is an edge potential
        edgesites = edge_atoms(h.geometry) # flag the edge atoms
        h.shift_fermi(edgesites) # add onsite energy on the edge atoms
    # part for bilayer systems
    #####
    print("Time spent in creating the Hamiltonian =",time.clock() - t0)
    h.geometry.write()
    h.save() # save the Hamiltonian to disk for the other callbacks
def show_ldos():
    """Compute and plot the KPM local DOS at the selected atoms.

    The Hamiltonian is rescaled by 1/6 so the spectrum fits in the KPM
    window [-1,1] (assumes the bandwidth is below 6 -- TODO confirm);
    energies and DOS are scaled back by 6 before plotting.  One
    LDOS_<i>.OUT file is written per selected atom.
    """
    h = load_hamiltonian() # get the hamiltonian
    points = int(get("LDOS_polynomials")) # number of Chebyshev polynomials
    x = np.linspace(-.9,.9,int(get("num_ene_ldos"))) # energies
    h.intra = h.intra/6.0 # normalize into the KPM window
    # if atoms were picked interactively, copy them into the form field
    if os.path.isfile("SELECTED_ATOMS.INFO"):
        ind_atoms = open("SELECTED_ATOMS.INFO").read().replace("\n",", ")
        modify("LDOS_num_atom",ind_atoms)
    # now continue in the usual way
    atoms = get_numbers("LDOS_num_atom")
    ecut = get("energy_cutoff_local_dos")/6. # energy cutoff, rescaled
    os.system("rm LDOS_*") # remove outputs of previous runs
    for iatom in atoms: # loop over atoms
        if h.has_spin: iatom = iatom*2 # spinful: two orbitals per site
        x = np.linspace(-ecut,ecut,int(get("num_ene_ldos")),endpoint=True) # energies
        mus = kpm.local_dos(h.intra,n=points,i=iatom) # calculate KPM moments
        y = kpm.generate_profile(mus,x) # reconstruct the DOS from the moments
        x,y = x*6.,y/6. # undo the rescaling
        y = dos.convolve(x,y,delta=get("smearing_local_dos")) # add broadening
        dos.write_dos(x,y) # write dos in file
        fname = "LDOS_"+str(iatom)+".OUT" # name of the file
        os.system("mv DOS.OUT " + fname) # save the file
    execute_script("qh-several-ldos ")
def show_full_spectrum():
    """Diagonalize the saved Hamiltonian and plot every eigenvalue,
    refusing matrices of dimension 10000 or larger."""
    nmax = 10000
    h = load_hamiltonian()
    dim = h.intra.shape[0]
    if dim >= nmax:
        print("Too large Hamiltonian ", nmax)
        return
    h.get_bands()
    execute_script("qh-bands0d ")
def load_hamiltonian():
    """Read the previously saved Hamiltonian back from disk and return it."""
    return hamiltonians.load()
def show_dos():
    """Compute and plot the total DOS with the kernel polynomial method.

    A stochastic trace over random vectors is used; the Hamiltonian is
    rescaled by 1/6 to fit the KPM window [-1,1] (assumes the bandwidth
    is below 6 -- TODO confirm) and energies/DOS are scaled back before
    plotting.
    """
    h = load_hamiltonian() # get the hamiltonian
    points = int(get("DOS_polynomials")) # number of Chebyshev polynomials
    x = np.linspace(-.9,.9,int(get("num_ene_dos"))) # energies
    h.intra = h.intra/6.0 # normalize into the KPM window
    ntries = int(get("DOS_iterations")) # random vectors for the stochastic trace
    t0 = time.clock() # initial time
    # bug fix: "DOS_iterations" was read from the interface but ignored,
    # a hard-coded ntries=100 was passed instead
    mus = kpm.random_trace(h.intra,n=points,ntries=ntries) # calculate moments
    y = kpm.generate_profile(mus,x) # reconstruct the DOS from the moments
    x,y = x*6.,y/6. # undo the rescaling
    y = dos.convolve(x,y,delta=get("smearing_dos")) # add broadening
    dos.write_dos(x,y) # write dos in file
    execute_script("qh-dos DOS.OUT")
    print("Time spent in Kernel PM DOS calculation =",time.clock() - t0)
def show_spatial_dos():
    """Compute a real-space map of the DOS (an "STM-like" image).

    Either a single snapshot at one energy or a movie over a range of
    energies, using full matrix inversion ("Full") or a few eigenstates
    from Arnoldi diagonalization ("Eigen").
    """
    t0 = time.clock()
    h = load_hamiltonian() # get the hamiltonian
    mode_stm = getbox("mode_stm") # algorithm used for the map
    delta = get("smearing_spatial_DOS")
    def shot_dos(energy):
        # compute one LDOS map at *energy*; writes LDOS.OUT as a side effect
        if mode_stm == "Full": # matrix inversion
            ldos.ldos0d(h,e = energy,
                        delta = delta)
        elif mode_stm == "Eigen": # using Arnoldi, only a few eigenstates
            ldos.ldos0d_wf(h,e = energy,
                           delta = delta,
                           num_wf = int(get("nwaves_dos")),
                           robust=True,tol=delta/1000)
    mode_dosmap = getbox("mode_dosmap") # one shot or video
    if mode_dosmap=="Single shot":
        energy = get("energy_spatial_DOS") # one shot
        shot_dos(energy) # calculate
        print("Time spent in STM calculation =",time.clock() - t0)
        execute_script("qh-fast-ldos LDOS.OUT ") # using matplotlib
    if mode_dosmap=="Movie": # sweep over a range of energies
        energies = np.linspace(get("mine_movie"),get("maxe_movie"),
                               int(get("stepse_movie")))
        fof = open("FRAMES.OUT","w") # file listing the generated frames
        for energy in energies:
            print("Calculating",energy)
            shot_dos(energy) # calculate
            name = "ENERGY_"+'{0:.8f}'.format(energy)+"_LDOS.OUT"
            os.system("mv LDOS.OUT "+name) # save the data
            execute_script("qh-silent-ldos "+name) # render the frame image
            namepng = name+".png" # name of the image
            fof.write(namepng+"\n") # record the frame name
        fof.close() # close file
        os.system("xdg-open "+namepng) # open the last rendered frame
def edge_atoms(g,nn=3):
    """Flag the edge atoms of geometry *g*.

    An atom is considered to be on the edge when it has fewer than *nn*
    neighbors.  The flags are written to EDGE.OUT as columns (x, y, flag)
    and also returned.

    Parameters
    ----------
    g : geometry-like object providing get_connections(), x and y
    nn : int, coordination threshold (default 3)

    Returns
    -------
    numpy integer array with 1 for edge atoms and 0 for bulk atoms
    """
    cs = g.get_connections()  # list of neighbor indices per atom
    v = np.array([int(len(c) < nn) for c in cs])  # 1 = undercoordinated (edge)
    # Save as three columns: x, y, edge flag.  np.column_stack replaces the
    # deprecated np.matrix([...]).T used previously; the file output is
    # identical.  (The commented-out "soften first neighbors of the edge"
    # experiment that used to live here was dead code and has been removed.)
    np.savetxt("EDGE.OUT", np.column_stack([g.x, g.y, v]))
    return v  # return the array of flags
def show_potential():
    """Compute the edge-atom map of the current island and plot it."""
    island = get_geometry0d()
    edge_atoms(island)  # writes EDGE.OUT as a side effect
    execute_script("qh-absolute-potential EDGE.OUT ")
def show_lattice():
    """Build the current island geometry, write it to disk and open the
    interactive structure viewer."""
    island = get_geometry0d()
    island.write()
    print("Structure has been created")
    execute_script("qh-fast-pick ")
def show_path_dos():
    """Compute the KPM local DOS at every atom along the selected path.

    The path is (re)built by calculate_path_dos() and read back from
    PATH.OUT; one LDOS_<i>.OUT file is written per atom on the path.
    """
    calculate_path_dos() # build the path and write PATH.OUT
    h = load_hamiltonian() # get the hamiltonian
    pols = int(get("pols_path")) # number of Chebyshev polynomials
    h.intra = h.intra/6.0 # normalize into the KPM window
    atoms = np.genfromtxt("PATH.OUT").transpose()[0] # first column: atom indexes
    atoms = [int(a) for a in atoms] # indexes of the atoms
    os.system("rm LDOS_*") # clean outputs of previous runs
    ecut = np.abs(get("ecut_path")) # maximum energy in path
    if ecut>5.5: ecut = 5.9 # clamp just below the spectral bound of 6
    ecut /= 6.0 # rescale into the KPM interval
    for iatom in atoms: # loop over atoms
        print("Calculating DOS in ",iatom)
        if h.has_spin: iatom = iatom*2 # spinful: two orbitals per site
        mus = kpm.local_dos(h.intra,n=pols,i=iatom) # calculate moments
        x = np.linspace(-ecut,ecut,int(get("num_ene_path"))) # energies
        y = kpm.generate_profile(mus,x) # calculate DOS
        xout,yout = x*6.,y/6. # undo the rescaling
        yout = dos.convolve(xout,yout,delta=get("smearing_path_dos")) # add broadening
        dos.write_dos(xout,yout) # write dos in file
        fname = "LDOS_"+str(iatom)+".OUT" # name of the file
        os.system("cp DOS.OUT " + fname) # save the file
    execute_script("qh-dos-path ")
def show_path():
    """Recompute the atom path used for the path-DOS and display it."""
    calculate_path_dos()
    execute_script("qh-path ")
def calculate_path_dos():
    """Build the list of atoms lying on the segment between two chosen atoms.

    The geometry is shifted and rotated so the segment from atom
    "initial_atom" to atom "final_atom" lies along +x; every atom within
    "width_path" of the segment is accepted.  The result (index, position
    along the path, x, y) is written to PATH.OUT, sorted by path position.
    """
    i0 = int(get("initial_atom"))
    i1 = int(get("final_atom"))
    h = load_hamiltonian() # get the hamiltonian
    r0 = h.geometry.r[i0] # initial point
    r1 = h.geometry.r[i1] # final point
    # rotate a copy of the geometry so the segment lies along the x axis,
    # which makes the "is this atom close to the path" test trivial
    print("Initial position",r0)
    print("Final position",r1)
    print("Created new rotated geometry")
    gtmp = h.geometry.copy() # copy geometry
    gtmp.shift(r0) # put the initial atom at the origin
    gtmp = sculpt.rotate_a2b(gtmp,r1-r0,np.array([1.,0.,0.]))
    # positions after the shift + rotation
    r0 = gtmp.r[i0]
    r1 = gtmp.r[i1]
    dr = r1 - r0 # vector between both points
    dy = np.abs(get("width_path")) # accepted distance from the segment
    dx = np.sqrt(dr.dot(dr))+0.1 # NOTE(review): computed but never used below
    print("Initial ROTATED position",r0)
    print("Final ROTATED position",r1)
    # and every coordinate
    x1,y1 = r0[0],r0[1]
    x2,y2 = r1[0],r1[1]
    ym = (y1+y2)/2. # average y
    def fun(r):
        """Return True for atoms inside the rectangle around the segment"""
        x0 = r[0]
        y0 = r[1]
        if (x1-dy)<x0<(x2+dy) and np.abs(y0-ym)<dy: return True
        else: return False
    inds = sculpt.intersected_indexes(gtmp,fun) # indexes of the accepted atoms
    fo = open("PATH.OUT","w") # output file
    fo.write("# index of the atom, step in the path, x, y\n")
    ur = dr/np.sqrt(dr.dot(dr)) # unit vector along the segment
    steps = [(gtmp.r[i] - r0).dot(ur) for i in inds] # projection on the segment
    inds = [i for (s,i) in sorted(zip(steps,inds))] # sort atoms by projection
    steps = sorted(steps) # sort the steps
    # only select those between the initial and final points
    # NOTE(review): the filtering below was never finished; dead code
    inds0,steps0 = [],[]
    # for (i,s) in (inds,steps):
    #   if 0<s<1.
    ####################
    g = h.geometry
    for (i,s) in zip(inds,steps):
        fo.write(str(i)+" "+str(s)+" "+str(g.x[i])+" "+str(g.y[i])+"\n")
    fo.close() # close file
inipath = os.getcwd() # initial working directory (results are copied back here)
# NOTE(review): `tmppath` is assigned later at module level, before any click
# can trigger this callback, so the late binding is safe -- confirm.
save_results = lambda x: save_outputs(inipath,tmppath) # callback to save outputs

def clear_removal():
    """Forget any previously selected atoms."""
    os.system("rm SELECTED.INFO") # remove the selection file

def select_atoms():
    """Open the structure picker to choose atoms to remove."""
    execute_script("qh-fast-pick write remove ") # interactive selection (removal mode)
def select_atoms_dos():
    """Open the structure picker to choose atoms for the local DOS."""
    execute_script("qh-fast-pick write ") # interactive selection (DOS mode)
# Map button names in the Qt interface to their Python callbacks.
signals = dict()
signals["initialize"] = initialize # build and save the Hamiltonian
signals["show_ldos"] = show_ldos # show LDOS at selected atoms
signals["show_dos"] = show_dos # show total DOS
signals["show_spatial_dos"] = show_spatial_dos # show real-space DOS map
signals["show_lattice"] = show_lattice # show the structure
#signals["show_full_spectrum"] = show_full_spectrum # show all the eigenvalues
signals["show_path"] = show_path # show the path
signals["show_path_dos"] = show_path_dos # show the DOS along the path
signals["show_potential"] = show_potential # show the edge potential map
signals["save_results"] = save_results # save the results
#signals["clear_removal"] = clear_removal # clear the file
#signals["select_atoms"] = select_atoms # select_atoms
signals["select_atoms_dos"] = select_atoms_dos # pick atoms for the LDOS
window.connect_clicks(signals)
folder = create_folder()
tmppath = os.getcwd() # temporary working directory used while the app runs
initialize() # build the initial Hamiltonian once at startup
window.run() # enter the Qt event loop (blocks until the window is closed)
| [
"jose.luis.lado@gmail.com"
] | jose.luis.lado@gmail.com |
04029d54021ba3eb39836ff12ff18836c8ad318e | ed8a4499404adceaa64f9928870b50d7187aab25 | /parser.py | 75bcfa372cd3ee0f969b97be55beaf336ecc63da | [] | no_license | bishalthingom/football-data | b965cbc47ee7b716c58a4c0cedeaf383b9195fb0 | 743aa4d10b0e400c0ef1328f887115d6330068c6 | refs/heads/master | 2021-08-08T04:21:15.441709 | 2017-11-09T15:08:55 | 2017-11-09T15:08:55 | 110,129,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 947 | py | # coding: utf-8
import json
from pprint import pprint
output = open('players_data.csv', 'a+')
# input = 'trial.txt'
input = 'players_data.txt'
with open(input) as data_file:
    data_lines = data_file.readlines()
# Each input line is one JSON document holding a "playerTableStats" list of
# per-player record dicts; every record becomes one CSV row.
for line in data_lines:
    data = json.loads(line)
    for record in data["playerTableStats"]:
        fields = []
        for value in record.values():
            try:
                # Strings: utf-8 encode (Python 2) and make the value comma-safe.
                field = value.encode('utf-8').replace(',', '-')
            except Exception:
                # Non-string values (numbers, None, ...) are stringified instead.
                field = str(value).replace(',', '-')
            fields.append(field)
        # join() guarantees well-formed rows.  The old code appended a comma
        # after every field except the one keyed 'passSuccess', which produced
        # malformed CSV whenever that key was not the dict's last entry.
        output.write(','.join(fields) + '\n')
output.close() | [
"bishalthingom@gmail.com"
] | bishalthingom@gmail.com |
5069b7fa589dea216ef97b476055fcf630474880 | 39dac505e0814d8f73d21576085b02e5e54d9b05 | /67.py | 4b3bbfabcd2eafad0676f05c08d42b459cf43d65 | [] | no_license | gxmls/Python_Leetcode | 4b2ce5efb0a88cf80ffe5e57a80185ca5df91af2 | 2a9bb36c5df0eaba026183a76deb677c5bd7fd2d | refs/heads/main | 2023-06-08T03:45:43.548212 | 2021-06-22T09:46:44 | 2021-06-22T09:46:44 | 362,726,420 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 773 | py | '''
给你两个二进制字符串,返回它们的和(用二进制表示)。
输入为 非空 字符串且只包含数字 1 和 0。
示例 1:
输入: a = "11", b = "1"
输出: "100"
示例 2:
输入: a = "1010", b = "1011"
输出: "10101"
提示:
每个字符串仅由字符 '0' 或 '1' 组成。
1 <= a.length, b.length <= 10^4
字符串如果不是 "0" ,就都不含前导零。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/add-binary
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
class Solution:
    def addBinary(self, a: str, b: str) -> str:
        """Return the binary-string sum of binary strings *a* and *b*.

        int(s, 2) replaces the original '0b' + eval() round-trip: same
        result, but no arbitrary code execution on the input.
        """
        return bin(int(a, 2) + int(b, 2))[2:]
| [
"noreply@github.com"
] | noreply@github.com |
482c353fff888c769db1d16590da55024c835dc4 | 85990459c439fea21e89550eb54f28f398ef2c8b | /mysite/polls/migrations/0001_initial.py | 8979cdaf252d7d1a88eb78cef6b085d725238b12 | [] | no_license | sanchit-ahuja/Django_implementation | 904040168dc7bbdef0f96d5b045a13fc0d99387c | 7c76fd3b15e2a5db53c6ec2155b5f6753d2e7554 | refs/heads/master | 2021-09-01T07:43:01.327745 | 2017-12-25T19:44:19 | 2017-12-25T19:44:19 | 111,147,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,228 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-12-16 19:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial schema for the polls app (Question, Choice)."""
    # First migration of the app: nothing to depend on.
    initial = True
    dependencies = [
    ]
    operations = [
        # Choice: one answer option with its vote counter.
        migrations.CreateModel(
            name='Choice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=200)),
                ('votes', models.IntegerField(default=0)),
            ],
        ),
        # Question: the poll text and its publication date.
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('pub_date', models.DateTimeField(verbose_name='date published')),
            ],
        ),
        # Link each Choice to its Question; deleting a Question cascades.
        migrations.AddField(
            model_name='choice',
            name='question',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question'),
        ),
    ]
| [
"sanchitahuja205@gmail.com"
] | sanchitahuja205@gmail.com |
0042805ba01b75f72a6d182e69b0536477c353c0 | 426377fac70f31cb99e357cdedc3140162ca3a36 | /main.py | 5adf3b25c7c504484901662acf09480f67e3c33c | [] | no_license | akaha/nlq2sparql | 0300df86840845b6070437aeb44bfb83be771c75 | 76f9163a8ccde17ade452a20bc292d97c599eab2 | refs/heads/master | 2021-09-05T19:30:26.460813 | 2017-11-30T12:37:01 | 2017-11-30T12:37:01 | 103,511,884 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,840 | py | import argparse
import copy
import json
import math
from Levenshtein import distance
import logging
import operator
import re
import string
from dbpedia import DBPedia
from utils import extractTriples, extractSelect
logging.basicConfig(filename='templates.log', level=logging.DEBUG)
class LCQuad:
    """Wrapper around one LC-QuAD record, exposing its fields as attributes."""

    def __init__(self, jsonQuad):
        # Keep the raw record for debugging / later inspection.
        self.jsonQuad = jsonQuad
        self.id = jsonQuad['_id']
        self.verbalizedQuestion = jsonQuad['verbalized_question']
        self.correctedQuestion = jsonQuad['corrected_question']
        self.sparqlQuery = jsonQuad['sparql_query']
        self.sparqlTemplateID = jsonQuad['sparql_template_id']
        # Filled in later by setSparqlTemplates / extractEntities.
        self.sparqlTemplate = None
        self.sparqlQueryTriples = None
class Entity:
    """A DBpedia entity with its query placeholder (<A>), variable (?a)
    and the list of surface names used to spot it in a question."""
    def __init__(self, uri, letter, dbpedia=None):
        self.uri = uri
        self.placeholder = '<' + letter + '>'
        # str.lower() replaces the Python-2-only string.lower() helper.
        self.variable = '?' + letter.lower()
        uriName = nameFromUri(uri)
        if dbpedia is not None:
            # Prefer DBpedia-provided names; fall back to the URI-derived one.
            names = dbpedia.getNames(uri)
            self.names = names if len(names) > 0 else [uriName]
        else:
            self.names = [uriName]
class SparqlQuery:
    """Parsed view of a SPARQL query: SELECT clause plus WHERE-clause triples."""

    def __init__(self, query):
        self.query = query
        self.selectClause = extractSelect(query)
        self.whereClauseTriples = extractTriples(query)

    def __str__(self):
        rendered = [
            ' '.join([t['subject'], t['predicate'], t['object']])
            for t in self.whereClauseTriples
        ]
        return self.selectClause + ' where { ' + ' . '.join(rendered) + ' }'
def toNSpMRow (lcQuad, dbpedia=None):
    """Convert one LC-QuAD record into an NSpM row:
    [template question, template SPARQL query, generator query]."""
    # Flatten the per-triple entity URI lists and deduplicate.
    uris = set()
    for tripleEntities in extractEntities(lcQuad):
        uris.update(tripleEntities)
    # sorted() gives a deterministic URI -> placeholder-letter assignment;
    # iterating the bare set made the <A>/<B>/... mapping run-dependent.
    # (The old tuple-parameter lambda was also a SyntaxError on Python 3.)
    entities = [Entity(uri, letter, dbpedia)
                for uri, letter in zip(sorted(uris), string.ascii_uppercase)]
    nlQuestion = extractNLTemplateQuestionFromCorrectedQuestion(lcQuad, entities)
    if not nlQuestion:
        # Fall back to the verbalized question when entity spotting failed.
        nlQuestion = extractNLTemplateQuestionFromVerbalizedQuestion(lcQuad, entities)
    sparqlQuery = extractSparqlTemplateQuery(getattr(lcQuad, 'sparqlQuery'), entities)
    sparqlGeneratorQuery = extractGeneratorQuery(sparqlQuery, entities)
    return [nlQuestion, sparqlQuery, sparqlGeneratorQuery]
def extractNLTemplateQuestionFromCorrectedQuestion (lcQuad, entities):
    """Replace every entity's surface name in the corrected question with its
    placeholder (<A>, <B>, ...); returns None when an entity cannot be found
    even by fuzzy search.  NOTE: Python-2-only (`cmp=`, `string.replace`)."""
    question = getattr(lcQuad, 'correctedQuestion')
    # Order entities so that names which are substrings of other entities'
    # names are replaced last (see compareStrings), avoiding partial matches.
    compare = lambda name, names : reduce(lambda prev, nameItem : compareStrings(name, nameItem) if prev == 0 else prev, names, 0)
    compareNames = lambda names1, names2 : reduce(lambda prev, name : compare(name, names2) if prev == 0 else prev, names1, 0)
    compareEntities = lambda entity1, entity2 : compareNames(getattr(entity1, 'names'), getattr(entity2, 'names'))
    sortedEntities = sorted(entities, cmp=compareEntities)
    for entity in sortedEntities:
        names = getattr(entity, 'names')
        escapedNames = map(re.escape, names)
        placeholder = getattr(entity, 'placeholder')
        # Any of the entity's names, optionally with a plural 's', ignoring case.
        matchInQuestion = re.search(r'(' + '|'.join(escapedNames) + r')s?', question, flags=re.IGNORECASE)
        if (matchInQuestion):
            question = string.replace(question, matchInQuestion.group(0), placeholder)
        else:
            logging.debug('Fuzzy search necessary for: "' + ' | '.join(names) + '" in: ' + question + ', ' + str(getattr(lcQuad, 'id')))
            fuzzySearchMatch = fuzzySearch(names, question)
            if (fuzzySearchMatch):
                question = string.replace(question, fuzzySearchMatch.group(0), placeholder)
            else:
                logging.debug('Fuzzy entity detection failed for: "' + ' | '.join(names) + '" in: ' + question + ', ' + str(getattr(lcQuad, 'id')))
                return None
    return question
def extractNLTemplateQuestionFromVerbalizedQuestion (lcQuad, entities):
    """Turn the verbalized question (which already carries <...> markers) into
    a template question: markers matching an entity become that entity's
    placeholder, all other markers lose their brackets.
    NOTE: Python-2-only (`cmp=`, list-returning map, `string.replace`)."""
    question = getattr(lcQuad, 'verbalizedQuestion')
    wordsInBrackets = set(extractWordsInBrackets(question))
    # Substring-containing words last, so longer markers are resolved first.
    sortedWordsInBrackets = sorted(wordsInBrackets, cmp=compareStrings)
    # For each entity, the bracket word most similar to one of its names.
    placeholders = map(lambda entity : mostSimilarPlaceholder(sortedWordsInBrackets, getattr(entity, 'names')), entities)
    for bracketWord in sortedWordsInBrackets:
        withBrackets = '<' + bracketWord + '>'
        withoutBrackets = bracketWord
        replacement = getattr(entities[placeholders.index(bracketWord)], 'placeholder') if bracketWord in placeholders else withoutBrackets
        question = string.replace(question, withBrackets, replacement)
    return question
def fuzzySearch (names, question ):
    """Locate the word subsequence of *question* most similar (Levenshtein)
    to any of *names*; return its re.Match in the question, or None when the
    best distance exceeds half the matched name's length (or the sequence
    cannot be re-located).  NOTE: Python-2-only list-returning map()."""
    wordsInNames = map(lambda name : re.split(r'\W+', name), names)
    wordsInQuestion = re.split(r'\W+', question)
    # Candidate sequences need at most as many words as the longest name.
    maxSequenceLength = max(map(len, wordsInNames))
    subsequences = buildSubsequences(wordsInQuestion, maxSequenceLength)
    # [distance, closest name] for a candidate string.
    minDistance = lambda string, stringList : min(map(lambda stringItem : [distance(string, stringItem), stringItem], stringList), key=operator.itemgetter(0))
    # Each entry: [sequence, best distance, best-matching name].
    subsequencesWithLevenshteinDistance = map(lambda sequence: [sequence] + minDistance(' '.join(sequence), names), subsequences)
    mostSimilar = min(subsequencesWithLevenshteinDistance, key=operator.itemgetter(1))
    mostSimilarSequence = mostSimilar[0]
    mostSimilarDistance = mostSimilar[1]
    mostSimilarName = mostSimilar[2]
    # Tolerate edits up to half the name's length.
    tolerance = math.ceil(len(mostSimilarName) / 2)
    if (mostSimilarDistance <= tolerance):
        # Re-locate the word sequence in the original question text.
        sequencePattern = '\W+'.join(map(re.escape, mostSimilarSequence))
        sequenceInQuestionMatch = re.search(sequencePattern, question)
        if (not sequenceInQuestionMatch):
            logging.debug('Failed to retransform: ' + str(sequencePattern))
        return sequenceInQuestionMatch
    else:
        logging.debug(str(mostSimilarDistance) + ' as distance value seems too high: ' + ' '.join(mostSimilarSequence) + ' == ' + mostSimilarName + ' ?')
        return None
def compareStrings(x, y):
    """cmp-style comparator: 1 when x occurs inside y, -1 when y occurs
    inside x, 0 otherwise.  Equal strings yield 1 (x is checked first).
    Used to sort substrings after the strings that contain them."""
    if x in y:
        return 1
    if y in x:
        return -1
    return 0
def buildSubsequences (sequence, maxLength):
    """All contiguous subsequences (as tuples) of *sequence* with length
    1 .. maxLength, collected into a set."""
    return {
        tuple(sequence[start:start + length])
        for length in range(1, maxLength + 1)
        for start in range(len(sequence) - length + 1)
    }
def extractSparqlTemplateQuery (query, entities):
    """Parse *query* and turn it into a template: every entity URI is replaced
    by its placeholder and the rdf:type predicate is abbreviated to 'a'.
    NOTE: Python-2-only (`string.replace`, bare `reduce`)."""
    def replaceEntityWithPlaceholder (sparqlQuery, entity):
        # Case-insensitive, literal match of the entity URI.
        entityString = re.compile(re.escape(getattr(entity, 'uri')), re.IGNORECASE)
        triples = getattr(sparqlQuery, 'whereClauseTriples')
        placeholder = getattr(entity, 'placeholder')
        for triple in triples:
            triple['subject'] = entityString.sub(placeholder, triple['subject'])
            triple['object'] = entityString.sub(placeholder, triple['object'])
        setattr(sparqlQuery, 'selectClause', entityString.sub(placeholder, getattr(sparqlQuery, 'selectClause')))
        return sparqlQuery
    def replaceRdfTypeProperty (sparqlQuery):
        triples = getattr(sparqlQuery, 'whereClauseTriples')
        for triple in triples:
            # NOTE(review): uses an https:// rdf-syntax-ns URI; the official
            # namespace is http:// — confirm against the dataset's queries.
            triple['predicate'] = string.replace(triple['predicate'], '<https://www.w3.org/1999/02/22-rdf-syntax-ns#type>', 'a')
        return sparqlQuery
    # Apply the placeholder substitution once per entity, then fix rdf:type.
    replaceEntitiesWithPlaceholders = lambda sparqlQuery : reduce(replaceEntityWithPlaceholder, entities, sparqlQuery)
    templateQuery = replaceRdfTypeProperty(replaceEntitiesWithPlaceholders(SparqlQuery(query)))
    return templateQuery
def extractGeneratorQuery (sparqlQuery, entities):
    """Derive a 'generator' query from a template query: every entity
    placeholder in the triples becomes that entity's variable and the SELECT
    clause asks for all those variables.  The input query is not modified."""
    generator = copy.deepcopy(sparqlQuery)
    for triple in getattr(generator, 'whereClauseTriples'):
        for entity in entities:
            placeholder = getattr(entity, 'placeholder')
            variable = getattr(entity, 'variable')
            # Placeholders only ever occupy the subject or object slot.
            if triple['subject'] == placeholder:
                triple['subject'] = variable
            if triple['object'] == placeholder:
                triple['object'] = variable
    variables = [getattr(entity, 'variable') for entity in entities]
    setattr(generator, 'selectClause', 'select distinct ' + ', '.join(variables))
    return generator
def extractGeneratorQueryOnlyFromTemplateQuery (sparqlQuery):
    """Build a generator query directly from a template query string.

    Detects the <A>/<B>/<C> placeholders (case-insensitively, always in that
    order), rewrites them to ?a/?b/?c inside the where clause and selects the
    resulting variables.  Returns '' when the where clause is empty; raises
    AttributeError when the query has no {...} at all (as before).

    The unrolled per-placeholder copies of the original were collapsed into
    one loop over the fixed placeholder list — behavior is unchanged.
    """
    placeholders = [p for p in ('<A>', '<B>', '<C>')
                    if re.search(p, sparqlQuery, re.IGNORECASE)]
    whereStatementMatch = re.search(r'{(.*?)}', sparqlQuery)
    whereStatement = whereStatementMatch.group(1)
    if whereStatement:
        variables = []
        for placeholder in placeholders:
            variable = '?' + placeholder[1].lower()
            variables.append(variable)
            # NOTE: the replacement itself is case-sensitive, like the original.
            whereStatement = whereStatement.replace(placeholder, variable)
        return 'select distinct {} where {{ {} }}'.format(', '.join(variables), whereStatement)
    else:
        return ''
def shortenVariableNames (queryString):
    """Rename the SPARQL variables of *queryString* to short one-letter names
    (?x, ?y, ?z, ...).  Only variables preceded by whitespace are detected.

    Fixes over the original: the tuple-parameter lambda was a SyntaxError on
    Python 3; iterating a bare set made the letter assignment run-dependent;
    replacing short names first could clobber prefixes of longer ones.
    """
    variablePattern = r'\s+?(\?\w+)'
    variables = set(re.findall(variablePattern, queryString))
    # Deterministic order, longest names first so '?ab' is replaced before '?a'.
    orderedVariables = sorted(variables, key=lambda v: (-len(v), v))
    shortNames = ['x', 'y', 'z', 'u', 'v', 'w', 'm', 'n']
    result = queryString
    for variable, letter in zip(orderedVariables, shortNames):
        result = result.replace(variable, '?' + letter)
    return result
def nameFromUri (uri):
    """Human-readable entity name from a dbpedia resource URI: strips the
    '<http://dbpedia.org/resource/' prefix and '>' suffix, drops any '(...)'
    disambiguation part, and turns underscores into spaces.

    Uses str methods instead of the Python-2-only string.strip() helper.
    """
    name = uri.replace('<http://dbpedia.org/resource/', '').replace('>', '')
    name = re.sub(r'\(.*?\)', '', name)      # drop '(disambiguation)' parts
    return name.replace('_', ' ').strip()    # underscores -> spaces, trim
def mostSimilarPlaceholder (words, names):
    """The word whose smallest Levenshtein distance to any of *names* is
    minimal; ties resolve to the first such word, as before."""
    def bestDistance(word):
        return min(distance(word, name) for name in names)
    return min(words, key=bestDistance)
def extractWordsInBrackets (question):
    """Contents of every <...> marker in *question*, in order of appearance."""
    return re.findall(r'\<(.*?)\>', question)
def extractEntities (quad):
    """Collect, per query triple, the entity URIs that the quad's SPARQL
    template marks as placeholders; returns a list of per-triple URI lists.
    Also caches the parsed triples on the quad as 'sparqlQueryTriples'."""
    queryTriples = extractTriples(getattr(quad, 'sparqlQuery'))
    setattr(quad, 'sparqlQueryTriples', queryTriples)
    # LC-QuAD placeholders only ever occupy the subject or object slot.
    possiblePositionsForLCQuadQueries = ['subject', 'object']
    placeholderPositions = getPlaceholderPositions(getattr(quad, 'sparqlTemplate'), possiblePositionsForLCQuadQueries)
    entities = []
    for index, triple in enumerate(queryTriples):
        if len(placeholderPositions) > index:
            positions = placeholderPositions[index]
            entities.append([triple[position] for position in positions])
        else:
            # Misformed template: fewer placeholder triples than query triples.
            # Parenthesised %-format prints identically on Python 2 and 3
            # (the old bare print statement was a SyntaxError on Python 3).
            print("%s %s" % (getattr(quad, 'id'), getattr(quad, 'sparqlTemplateID')))
    return entities
def getPlaceholderPositions (template, possiblePlaceholderPositions=None):
    """For each triple in *template*, list which of the candidate positions
    ('subject'/'predicate'/'object' by default) holds a placeholder token.

    Returns a real list of lists: the original map()/filter() pipeline broke
    callers that use len() or indexing when run under Python 3.
    """
    if possiblePlaceholderPositions is None:
        possiblePlaceholderPositions = ['subject', 'predicate', 'object']
    placeholders = template['placeholders']
    return [
        [key for key in possiblePlaceholderPositions if triple[key] in placeholders]
        for triple in template['triples']
    ]
def readQuads (file):
    """Load the LC-QuAD JSON dump at *file* and wrap every record in LCQuad.

    (Parameter name 'file' kept for interface compatibility.)
    """
    # Context manager closes the handle promptly; the old code leaked it.
    with open(file) as handle:
        jsonList = json.loads(handle.read())
    return [LCQuad(record) for record in jsonList]
def setSparqlTemplates (lcQuads, templateFile):
    """Attach to every quad the template matching its sparqlTemplateID.
    Mutates the quads in place and returns the same list."""
    # Context manager closes the handle promptly; the old code leaked it.
    with open(templateFile) as handle:
        templates = json.loads(handle.read())
    # Explicit loop: the old side-effecting map() call silently does nothing
    # under Python 3, where map is lazy.
    for quad in lcQuads:
        setattr(quad, 'sparqlTemplate', findTemplate(templates, quad))
    return lcQuads
def findTemplate (templates, quad):
    """First template whose 'id' equals the quad's sparqlTemplateID, or None.

    next() on a generator stops at the first hit and works on Python 3, where
    the old len()/indexing on a lazy filter object would fail.
    """
    templateID = getattr(quad, 'sparqlTemplateID')
    return next((template for template in templates if template['id'] == templateID), None)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--quads', dest='dataset', metavar='quadFile', help='LC Quad dataset')
parser.add_argument('--templates', dest='templates', metavar='templateFile', help='templates')
args = parser.parse_args()
quadFile = args.dataset
templateFile = args.templates
rawQuads = readQuads(quadFile)
quads = setSparqlTemplates(rawQuads, templateFile)
quadsWithTemplates = filter(lambda quad: getattr(quad, 'sparqlTemplate') != None, quads)
# extractedEntities = map(extractEntities, quadsWithTemplates)
# print extractedEntities
dbpedia = DBPedia()
try:
for quad in quadsWithTemplates:
row = toNSpMRow(quad, dbpedia)
nlQuestion = row[0]
sparqlQuery = str(row[1])
generatorQuery = str(row[2])
id = getattr(quad, 'id')
print "%s\t%s\t%s\t%s" % (nlQuestion, sparqlQuery, generatorQuery, id)
finally:
dbpedia.saveCache() | [
"annkathrin.hartmann91@gmail.com"
] | annkathrin.hartmann91@gmail.com |
797fb5e4b44f2abe5c67ca7a0b9b2fba00dcd1e2 | f305f84ea6f721c2391300f0a60e21d2ce14f2a5 | /7_graph/经典题/置换环/amidakuji鬼脚图/1-最少的横线构造amidakuji.py | 32ff9e00794f1b88811986c870be0e27ba2b7ac0 | [] | no_license | 981377660LMT/algorithm-study | f2ada3e6959338ae1bc21934a84f7314a8ecff82 | 7e79e26bb8f641868561b186e34c1127ed63c9e0 | refs/heads/master | 2023-09-01T18:26:16.525579 | 2023-09-01T12:21:58 | 2023-09-01T12:21:58 | 385,861,235 | 225 | 24 | null | null | null | null | UTF-8 | Python | false | false | 889 | py | # 冒泡排序邻位交换 构造amidakuji的横线
from typing import List
def amidakuji(ends: List[int]) -> List[int]:
    """Construct the horizontal rungs of an Amidakuji via adjacent swaps
    (bubble sort).

    Args:
        ends: target final position of every line (1-indexed); sorted in place.

    Returns:
        The rungs from top to bottom; value k means a rung joining lines
        k and k+1 (1-indexed).
    """
    rungs = []
    size = len(ends)
    for sweep in range(size - 1):
        swapped = False
        for pos in range(size - 1 - sweep):
            if ends[pos] > ends[pos + 1]:
                ends[pos], ends[pos + 1] = ends[pos + 1], ends[pos]
                rungs.append(pos + 1)
                swapped = True
        if not swapped:
            break
    # The reversed swap order is the top-to-bottom rung order.
    rungs.reverse()
    return rungs
# Sanity check at import time: reproduce the worked example above.
assert amidakuji([5, 2, 4, 1, 3]) == [1, 3, 2, 4, 3, 2, 1]
| [
"lmt2818088@gmail.com"
] | lmt2818088@gmail.com |
b90aeeaf91357ef57cadf749b97cdee83370cd58 | ad23845f6e9ffb7578c19a7b532174631339eaa8 | /articles/views.py | 02e3ca619cd93ea529d2127143ef36bec4eb9c96 | [] | no_license | Papathiam/projetvacL3 | 7163b0381c860d4088bf572077ca91f2862801af | aaddc16d53bb43d46e477fb11ff0802820657bce | refs/heads/master | 2020-03-29T19:25:39.639253 | 2018-09-25T12:37:07 | 2018-09-25T12:37:07 | 150,262,551 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,301 | py | from django.shortcuts import render
# Create your views here.
from .ScanReseau import TestR
from .ScanPort import Test
from .ScanSystem import Test1
from .models import Articles,ArticlesForm
def show(request):
    """List every stored article."""
    articles = Articles.objects.all()
    return render(request, 'articles/show.html' , {'articles': articles })
def addArticle(request):
    """Display the article form; on a valid submission, save a new article.

    NOTE(review): validation also runs on GET (POST or None) and the form is
    re-rendered after saving instead of redirecting (no Post/Redirect/Get),
    so refreshing can resubmit — confirm whether that is intended.
    """
    form = ArticlesForm(request.POST or None, request.FILES)
    if form.is_valid():
        article = Articles()
        article.titre = form.cleaned_data["titre"]
        article.body = form.cleaned_data["body"]
        article.image = form.cleaned_data["image"]
        article.save()
    return render(request, 'articles/addArticle.html', {'form': form})
def menu(request):
    """Render the static menu page."""
    return render(request, 'articles/menu.html')
def acc(request):
    """Render the static home ("accueil") page."""
    return render(request, 'articles/acc.html')
def ScanReseau(request):
    """Run the network-scan helper synchronously and show its result."""
    pos1=TestR.testreseau()
    return render(request, 'articles/ScanReseau.html', {'pos1':pos1})
def ScanPort(request):
    """Run the port-scan helper synchronously and show its result."""
    pos=Test.testport()
    return render(request, 'articles/ScanPort.html', {'pos':pos})
def ScanSystem(request):
    """Run the system-scan helper synchronously and show its result."""
    poss=Test1.testsystem()
    return render(request, 'articles/ScanSystem.html', {'poss':poss})
def Apropos(request):
    """Render the static "about" page."""
    return render(request, 'articles/Apropos.html')
| [
"papathiame11@gmail.com"
] | papathiame11@gmail.com |
c11768871460d3631293721ef67d0260fd3b14c8 | 0f0101f875eb7e5276b6f2a4b687a621c50cfbf6 | /Board.py | 16bd710dce1b94c5b25a12cf1edf3df04e964d4f | [] | no_license | Maxwell-cmd881/Othello_python | 0133b7b532b56c32a0ea70efb3e47c6c1fb67c5f | 2006cbaaf917c46e8df37f99a77603c26e7285c2 | refs/heads/main | 2023-06-27T14:17:06.633167 | 2021-07-28T23:20:05 | 2021-07-28T23:20:05 | 390,535,733 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,513 | py | from Chip import Chip
class Board:
    """8x8 Othello board holding Chip objects ("_" empty, "A"/"B" pieces)."""
    # NOTE(review): mutable class attribute; it is shadowed by the instance
    # attribute assigned in __init__ and looks vestigial — confirm no caller
    # reads Board.BoardArray directly before removing it.
    BoardArray = []
def __init__(self):
self.BoardArray = [[Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_")],
[Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_")],
[Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_")],
[Chip("_"),Chip("_"),Chip("_"),Chip("A"),Chip("B"),Chip("_"),Chip("_"),Chip("_")],
[Chip("_"),Chip("_"),Chip("_"),Chip("B"),Chip("A"),Chip("_"),Chip("_"),Chip("_")],
[Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_")],
[Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_")],
[Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_"),Chip("_")]]
def getWinner(self):
intA = 0
for i in range(8):
for j in range(8):
intA = intA + 1
if intA == 32:
return "DRAW"
elif intA > 32:
return "Player A wins!!!"
else:
return "Player B wins!!!"
    def __str__(self):
        """Render the grid: column indices on top, row index left of each row."""
        strRet = "  0  1  2  3  4  5  6  7\n"
        for i in range(8):
            strRet += str(i) + " "
            for j in range(8):
                strRet += " " + self.BoardArray[i][j].getState() + " "
            strRet += "\n"
return strRet | [
"noreply@github.com"
] | noreply@github.com |
d9c5c465a1e726456f0258d91c27dba9bc8f31e3 | 3445d4fbc85d962235ff9036c687cf1c6061a6fc | /src/vabene/__init__.py | 0bc80208f2a5ae2b04a9673c604b99b944b302db | [
"MIT"
] | permissive | LaYeqa/vabene | c16c177c03abb10569a4ac98667165d0252e8795 | e69ffe8d8509b5ff775a8c31528f53c09d6bab7c | refs/heads/master | 2022-12-27T09:42:50.369092 | 2020-09-22T10:07:30 | 2020-09-22T10:07:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | from .atom import * # noqa
from .bond import * # noqa
from .molecule import * # noqa
__version__ = '0.0.5'
| [
"lukasturcani93@gmail.com"
] | lukasturcani93@gmail.com |
e58cc73de9cfb624d652ce8430bdb6f81340664c | 529b4177563ee81e2c32897dccd0f4a219bbb57c | /quests/client/utilities.py | ea15eec63bb60e109524583ec81de9659a7c03d2 | [] | no_license | Zujiry/BAI5-VS | 47607aa386ccab08399ab22efa74ed5ed398a677 | adfa98686f2f92fc35159fa832e28e4f48a3700e | refs/heads/master | 2021-09-03T16:08:55.087514 | 2018-01-10T09:55:00 | 2018-01-10T09:55:00 | 110,722,985 | 0 | 3 | null | 2017-12-05T15:57:11 | 2017-11-14T17:33:21 | Python | UTF-8 | Python | false | false | 241 | py | import requests
from quests.utils import get_config
def logout(_):
    """Menu callback: unconditionally abort the client loop (argument unused)."""
    exit_check(True)
def exit_check(exit):
    """Raise Exception('Exiting') when *exit* is truthy; otherwise no-op.

    NOTE(review): the parameter shadows the builtin exit(); renaming it would
    change the keyword-call interface, so it is kept.
    """
    if not exit:
        return
    raise Exception('Exiting')
def divide_line():
    """Print a blank line followed by a '#' rule to separate menu sections."""
    print()
print('#################################') | [
"till.pohland@haw-hamburg.de"
] | till.pohland@haw-hamburg.de |
6b2635ba044761b70c21a0615dfe7ff99392f967 | 1c6283303ceb883add8de4ee07c5ffcfc2e93fab | /Jinja2/lib/python3.7/site-packages/ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/frameRelay_template.py | ec8c961f3acb1c4fdc24e612b525e265eee952f2 | [] | no_license | pdobrinskiy/devcore | 0f5b3dfc2f3bf1e44abd716f008a01c443e14f18 | 580c7df6f5db8c118990cf01bc2b986285b9718b | refs/heads/main | 2023-07-29T20:28:49.035475 | 2021-09-14T10:02:16 | 2021-09-14T10:02:16 | 405,919,390 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,002 | py | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FrameRelay(Base):
    """Auto-generated IxNetwork REST traffic-template stack for the Frame
    Relay header.  Each property exposes one header field as a Multivalue
    resolved through the SDM attribute map below."""
    __slots__ = ()
    _SDM_NAME = 'frameRelay'
    # Maps Python attribute names to their SDM multivalue paths on the server.
    _SDM_ATT_MAP = {
        'Address2ByteDlciHiOrderBits': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.dlciHiOrderBits-1',
        'Address2ByteCrBit': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.crBit-2',
        'Address2ByteEa0Bit': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.ea0Bit-3',
        'Address2ByteDlciLoOrderBits': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.dlciLoOrderBits-4',
        'Address2ByteFecnBit': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.fecnBit-5',
        'Address2ByteBecnBit': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.becnBit-6',
        'Address2ByteDeBit': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.deBit-7',
        'Address2ByteEa1Bit': 'frameRelay.header.frameRelayTag.frameRelay.address.address2Byte.ea1Bit-8',
        'HeaderControl': 'frameRelay.header.control-9',
        'PaddingPad': 'frameRelay.header.padding.pad-10',
        'HeaderNlpid': 'frameRelay.header.nlpid-11',
    }

    def __init__(self, parent, list_op=False):
        # Standard restpy node constructor; all state lives on the parent/session.
        super(FrameRelay, self).__init__(parent, list_op)

    @property
    def Address2ByteDlciHiOrderBits(self):
        """
        Display Name: DLCI High Order Bits
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteDlciHiOrderBits']))

    @property
    def Address2ByteCrBit(self):
        """
        Display Name: CR Bit
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteCrBit']))

    @property
    def Address2ByteEa0Bit(self):
        """
        Display Name: EA0 Bit
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteEa0Bit']))

    @property
    def Address2ByteDlciLoOrderBits(self):
        """
        Display Name: DLCI Low Order Bits
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteDlciLoOrderBits']))

    @property
    def Address2ByteFecnBit(self):
        """
        Display Name: FECN Bit
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteFecnBit']))

    @property
    def Address2ByteBecnBit(self):
        """
        Display Name: BECN Bit
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteBecnBit']))

    @property
    def Address2ByteDeBit(self):
        """
        Display Name: DE Bit
        Default Value: 0
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteDeBit']))

    @property
    def Address2ByteEa1Bit(self):
        """
        Display Name: EA1 Bit
        Default Value: 1
        Value Format: decimal
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['Address2ByteEa1Bit']))

    @property
    def HeaderControl(self):
        """
        Display Name: Control
        Default Value: 0x03
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderControl']))

    @property
    def PaddingPad(self):
        """
        Display Name: Pad
        Default Value: 0x00
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['PaddingPad']))

    @property
    def HeaderNlpid(self):
        """
        Display Name: NLPID
        Default Value: 0xCC
        Value Format: hex
        """
        from ixnetwork_restpy.multivalue import Multivalue
        return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderNlpid']))

    def add(self):
        """Create this stack instance on the server from the current locals
        (auto-generated restpy helper)."""
        return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
| [
"pdobrinskiy@yahoo.com"
] | pdobrinskiy@yahoo.com |
2ca81778ad75b54b1ee1312e31d2fda72511ea5a | e6cb82c67747c557e097c6d0eabba366a4e4ae38 | /1abtraction.py | 60bf3f434d0e9075bc876693f476b2121831e64e | [] | no_license | rajeshkarel/demo-python | e176f898601c89d37e85c7563fec8fe2b8adb2e4 | 8fdbf3fb7a761376aec65aa862ca341b58663fb6 | refs/heads/master | 2020-07-15T09:25:14.615926 | 2019-09-07T11:24:08 | 2019-09-07T11:24:08 | 205,532,085 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 130 | py | class Myclass:
    def __init__(self):
        # Double leading underscore triggers name mangling: stored as _Myclass__y.
        self.__y = 3
m = Myclass()
# The "private" attribute is still reachable from outside via its mangled name.
print(m._Myclass__y)
# The above is called name mangling
| [
"rajesh.karel@gmail.com"
] | rajesh.karel@gmail.com |
d19dfa3eed0f1bc3691252d8686b954a26d90935 | 675a042d025bd1fde07a5698f70f87455b37cc77 | /Player.py | 19d3265b7b84daf0e2cd38a77804fdff9c42f1c3 | [] | no_license | NontasPapapadopoulos/tic-tac-toe | 7af31356a3038d1b2d35d56d64fca622e9ab4e23 | 9e0bb9f15425d37198d0242d3a1998b12984b8a9 | refs/heads/master | 2023-07-13T15:54:07.217309 | 2021-08-25T12:44:59 | 2021-08-25T12:44:59 | 399,815,655 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | class Player:
    def __init__(self, mark):
        # The board symbol identifying this player.
        self.mark = mark
def __repr__(self):
return "Player with mark " + self.mark
| [
"epameinondas.papadopoulos@ibm.com"
] | epameinondas.papadopoulos@ibm.com |
6c2e2c642c2a794300e72f27acca89653876696b | 829caaa5b382f0ff6147569fb4648ac3a44369d1 | /BlueScore.py | 9cf4beb834c80554b04d4d6472e7743306f8c3dc | [] | no_license | dani0f/Validador-para-GANS-de-texto | f92e7b0adcfeb528b4c2af4e83fa1e6ecb267ad3 | c42458fe60bbcea81a5c49fd436949d0dc848fc3 | refs/heads/master | 2023-08-14T05:26:43.404929 | 2021-10-03T20:09:36 | 2021-10-03T20:09:36 | 411,432,992 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,216 | py | from nltk.translate.bleu_score import sentence_bleu,SmoothingFunction
import csv
def csvToList(maxCorpusLen, minStringLen, filename):
    """Tokenised first-column values from a CSV file.

    Rows whose first field is shorter than *minStringLen* are skipped;
    collection stops once *maxCorpusLen* entries have been gathered.
    """
    collected = []
    with open(filename, newline='') as handle:
        for record in csv.reader(handle):
            if len(record[0]) >= minStringLen:
                collected.append(record[0].split())
            # Checked on every row, exactly as the original did.
            if len(collected) == maxCorpusLen:
                break
    return collected
def BleuScore(referencesList, candidate):
    """Sentence-level BLEU-4 of *candidate* against *referencesList*,
    with uniform n-gram weights and NIST-style smoothing (method4)."""
    return sentence_bleu(
        referencesList,
        candidate,
        weights=(0.25, 0.25, 0.25, 0.25),
        smoothing_function=SmoothingFunction().method4,
        auto_reweigh=False,
    )
def BleuScoreFromTxt(referencesList, minStringLen, filename):
    """BLEU score for every line of *filename* that is at least
    *minStringLen* characters long, in file order."""
    with open(filename, newline='') as handle:
        lines = handle.read().split("\n")
    return [BleuScore(referencesList, line.split())
            for line in lines
            if len(line) >= minStringLen]
| [
"daniela.moreno1@mail.udp.cl"
] | daniela.moreno1@mail.udp.cl |
badf5c3a1f5ca3f8d0251b3d7208bfd58f21eaad | b5654252203be0ca9007169418a2638b5c591775 | /music/migrations/0001_initial.py | 0b72262c0e7986a39cf7ddbcc0e503e29a7cc6f2 | [] | no_license | Vasion05/django-app | ff578e4ebbca69b8673e6168c31a66c1b2503cd1 | abd6c6afdcfb45aa80cf1a6cfa31dec5aaa6fc62 | refs/heads/master | 2021-01-11T13:43:29.021293 | 2017-06-23T08:26:59 | 2017-06-23T08:26:59 | 95,101,987 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,238 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2017-06-22 05:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the Album and Song tables.

    Django derives its migration state from these exact field definitions,
    so they should not be hand-edited after being applied.
    """

    # First migration of the app: no prior migration state to depend on.
    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Album',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('artist', models.CharField(max_length=250)),
                ('album_title', models.CharField(max_length=500)),
                ('genre', models.CharField(max_length=100)),
                ('album_logo', models.CharField(max_length=1000)),
            ],
        ),
        migrations.CreateModel(
            name='Song',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file_type', models.CharField(max_length=10)),
                ('song_title', models.CharField(max_length=250)),
                # on_delete=CASCADE: deleting an Album also deletes its Songs.
                ('album', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='music.Album')),
            ],
        ),
    ]
| [
"vasion.cse@gmail.com"
] | vasion.cse@gmail.com |
7a2bc49b98de8f205dd372825033a0ec455c0bdb | 31c31345946b3526ffe3f1eafbc9dd7cdb48e03a | /URI Online Judge/AD-HOC/1087 - Dama/dama.py | 6411d83e961c7be5c11c04add3307d57fad39af3 | [] | no_license | felipefoschiera/Competitive-Programming | 84967cb7c6b82df5990cccea5d5b6862b9e63f65 | fe664046d0161fd6a15d4b8d8f983e77c6dc3dcb | refs/heads/master | 2022-02-23T16:07:04.326089 | 2019-10-04T19:22:22 | 2019-10-04T19:22:22 | 198,893,046 | 0 | 0 | null | 2019-07-25T19:53:36 | 2019-07-25T19:53:36 | null | UTF-8 | Python | false | false | 562 | py | # -*- coding: utf-8 -*-
def min_moves(x1, y1, x2, y2):
    """Return the minimum number of queen moves from (x1, y1) to (x2, y2).

    0 if already on the target square; 1 if the target shares a row, a
    column or a diagonal; otherwise 2 (a queen always reaches any other
    square in at most two moves).
    """
    if x1 == x2 and y1 == y2:
        return 0
    # Same row/column, or same diagonal (|dx| == |dy|).
    # Fixes the original's `(x2 - x1) == (y1 - y1)` typo, which always
    # compared against 0 and was redundant with the row/column test.
    if x1 == x2 or y1 == y2 or abs(x2 - x1) == abs(y2 - y1):
        return 1
    return 2


if __name__ == "__main__":
    # Read coordinate quadruples until "0 0 0 0" or end of input.
    while True:
        try:
            x1, y1, x2, y2 = map(int, input().split())
            if x1 == 0 and y1 == 0 and x2 == 0 and y2 == 0:
                break
            print(min_moves(x1, y1, x2, y2))
        except EOFError:
            break
| [
"felipefoschiera@googlemail.com"
] | felipefoschiera@googlemail.com |
def filter_short_strings(strings, max_len=4):
    """Return the strings whose length is at most max_len (default 4)."""
    return [s for s in strings if len(s) <= max_len]


if __name__ == "__main__":
    # NOTE(review): eval() on raw user input executes arbitrary code.
    # Kept for parity with the original exercise, but ast.literal_eval
    # would be the safe choice here.
    lijst = eval(input("Geef lijst met minimaal 10 strings: "))
    print(filter_short_strings(lijst))
"42969037+bramvanulden@users.noreply.github.com"
] | 42969037+bramvanulden@users.noreply.github.com |
6f7de31aebfc2c31d20078d9797385969afa52bf | 355b01ae2aad96970828a2cce94cf46fa8ce5c70 | /CleanInventory.py | f33567510bb1cbb2a4812399e8f37669b24ec7e1 | [
"Apache-2.0"
] | permissive | hpkGgPmK/Clean_inventory | c6deab110e465e1d4cfc3a4758a4cf67473201d0 | 5391b92dc5aab7e475370bcbc5e22626c7c955c8 | refs/heads/main | 2023-07-19T08:14:28.001402 | 2021-09-10T12:09:58 | 2021-09-10T12:09:58 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 49,954 | py | # -*- coding: utf-8 -*-
"""
Created on Fri Jan 17 17:57:26 2020
------
-If adding more stores,channels,statuses - more filter values:
1 - need to add the value to the - save_dict,
2 - add more variables - tkinter variables and set variables
3 - add it to the FILTERS menu checkbutton - menu.add_checkbutton
4 - add the .get() funtion, - get_filter_val()
5 - add to the list in - search_func() Store_list
SaleChan_list
Status_list
-------
file_name = os.path.basename(DB_path)
@author: CoilingDragon
"""
from tkinter import *
from PIL import ImageTk,Image
from tkinter import filedialog
import sqlite3 as lite
import json
import time
import shutil # for DB backup
#from csv import writer
import csv
import os
from extraPacks.Utils import *
def get_time_call():
    """Refresh the module-level ``time_str`` timestamp and return it.

    Format: ``YYYY-MM-DD HHhMMmSSs`` (safe for use in file names).
    """
    global time_str
    now = time.localtime(time.time())
    time_str = time.strftime('%Y-%m-%d %Hh%Mm%Ss', now)
    return time_str
def update_logger(log_str):
    """Append a timestamped entry to the log file and refresh the log listbox."""
    global time_str
    get_time_call()  # refreshes the module-level time_str
    with open(logger_path, "a") as write_file:
        write_file.write(f"{time_str} | {log_str}\n")
    try:
        # Re-render the whole log file in the GUI list, newest at the bottom.
        listNodes_logger.delete(0, END)
        with open(logger_path, 'r') as save_file:
            for line in save_file:
                listNodes_logger.insert(END, (line + " |"))
                listNodes_logger.insert(END, ('_'))
        listNodes_logger.yview_moveto('1.0')
        listNodes_logger.config(yscrollcommand=scrollbar.set)
    except:
        listNodes_logger.insert(END, ("Log.txt file path error"))
#--------------------------------------BUTTONS DEFINE-----------------------------------------#
def sqlite3_backup():
    """Create a timestamped copy of the database in Backups/.

    The database is locked with BEGIN IMMEDIATE while the file is copied
    so the backup is consistent, then the lock is rolled back.

    Raises:
        Exception: if the Backups/ directory does not exist.
    """
    backupdir = 'Backups/'
    if not os.path.isdir(backupdir):
        raise Exception("Backup directory does not exist: {}".format(backupdir))
    backup_file = os.path.join(backupdir, get_time_call() + '_DB_BACKUP.db')
    connection = lite.connect(DB_path)
    try:
        cursor = connection.cursor()
        # Lock database before making a backup
        cursor.execute('begin immediate')
        # Make new backup file
        shutil.copyfile(DB_path, backup_file)
        # Unlock database
        connection.rollback()
    finally:
        # Bug fix: the connection was previously never closed (leak).
        connection.close()
    label_loading['text'] = ("{}".format(backup_file))
def upload_bycsv():
    """Ask for a CSV file and bulk-insert its data rows into the items table.

    The first CSV line is treated as a header and skipped.  A database
    backup is taken before anything is written.
    """
    global time_str
    sqlite3_backup()
    canvas.filename = filedialog.askopenfilename(
        initialdir="c:/", title="Select CSV",
        filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
    upl_csv_path = canvas.filename
    db_file = DB_filepath_read(BASE_DIR)
    if upl_csv_path == '':
        return  # dialog cancelled
    con = lite.connect(db_file)
    cur = con.cursor()
    cur.execute("""CREATE TABLE IF NOT EXISTS items(Row INTEGER PRIMARY KEY AUTOINCREMENT,ASIN TEXT,SKU TEXT,UPC TEXT NOT NULL,Store TEXT,SaleChan TEXT,Status TEXT);""")
    with open(upl_csv_path, newline='', encoding='utf-8') as csv_file:
        rows = csv.reader(csv_file)
        next(rows)  # first line is the header
        payload = list(rows)
    cur.executemany("INSERT INTO items (ASIN,SKU,UPC,Store,SaleChan,Status) VALUES (?,?,?,?,?,?);", payload)
    con.commit()
    con.close()
    lable_ID_querry()
    update_logger(f"CSV UPLOAD successfully - {upl_csv_path}")
def upload_by_csv_smart():
    """Upsert item rows from a user-chosen CSV into the items table.

    Each CSV data row is (ASIN, SKU, UPC, Store, SaleChan, Status).  Rows
    whose ASIN already exists are UPDATEd (UPC/Store are merged with the
    stored values via dict_cell_fixing); unknown ASINs are INSERTed.
    A database backup is taken first; progress is shown every 150 rows.

    NOTE(review): the UPDATE statement is built with f-string interpolation
    (injection-prone); the connection is not closed when an exception is
    taken; a cancelled file dialog ('' path) raises on open().
    """
    global time_str
    start_time = time.time()
    sqlite3_backup()
    canvas.filename = filedialog.askopenfilename(initialdir = "c:/", title = "Select CSV", filetypes = (("csv files","*.csv"),("all files","*.*")))
    upl_csv_path = canvas.filename
    DB_path = DB_filepath_read(BASE_DIR)
    path = upl_csv_path
    with open(path,newline='',encoding='utf-8') as csv_file:
        reader = csv.reader(csv_file)
        header = next(reader) #first line is header
        edit_data = [row for row in reader] #reads the remaining data
    len_each_errors = []  # rows with the wrong column count, reported at the end
    i = 1
    try:
        con = lite.connect(DB_path)
        cur = con.cursor()
        for each in edit_data:
            i += 1
            if i%150 == 0:
                # Refresh the progress label and keep the GUI responsive.
                elapsed_time = round((time.time() - start_time),2)
                hours_ = elapsed_time//3600
                min_ = (elapsed_time%3600)//60
                sec_ = (elapsed_time%60)
                label_loading['text'] = str(round((i/(len(edit_data))*100),2)) + f'% Loading -- Elapsed time: {int(hours_)}h,{int(min_)}m,{round(sec_,1)}s'
                root.update()
            if len(each)!=6:
                # Malformed row: remember it and keep going.
                len_each_errors += each
                print('error on ----',each)
                continue
            #edit_Row = int(each[0].strip())#Row
            edit_ASIN = each[0].strip()#ASIN
            edit_SKU = each[1].strip()#SKU
            edit_UPC = each[2].strip()#UPC
            #assert ':::' not in edit_UPC
            edit_Store = each[3].strip()#Store
            #assert ':::' not in edit_Store
            edit_SaleChan = each[4].strip()#SaleChan
            edit_Status = each[5].strip()#Status
            assert edit_Status in save_dict.keys()#-------------------------------remove#
            cur.execute("SELECT * FROM items WHERE ASIN=?", (edit_ASIN,))
            db_data = cur.fetchone()
            #print(db_data)
            #FI found just eddit
            if db_data != None:
                # Merge new 'key?value' entries into the stored UPC/Store cells.
                edit_UPC = dict_cell_fixing(db_data[3],edit_UPC)#UPC
                edit_Store = dict_cell_fixing(db_data[4],edit_Store)#Store
                querry = f"""UPDATE items SET
                            SKU = "{edit_SKU}",
                            UPC = "{edit_UPC}",
                            Store = "{edit_Store}",
                            SaleChan = "{edit_SaleChan}",
                            Status = "{edit_Status}"
                            WHERE ASIN = "{edit_ASIN}";"""
                cur.execute(querry)
            #FI not found UPLOAD
            else:
                cur.execute("INSERT INTO items (ASIN,SKU,UPC,Store,SaleChan,Status) VALUES (?,?,?,?,?,?);", [edit_ASIN,edit_SKU,edit_UPC,edit_Store,edit_SaleChan,edit_Status])
        con.commit()
        con.close()
        lable_ID_querry()
        log_str = f"MATCHED by csv {upl_csv_path}"#-------------------------------remove#
        update_logger(log_str)#-------------------------------remove#
        if len_each_errors:
            update_logger('\n'.join(len_each_errors))
        try:
            # hours_/min_/sec_ only exist if the 150-row branch ran at least once.
            label_loading['text'] = f'100% Loaded -- Elapsed time: {int(hours_)}h,{int(min_)}m,{round(sec_,1)}s'
        except:
            pass
    except AssertionError as e:
        # An unknown Status value aborts the whole import at row i.
        print(e)
        log_str = f"AssertionError ERROR IN CSV EDIT {i}, row:{edit_ASIN}---->{e}"
        print('ROW:', i)
        update_logger(log_str)#-------------------------------remove#
    except IndexError as e:
        print(e)
        log_str = f"IndexError ERROR IN CSV EDIT {i}, row:{edit_ASIN}---->{e}"
        print('ROW:', i)
        update_logger(log_str)#-------------------------------remove#
def cell_fixing(db_cell, input_cell):
    """Substitute input_cell into matching entries of a ':::'-joined cell.

    input_cell has the form 'key?value'.  Every entry of db_cell that
    contains the key (substring match) is replaced by 'key?value'; the
    other entries are kept unchanged.
    """
    key, _sep, rest = input_cell.partition('?')
    replacement = key + '?' + rest
    updated = [replacement if key in entry else entry
               for entry in db_cell.split(':::')]
    return ':::'.join(updated)
def dict_cell_fixing(db_cell, input_cell):
    """Merge ':::'-separated 'key?value' entries from two cells.

    Entries from db_cell are collected first (the first occurrence of a
    key wins); entries from input_cell then overwrite the stored value for
    their key.  Returns the merged entries joined with ':::'.

    input_cell may be None or empty, in which case db_cell's (de-duplicated)
    entries are returned unchanged.  Replaces the original's bare
    ``except: pass`` blocks with explicit handling.
    """
    merged = {}
    for entry in filter(None, db_cell.split(':::')):
        key, _sep, value = entry.partition('?')
        if key not in merged:  # first occurrence in db_cell wins
            merged[key] = value
    if input_cell:  # input entries overwrite stored ones
        for entry in filter(None, input_cell.split(':::')):
            key, _sep, value = entry.partition('?')
            merged[key] = value
    return ':::'.join(key + '?' + merged[key] for key in merged)
def edit_bycsv():
    """Apply per-row edits from a user-chosen CSV to the items table.

    Each CSV data row is (Row, ASIN, SKU, UPC, Store, SaleChan, Status).
    Rows whose UPC or Store already contain ':::' are written verbatim;
    otherwise the stored UPC/Store are merged in via dict_cell_fixing.
    A database backup is taken first; progress is shown every 150 rows.

    NOTE(review): if the file dialog is cancelled, edit_data is never
    bound and the generic except below logs a NameError; the UPDATEs are
    built with f-string interpolation (injection-prone); the connection is
    not closed when an exception is taken.
    """
    global time_str
    sqlite3_backup()
    start_time = time.time()
    canvas.filename = filedialog.askopenfilename(initialdir = "c:/", title = "Select CSV", filetypes = (("csv files","*.csv"),("all files","*.*")))
    edit_csv_path = canvas.filename
    get_time_call()
    if edit_csv_path != '':
        con = lite.connect(DB_path)
        cur = con.cursor()
        #cur.execute("""CREATE TABLE IF NOT EXISTS items(Row INTEGER PRIMARY KEY AUTOINCREMENT,ASIN TEXT,SKU TEXT,UPC TEXT NOT NULL,Store TEXT,SaleChan TEXT,Status TEXT);""")
        path = edit_csv_path
        #csv_file = open(path,newline='',encoding='utf-8')
        with open(path,newline='',encoding='utf-8') as csv_file:
            reader = csv.reader(csv_file)
            header = next(reader) #first line is header
            edit_data = [row for row in reader] #reads the remaining data
    try:
        con = lite.connect(DB_path)
        cur = con.cursor()
        row_count = 0
        for each in edit_data:
            row_count +=1
            if row_count%150 == 0:
                # Refresh the progress label and keep the GUI responsive.
                elapsed_time = round((time.time() - start_time),2)
                hours_ = elapsed_time//3600
                min_ = (elapsed_time%3600)//60
                sec_ = (elapsed_time%60)
                label_loading['text'] = str(round((row_count/(len(edit_data))*100),2)) + f'% Loading -- Elapsed time: {int(hours_)}h,{int(min_)}m,{round(sec_,1)}s'
                root.update()
            if ':::' in each[3] or ':::' in each[4]:
                # Already-merged UPC/Store: write the row exactly as given.
                item_key = int(each[0].strip())
                item_asin = each[1].strip()
                item_sku = each[2].strip()
                item_upc = each[3].strip()
                item_store = each[4].strip()
                item_acc = each[5].strip()
                item_status = each[6].strip()
                assert item_status in save_dict.keys()
                #cur.execute("SELECT * FROM items WHERE Row=?", (item_key,))
                #db_data = cur.fetchone()
                #if db_data != None:
                #print(item_key)
                querry = f"""UPDATE items SET ASIN = \"{item_asin}\", SKU = \"{item_sku}\", UPC = \"{item_upc}\", Store = \"{item_store}\", SaleChan = \"{item_acc}\", Status = \"{item_status}\" WHERE Row = {item_key}"""
                cur.execute(querry)
                #con.commit()
            else:
                # Plain row: merge UPC/Store with what is already stored.
                edit_Row = int(each[0].strip())#Row
                edit_ASIN = each[1].strip()#ASIN
                edit_SKU = each[2].strip()#SKU
                edit_UPC = each[3].strip()#UPC
                edit_Store = each[4].strip()#Store
                edit_SaleChan = each[5].strip()#SaleChan
                edit_Status = each[6].strip()#Status
                assert edit_Status in save_dict.keys()
                cur.execute("SELECT * FROM items WHERE Row=?", (edit_Row,))
                db_data = cur.fetchone()
                #print(db_data)
                if db_data != None:
                    '''edit_Row = db_data[0]#Row
                    edit_ASIN = db_data[1]#ASIN
                    edit_SKU = db_data[2]#SKU'''
                    edit_UPC = dict_cell_fixing(db_data[3],edit_UPC)#UPC
                    edit_Store = dict_cell_fixing(db_data[4],edit_Store)#Store
                    '''edit_SaleChan = db_data[5]#SaleChan
                    edit_Status = db_data[6]#Status'''
                    #print(edit_Row)
                    querry = f"""UPDATE items SET ASIN = \"{edit_ASIN}\", SKU = \"{edit_SKU}\", UPC = \"{edit_UPC}\", Store = \"{edit_Store}\", SaleChan = \"{edit_SaleChan}\", Status = \"{edit_Status}\" WHERE Row = {edit_Row}"""
                    cur.execute(querry)
        con.commit()
        con.close()
        log_str = f"EDITED BY CSV {edit_csv_path}"
        try:
            # hours_/min_/sec_ only exist if the 150-row branch ran at least once.
            label_loading['text'] = f'100% Loaded -- Elapsed time: {int(hours_)}h,{int(min_)}m,{round(sec_,1)}s'
        except:
            pass
        update_logger(log_str)
    except Exception as e:
        print(e)
        log_str = f"ERROR IN CSV EDIT---->{e}"
        update_logger(log_str)
def upload_single():
    """Open a popup window with six labelled entry fields for adding one item.

    The entry widgets are published through the top_*_Entry globals so the
    SUBMIT callback (upload_single_submit_bttn) can read and clear them.
    """
    global top_asin_Entry
    global top_sku_Entry
    global top_upc_Entry
    global top_store_Entry
    global top_acc_Entry
    global top_status_Entry
    popup = Toplevel(root)
    popup.title('Clean Inventory')
    popup.configure(background="black")
    popup.resizable(False, False)
    field_names = ['ASIN', 'SKU', 'UPC', 'Store', 'SaleChan', 'Status']
    entries = []
    for row_idx, field in enumerate(field_names):
        lbl = Label(popup, text=field, width=10, borderwidth=4, bg='white',
                    fg='black', font=("Helvetica", 14), relief=RAISED)
        ent = Entry(popup, text=field, width=25, borderwidth=4, bg='white',
                    fg='black', font=("Helvetica", 14))
        lbl.grid(row=row_idx, column=0, sticky=S)
        ent.grid(row=row_idx, column=1, sticky=N)
        entries.append(ent)
    (top_asin_Entry, top_sku_Entry, top_upc_Entry,
     top_store_Entry, top_acc_Entry, top_status_Entry) = entries
    submit = Button(popup, text='SUBMIT', width=35, borderwidth=4, bg='white',
                    fg='black', font=("Helvetica", 14),
                    command=upload_single_submit_bttn)
    submit.grid(row=6, column=0, columnspan=2, sticky=N)
def upload_single_submit_bttn():
    """Read the six popup entry fields, clear them, and insert the item.

    The row is inserted only when every field is non-empty; the row-count
    label is refreshed either way.
    """
    widgets = (top_asin_Entry, top_sku_Entry, top_upc_Entry,
               top_store_Entry, top_acc_Entry, top_status_Entry)
    data_us = tuple(w.get() for w in widgets)
    for w in widgets:
        w.delete(0, END)
    if all(field != '' for field in data_us):
        con = lite.connect(DB_path)
        cur = con.cursor()
        cur.execute("""CREATE TABLE IF NOT EXISTS items(Row INTEGER PRIMARY KEY AUTOINCREMENT,ASIN TEXT,SKU TEXT,UPC TEXT NOT NULL,Store TEXT,SaleChan TEXT,Status TEXT);""")
        cur.executemany("""INSERT INTO items (ASIN,SKU,UPC,Store,SaleChan,Status) VALUES (?,?,?,?,?,?);""", [data_us])
        con.commit()
        con.close()
        lable_ID_querry()
        log_str = f"Uploaded single item with UPC = {data_us[2]}"
        update_logger(log_str)
    # Refreshed unconditionally, mirroring the original control flow.
    lable_ID_querry()
def lable_ID_querry():
    """Refresh the row-count label with the number of rows in the items table.

    Shows 'err' in the label when the database cannot be queried.
    Fixes three defects in the original: it opened the database twice and
    leaked the first connection; ``con.close()`` ran outside the try and
    raised NameError when the connect itself failed; and the except branch
    assigned dead local variables.
    """
    con = None
    try:
        con = lite.connect(DB_path)
        cur = con.cursor()
        # COUNT(*) avoids fetching every Row value just to take len().
        cur.execute("SELECT COUNT(*) FROM items")
        label_ID_inDB['text'] = cur.fetchone()[0]
    except Exception:
        label_ID_inDB['text'] = 'err'
    finally:
        if con is not None:
            con.close()
def DB_path_funct():
    """Prompt the user for a database file path, then close the main window."""
    global DB_path
    chosen = filedialog.askopenfilename(
        initialdir="c:/", title="Select CSV",
        filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
    canvas.filename = chosen
    DB_path = chosen
    root.destroy()
def get_filter_val():
    """Copy every filter checkbox state from its tkinter variable into save_dict."""
    global save_dict
    filter_vars = {
        # Store filters
        "MF?": MF, "M123?": M123, "WW?": WW, "GC?": GC,
        "PBA?": PBA, "PAS?": PAS, "MSL?": MSL, "PC?": PC,
        "SW?": SW, "ZOR?": ZOR, "VIP?": VIP, "SAM?": SAM,
        "FORZ?": FORZ, "CPO?": CPO, "ABN?": ABN, "PNM?": PNM,
        # Sale-channel filters
        "AmD": AmD, "AmM": AmM, "AmS": AmS,
        # Status filters
        "redBrands": redBrands, "other": other, "active": active,
        "actNoRep": actNoRep, "notComp": notComp, "closed": closed,
        "deleted": deleted, "ToBList": ToBList, "ToBCheckd": ToBCheckd,
    }
    for key, var in filter_vars.items():
        save_dict[key] = var.get()
save_dict["ToBCheckd"] = ToBCheckd.get()
def search_func():
    """Search the items table with the entry boxes and checkbox filters.

    Builds a SELECT whose WHERE clause ORs the ASIN/SKU/UPC entry values
    and ANDs in one OR-group per filter category (Store, SaleChan, Status),
    then renders every matching row in the main listbox and updates the
    progress/count labels and the column-header canvas text.

    NOTE(review): the query is assembled with f-string interpolation of the
    entry-box text (SQL-injection-prone) — parameterized queries would be
    safer.
    """
    global querry
    global count_searches
    global result
    #------SEARCH BARS
    item_asin = entry_ASIN.get()
    item_sku = entry_SKU.get()
    item_upc = entry_UPC.get()
    get_filter_val()
    #-----FILTERS
    Store_list_querry = ''
    SaleChan_list_querry = ''
    Status_list_querry = ''
    Store_list = ["MF?","WW?","M123?","GC?","PBA?","PAS?","MSL?",
                   "PC?","SW?","ZOR?","VIP?","SAM?","FORZ?","CPO?","ABN?","PNM?"]
    SaleChan_list = ["AmD","AmM","AmS"]
    Status_list = ["redBrands","other","active","actNoRep","notComp","closed",
                   "deleted","ToBList","ToBCheckd"]
    # Build "(Store LIKE '%X%' OR ...)" from the checked store filters.
    # c counts checked entries; m detects the last iteration, where the
    # trailing " OR " (4 chars) is trimmed and the group is closed.
    c = 0
    m = 0
    for x in Store_list:
        if save_dict[x] == 1:
            if c == 0:
                Store_list_querry = '('
            #print('add to querry')
            Store_list_querry = Store_list_querry + f"Store LIKE '%{x}%' OR "
            c+=1
        if m == (len(Store_list)-1) and c!=0:
            Store_list_querry = Store_list_querry[0:(len(Store_list_querry)-4)] +')'
        m+=1
    # Same construction for the sale-channel filter group.
    c = 0
    m = 0
    for x in SaleChan_list:
        save_dict[x]
        if save_dict[x] == 1:
            if c == 0:
                SaleChan_list_querry = '('
            #print('add to querry')
            SaleChan_list_querry = SaleChan_list_querry + f"SaleChan LIKE '%{x}%' OR "
            c+=1
        if m == (len(SaleChan_list)-1) and c!=0:
            SaleChan_list_querry = SaleChan_list_querry[0:(len(SaleChan_list_querry)-4)] +')'
        m+=1
    # Status uses exact equality rather than LIKE.
    c = 0
    m = 0
    for x in Status_list:
        save_dict[x]
        if save_dict[x] == 1:
            if c == 0:
                Status_list_querry = '('
            #print('add to querry')
            Status_list_querry = Status_list_querry + f"Status = '{x}' OR "
            c+=1
        if m == (len(Status_list)-1) and c!=0:
            Status_list_querry = Status_list_querry[0:(len(Status_list_querry)-4)] +')'
        m+=1
    #print(Store_list_querry+'\n')
    #print(SaleChan_list_querry+'\n')
    #print(Status_list_querry+'\n')
    #print(save_dict)
    #------FILTERS
    # Entry-box values are OR-ed together inside one parenthesised group...
    querry = "SELECT * FROM 'items'"
    if item_asin != '' or item_sku != '' or item_upc != '':
        if item_asin != '':
            item_asin_querry = f"(ASIN = '{item_asin}')"
            if 'WHERE (' not in querry:
                querry = querry +' WHERE ('+ item_asin_querry
            else:
                querry = " OR".join([querry , item_asin_querry])
        if item_sku != '':
            item_sku_querry = f"(SKU = '{item_sku}')"
            if 'WHERE (' not in querry:
                querry = querry +' WHERE ('+ item_sku_querry
            else:
                querry = " OR".join([querry , item_sku_querry])
        if item_upc != '':
            item_upc_querry = f"(UPC LIKE '%{item_upc}%')"
            if 'WHERE (' not in querry:
                querry = querry +' WHERE ('+ item_upc_querry
            else:
                querry = " OR".join([querry , item_upc_querry])
        querry = querry + ")"
    # ...then each non-empty filter group is AND-ed onto the query.
    if Store_list_querry != '':
        if 'WHERE' not in querry:
            querry = querry +' WHERE '+ Store_list_querry
        else:
            querry = " AND".join([querry , Store_list_querry])
    if SaleChan_list_querry != '':
        if 'WHERE' not in querry:
            querry = querry +' WHERE '+ SaleChan_list_querry
        else:
            querry = " AND".join([querry , SaleChan_list_querry])
    if Status_list_querry != '':
        if 'WHERE' not in querry:
            querry = querry +' WHERE '+ Status_list_querry
        else:
            querry = " AND".join([querry , Status_list_querry])
    '''print(Store_list_querry+'\n')
    print(SaleChan_list_querry+'\n')
    print(Status_list_querry+'\n')
    print(querry)'''
    listNodes.delete(0, END)
    con = lite.connect(DB_path)
    cur = con.cursor()
    cur.execute(querry)
    result = cur.fetchall()
    #print(result)
    con.close()
    count_searches = 0
    for each in result:
        listNodes.insert(END,(str(each[0]),'|',each[1],'|',each[2],'|',each[3],'|',each[4],'|',each[5],'|',each[6]))
        listNodes.insert(END,('_'))
        if count_searches%10000 == 0:#update root windol on every 10k
            root.update()
        count_searches += 1
        label_loading['text'] = str(round((count_searches/len(result)*100),2)) + '% Loading'
    if len(result) != 0:
        label_loading['text'] = str(round((count_searches/len(result)*100),2)) + '% Loaded'
    lable_ID['text'] = count_searches
    canvas.delete('canvas_text_6')
    canvas.create_text(50,460, anchor = 'nw', text = "|Row_|___ASIN___|____SKU____|__________UPC__________|__Store__|__SLC__|__Status__|",font=("Courier", 18,"underline bold"),fill = 'white', tags=('canvas_text_6',))
def asin_duplicates_funct():
    """Show every ASIN that occurs more than once, most duplicated first.

    Each listbox row carries the item columns plus the duplicate count.
    """
    global result
    listNodes.delete(0, END)
    con = lite.connect(DB_path)
    cur = con.cursor()
    dup_query = 'SELECT *,COUNT(ASIN) as c FROM items GROUP BY ASIN HAVING ( COUNT(ASIN) > 1 ) ORDER BY c DESC;'
    cur.execute(dup_query)
    result = cur.fetchall()
    con.close()
    # Cleared again, mirroring the original flow (harmless).
    listNodes.delete(0, END)
    for record in result:
        row_display = (str(record[0]), '|', record[1], '|', record[2], '|',
                       record[3], '|', record[4], '|', record[5], '|',
                       record[6], '|', str(record[7]))
        listNodes.insert(END, row_display)
        listNodes.insert(END, ('_'))
    lable_ID['text'] = len(result)
    canvas.delete('canvas_text_6')
    canvas.create_text(50, 460, anchor='nw', text="|Row_|___ASIN___|____SKU____|__________UPC__________|__Store__|__SLC__|__Status__|_ASIN_DUPL_|", font=("Courier", 18, "underline bold"), fill='white', tags=('canvas_text_6',))
def sku_duplicates_funct():
    """Show every SKU that occurs more than once, most duplicated first.

    Each listbox row carries the item columns plus the duplicate count.
    """
    global result
    listNodes.delete(0, END)
    con = lite.connect(DB_path)
    cur = con.cursor()
    dup_query = 'SELECT *,COUNT(SKU) as c FROM items GROUP BY SKU HAVING ( COUNT(SKU) > 1 ) ORDER BY c DESC;'
    cur.execute(dup_query)
    result = cur.fetchall()
    con.close()
    # Cleared again, mirroring the original flow (harmless).
    listNodes.delete(0, END)
    for record in result:
        row_display = (str(record[0]), '|', record[1], '|', record[2], '|',
                       record[3], '|', record[4], '|', record[5], '|',
                       record[6], '|', str(record[7]))
        listNodes.insert(END, row_display)
        listNodes.insert(END, ('_'))
    lable_ID['text'] = len(result)
    canvas.delete('canvas_text_6')
    canvas.create_text(50, 460, anchor='nw', text="|Row_|___ASIN___|____SKU____|__________UPC__________|__Store__|__SLC__|__Status__|_SKU_DUPL__|", font=("Courier", 18, "underline bold"), fill='white', tags=('canvas_text_6',))
def csv_search_func():
    """Look up every (ASIN, SKU, UPC) row of a user-chosen CSV in the database.

    For each CSV data row an OR-query over the non-empty fields is run.
    Matches are de-duplicated and written to a timestamped 'CSV SEARCH.csv';
    rows with no match go to 'CSV SEARCH NOT FOUND.csv', both next to the
    database file.  A progress label is refreshed every 15 rows.

    NOTE(review): a new connection is opened per CSV row and only the last
    one is closed; the open() used for line_count is never closed; the
    query is built with f-string interpolation (injection-prone); on an
    empty file (line_count == -1) the final label update raises NameError
    because hours_/min_/sec_ were never bound.
    """
    global querry
    global time_str
    csv_search_result = []
    csv_notfound_result = []
    #------SEARCH BARS
    start_time = time.time()
    canvas.filename = filedialog.askopenfilename(initialdir = "c:/", title = "Select CSV", filetypes = (("csv files","*.csv"),("all files","*.*")))
    path = canvas.filename
    line_count = sum(1 for line in open(path))-1
    with open(path,'r',newline='',encoding='utf-8') as csv_file:
        reader = csv.reader(csv_file)
        row_count = 0
        header = next(reader) #first line is header
        for row in reader: #reads the remaining data
            item_asin = row[0]
            item_sku = row[1]
            item_upc = row[2]
            #------FILTERS
            # OR together whichever of the three identifiers are present.
            querry = "SELECT * FROM 'items'"
            con = lite.connect(DB_path)
            cur = con.cursor()
            if item_asin != '' or item_sku != '' or item_upc != '':
                if item_asin != '':
                    item_asin_querry = f"(ASIN = '{item_asin}')"
                    if 'WHERE (' not in querry:
                        querry = querry +' WHERE ('+ item_asin_querry
                    else:
                        querry = " OR".join([querry , item_asin_querry])
                if item_sku != '':
                    item_sku_querry = f"(SKU = '{item_sku}')"
                    if 'WHERE (' not in querry:
                        querry = querry +' WHERE ('+ item_sku_querry
                    else:
                        querry = " OR".join([querry , item_sku_querry])
                if item_upc != '':
                    item_upc_querry = f"(UPC LIKE '%{item_upc}%')"
                    if 'WHERE (' not in querry:
                        querry = querry +' WHERE ('+ item_upc_querry
                    else:
                        querry = " OR".join([querry , item_upc_querry])
                querry = querry + ")"
            cur.execute(querry)
            result = cur.fetchall()
            if row_count%15 == 0:
                # Keep the GUI responsive and refresh the progress label.
                root.update()
                elapsed_time = round((time.time() - start_time),2)
                hours_ = elapsed_time//3600
                min_ = (elapsed_time%3600)//60
                sec_ = (elapsed_time%60)
                label_loading['text'] = str(round((row_count/(line_count)*100),2)) + f'% Loading -- Elapsed time: {int(hours_)}h,{int(min_)}m,{round(sec_,1)}s'
                #print("\n",int(hours_),'Hours,',int(min_),'min,',round(sec_,1),'sec.')
            row_count +=1
            if result == []:
                csv_notfound_result = csv_notfound_result + [(item_asin,item_sku,item_upc)]
            else:
                csv_search_result = csv_search_result + result
    if line_count !=0:
        label_loading['text'] = str(round((row_count/(line_count)*100),2)) + f'% Loaded -- Elapsed time: {int(hours_)}h,{int(min_)}m,{round(sec_,1)}s'
    # dict.fromkeys keeps first occurrence, de-duplicating in order.
    csv_search_result = list( dict.fromkeys(csv_search_result))
    con.close()
    get_time_call()
    file_name = time_str +" CSV SEARCH.csv"
    csv_export = DB_filepath_strip(DB_path,file_name)
    with open(csv_export,'w',newline='',encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        headers = ['Row', 'ASIN', 'SKU','UPC', 'Store', 'SaleChan', 'Status']
        csv_writer.writerow(headers)
        for each in csv_search_result:
            str_0 = each[0]#Row
            str_1 = each[1]#ASIN
            str_2 = each[2]#SKU
            str_3 = each[3]#UPC
            str_4 = each[4]#Store
            str_5 = each[5]#SaleChan
            str_6 = each[6]#Status
            csv_writer.writerow([str_0, str_1, str_2,str_3,str_4,str_5,str_6])
    file_name = time_str +" CSV SEARCH NOT FOUND.csv"
    csv_export = DB_filepath_strip(DB_path,file_name)
    with open(csv_export,'w',newline='',encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        headers = ['ASIN', 'SKU','UPC']
        csv_writer.writerow(headers)
        for each in csv_notfound_result:
            str_0 = each[0]
            str_1 = each[1]
            str_2 = each[2]
            csv_writer.writerow([str_0, str_1, str_2])
def export_csv_func():
    """Write the rows of the last search result to a timestamped CSV file.

    Only the first seven columns (Row..Status) of each result record are
    exported; the main listbox is cleared afterwards.
    """
    global time_str
    get_time_call()
    print(time_str)
    file_name = str(time_str) + " CSV export.csv"
    csv_export = DB_filepath_strip(DB_path, file_name)
    print(csv_export, "<----------")
    with open(csv_export, 'w', newline='', encoding='utf-8') as csv_file:
        csv_writer = csv.writer(csv_file, delimiter=',')
        csv_writer.writerow(['Row', 'ASIN', 'SKU', 'UPC', 'Store', 'SaleChan', 'Status'])
        for record in result:
            # Duplicate views append a count column; slice it away.
            csv_writer.writerow(list(record[:7]))
    listNodes.delete(0, END)
def edit_single():
    """Submit the edit-entry text: clear the field, then apply the edit."""
    pending = entry_edit.get()
    entry_edit.delete(0, END)
    edit_single_inner(pending)
def edit_single_inner(x):
    """Parse a ' | '-separated row string and update that Row in the database.

    Expected format: ``Row | ASIN | SKU | UPC | Store | SLC | Status``.
    On any parse, validation or database error a hint is written back into
    the edit entry box instead of raising.

    Fixes two defects: input validation via ``assert`` (stripped under -O)
    is now an explicit ValueError, and the bare ``except:`` is narrowed to
    ``except Exception`` so Ctrl-C/SystemExit are no longer swallowed.
    """
    global entry_edit
    try:
        parts = x.split(" | ")
        item_key = int(parts[0])
        item_asin = parts[1].strip()
        item_sku = parts[2].strip()
        item_upc = parts[3].strip()
        item_store = parts[4].strip()
        item_acc = parts[5].strip()
        item_status = parts[6].strip()
        if item_status not in save_dict:
            raise ValueError(f"unknown status: {item_status}")
        # NOTE(review): query is built with f-string interpolation
        # (injection-prone); parameterized UPDATE would be safer.
        con = lite.connect(DB_path)
        try:
            cur = con.cursor()
            querry = f"""UPDATE items SET ASIN = \"{item_asin}\", SKU = \"{item_sku}\", UPC = \"{item_upc}\", Store = \"{item_store}\", SaleChan = \"{item_acc}\", Status = \"{item_status}\" WHERE Row = {item_key}"""
            cur.execute(querry)
            con.commit()
        finally:
            con.close()
        log_str = f"""EDITED single SKU = {item_sku}, ASIN = {item_asin}"""
        update_logger(log_str)
    except Exception:
        entry_edit.insert(0,"""Row | ASIN | SKU | UPC | Store | SLC | Status -= Row or Status may have typos =-""")
def list_doubleclick_handler(event):
    """On double-click, load the active listbox row into the edit entry box."""
    global ACTIVE_handle
    ACTIVE_handle = listNodes.get(ACTIVE)
    entry_edit.delete(0, END)
    entry_edit.insert(0, ' '.join(ACTIVE_handle))
#--------------------------------------BUTTONS DEFINE-----------------------------------------#
# --- Application start-up: load config, create the main window, probe the DB.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
save_dict = load_json_file(BASE_DIR)      # filter states persisted as JSON
DB_path = DB_filepath_read(BASE_DIR)      # last-used database file path
global time_str  # no-op at module level; kept from the original
time_str = get_time_call()
root = Tk()
root.title('Clean Inventory')
root.configure(background="black")
root.resizable(False, False)
#root.geometry("400x400")
try: #conn test
    # Probe the configured database; on success remember the row count.
    con = lite.connect(DB_path)
    cur = con.cursor()
    cur.execute("SELECT Row FROM items")
    result = cur.fetchall()
    rows_label_text = len(result)
    con.close()
    DB_path_btn_color = 'silver'
    del result
except:
    # Unreadable/missing DB: flag the path button red and show zero rows.
    DB_path_btn_color = 'red'
    rows_label_text = "0"
#--------------------------------------CANVAS-----------------------------------------#
back_ground = ImageTk.PhotoImage(Image.open('Images/1600_935_bg.jpg'))
canvas = Canvas(root, width=1600, height=935)
canvas.pack(side=LEFT, fill=BOTH, expand=True)
canvas.configure(background="black")
canvas.create_image(0, 0, image = back_ground, anchor=N+W)
#--------------------------------------CANVAS-----------------------------------------#
#--------------------------------------LOGGER-----------------------------------------#
logg_frame = Frame(canvas)
canvas.create_window(900,100, anchor = 'nw',window=logg_frame)
canvas.create_text(900,60, anchor = 'nw', text = "|___DATE-TIME___|______LOGGER______|",font=("Courier", 18,"underline bold"),fill = 'white', tags=('canvas_text_7',))
file_name = "Log.txt"
logger_path = DB_filepath_strip(DB_path,file_name)
#logger_path = DB_path.split("/")
#logger_path = logger_path[0:(len(logger_path)-1)] + ["Log.txt"]
#logger_path = "/".join(logger_path)
listNodes_logger = Listbox(logg_frame, width=65, height=10, font=("Courier", 12 , "underline bold"),bg = 'black',fg= "white", selectforeground= "black" ,selectbackground='white')
listNodes_logger.pack(side="left", fill="y")
try:
with open(logger_path, 'r') as save_file:
for line in save_file:
listNodes_logger.insert(END,(line+" |"))
listNodes_logger.insert(END,('_'))
except:
listNodes_logger.insert(END,("Log.txt file path error"))
#for x in range(30):
# listNodes.insert(END,(str(x),'|','date-time','|','EVENT-TO-BE-LOGGED','|'))
# listNodes.insert(END,('_'))
#Listbox.delete(0, END)
#Listbox.insert(END, newitem)
scrollbar = Scrollbar(logg_frame, orient="vertical")
scrollbar.config(command=listNodes_logger.yview)
scrollbar.pack(side="right", fill="y")
listNodes_logger.yview_moveto('1.0')
listNodes_logger.config(yscrollcommand=scrollbar.set)
#--------------------------------------LOGGER-----------------------------------------#
#--------------------------------------MAIN PREVEW------------------------------------#
frame = Frame(canvas)
canvas.create_window(50,500, anchor = 'nw',window=frame)
canvas.create_text(50,460, anchor = 'nw', text = "|Row_|___ASIN___|____SKU____|__________UPC__________|__Store__|__SLC__|__Status__|",font=("Courier", 18,"underline bold"),fill = 'white', tags=('canvas_text_6',))
listNodes = Listbox(frame, width=115, height=15, font=("Courier", 16 , "underline bold"),bg = 'black',fg= "white", selectforeground= "black" ,selectbackground='white')
listNodes.pack(side="left", fill="y")
listNodes.bind('<Double-Button-1>', list_doubleclick_handler)
scrollbar = Scrollbar(frame, orient="vertical")
scrollbar.config(command=listNodes.yview)
scrollbar.pack(side="right", fill="y")
listNodes.config(yscrollcommand=scrollbar.set)
#--------------------------------------MAIN PREVEW------------------------------------#
#--------------------------------------FILTERS menu checkbutton-----------------------#
filter_frame = Frame(canvas)
canvas.create_window(350,60, anchor = 'nw',window=filter_frame)
#canvas_text = canvas.create_text(830,70, anchor = 'nw', text = ">>DATA BASE FILTERS<<",font=("Helvetica", 14),fill = 'white', tags=('canvas_text_2',))
# tk checkbox values-----------
# Store -- tkinter variables
MF = IntVar() #0
WW = IntVar() #1
M123 = IntVar() #2
GC = IntVar() #3
PBA = IntVar() #4
PAS = IntVar() #5
MSL = IntVar() #6
PC = IntVar() #7
SW = IntVar() #8
ZOR = IntVar() #9
VIP = IntVar() #10
SAM = IntVar() #11
FORZ = IntVar() #12
CPO = IntVar() #13
ABN = IntVar() #14
PNM = IntVar() #15
# Store -- set variables
MF.set(int(save_dict["MF?"])) #0
WW.set(int(save_dict["WW?"])) #1
M123.set(int(save_dict["M123?"])) #2
GC.set(int(save_dict["GC?"])) #3
PBA.set(int(save_dict["PBA?"])) #4
PAS.set(int(save_dict["PAS?"])) #5
MSL.set(int(save_dict["MSL?"])) #6
PC.set(int(save_dict["PC?"])) #7
SW.set(int(save_dict["SW?"])) #8
ZOR.set(int(save_dict["ZOR?"])) #9
VIP.set(int(save_dict["VIP?"])) #10
SAM.set(int(save_dict["SAM?"])) #11
FORZ.set(int(save_dict["FORZ?"])) #12
CPO.set(int(save_dict["CPO?"])) #13
ABN.set(int(save_dict["ABN?"])) #14
PNM.set(int(save_dict["PNM?"])) #14
# SaleChan
AmD = IntVar()
AmM = IntVar()
AmS = IntVar()
AmD.set(int(save_dict["AmD"]))
AmM.set(int(save_dict["AmM"]))
AmS.set(int(save_dict["AmS"]))
#Status
active = IntVar()
active.set(int(save_dict["active"]))
actNoRep = IntVar()
actNoRep.set(int(save_dict["actNoRep"]))
notComp = IntVar()
notComp.set(int(save_dict["notComp"]))
closed = IntVar()
closed.set(int(save_dict["closed"]))
redBrands = IntVar()
redBrands.set(int(save_dict["redBrands"]))
other =IntVar()
other.set(int(save_dict["other"]))
deleted = IntVar()
deleted.set(int(save_dict["deleted"]))
ToBList = IntVar()
ToBList.set(int(save_dict["ToBList"]))
ToBCheckd = IntVar()
ToBCheckd.set(int(save_dict["ToBCheckd"]))
# menue define----------
# Store
Store_menue = Menubutton ( filter_frame, text="Store Filter",padx = 22,bg = 'silver',activebackground='white', borderwidth=5,font=("Helvetica", 14), relief=RAISED )
Store_menue.menu = Menu ( Store_menue, tearoff = 1, bg = 'white',activebackground='black')
Store_menue["menu"] = Store_menue.menu
Store_menue.pack(side="left", fill="y")
Store_menue.menu.add_checkbutton ( label="GC ", variable = GC)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="MF", variable = MF)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="WW", variable = WW)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="M123", variable = M123)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="PBA", variable = PBA)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="PAS", variable = PAS)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="MSL", variable = MSL)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="PC", variable = PC)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="SW", variable = SW)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="ZOR", variable = ZOR)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="VIP", variable = VIP)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="SAM", variable = SAM)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="FORZ", variable = FORZ)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="CPO", variable = CPO)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="ABN", variable = ABN)
Store_menue.menu.add_separator()
Store_menue.menu.add_checkbutton ( label="PNM", variable = PNM)
#SaleChan
SaleChan_menue = Menubutton ( filter_frame, text="SaleChan Filter",padx = 22,bg = 'silver',activebackground='white',borderwidth=5,font=("Helvetica", 14), relief=RAISED )
SaleChan_menue.menu = Menu ( SaleChan_menue, tearoff = 1, bg = 'white',activebackground='black')
SaleChan_menue["menu"] = SaleChan_menue.menu
SaleChan_menue.pack(side="left", fill="y")
SaleChan_menue.menu.add_checkbutton ( label="AmD ", variable = AmD)
SaleChan_menue.menu.add_separator()
SaleChan_menue.menu.add_checkbutton ( label="AmM", variable = AmM)
SaleChan_menue.menu.add_separator()
SaleChan_menue.menu.add_checkbutton ( label="AmS", variable = AmS)
#Status
Status_menue = Menubutton ( filter_frame, text="Status Filter",padx = 22, bg = 'silver',activebackground='white', borderwidth=5,font=("Helvetica", 14), relief=RAISED )
Status_menue.menu = Menu ( Status_menue, tearoff = 1, bg = 'white',activebackground='black')
Status_menue["menu"] = Status_menue.menu
Status_menue.pack(side="left", fill="y")
Status_menue.menu.add_checkbutton ( label="active ", variable = active)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="actNoRep", variable = actNoRep)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="notComp", variable = notComp)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="closed", variable = closed)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="redBrands", variable = redBrands)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="deleted", variable = deleted)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="other", variable = other)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="ToBList", variable = ToBList)
Status_menue.menu.add_separator()
Status_menue.menu.add_checkbutton ( label="ToBCheckd", variable = ToBCheckd)
#--------------------------------------FILTERS menu checkbutton-----------------------#
#--------------------------------------Buttons------------------------------------#
btn_img = ImageTk.PhotoImage(Image.open('Images/60_60_buttn.jpg'))
csv_upl_img = ImageTk.PhotoImage(Image.open('Images/60x60upl_csv.jpg'))
csv_edit_img = ImageTk.PhotoImage(Image.open('Images/60x60edit_btn.png'))
upload_bysingle_btn = Button(canvas,image=btn_img, bg = 'black',activebackground='silver', command=upload_single)
upload_bycsv_btn = Button(canvas,image=csv_upl_img, bg = 'black',activebackground='silver', command=upload_by_csv_smart)
edit_bycsv_btn = Button(canvas,image=csv_edit_img, bg = 'black',activebackground='silver', command=edit_bycsv)
edit_one_btn = Button(canvas, text = "EDIT", bg = DB_path_btn_color ,activebackground='white',width = 15, borderwidth=5,font=("Helvetica", 11, "bold"), command=edit_single)
DB_path_btn = Button(canvas, text = "SET DB", bg = DB_path_btn_color ,activebackground='white',width = 15,height=2, borderwidth=5,font=("Helvetica", 11, "bold"), command=DB_path_funct)
DB_backup_btn = Button(canvas, text = "BACKUP DB", bg = DB_path_btn_color ,activebackground='white',width = 11,height=2, borderwidth=5,font=("Helvetica", 11, "bold"), command=sqlite3_backup)
search_btns_frame = Frame(canvas)
search_btn = Button(search_btns_frame, text = "SEARCH", bg = DB_path_btn_color ,activebackground='white',width = 10, borderwidth=5,font=("Helvetica", 11, "bold"), command=search_func)
search_csv_btn = Button(search_btns_frame, text = "CSV SEARCH", bg = DB_path_btn_color ,activebackground='white',width = 11, borderwidth=5,font=("Helvetica", 11, "bold"), command=csv_search_func)
export_csv_btn = Button(search_btns_frame, text = "EXPORT SEARCH", bg = 'silver',activebackground='white',width = 16, borderwidth=5,font=("Helvetica", 11, "bold"), command=export_csv_func)
search_btn.pack(side="left", fill="y")
search_csv_btn.pack(side="left", fill="y")
export_csv_btn.pack(side="left", fill="y")
duplicate_btns_frame = Frame(canvas)
asin_dupl_btn = Button(duplicate_btns_frame, text = "ASIN", bg = DB_path_btn_color ,activebackground='white',width = 4, borderwidth=5,font=("Helvetica", 11, "bold"), command=asin_duplicates_funct)
upc_dupl_btn = Button(duplicate_btns_frame, text = "SKU", bg = DB_path_btn_color ,activebackground='white',width = 4, borderwidth=5,font=("Helvetica", 11, "bold"), command=sku_duplicates_funct)
asin_dupl_btn.pack(side="left", fill="y")
upc_dupl_btn.pack(side="left", fill="y")
canvas.create_window(50,350, anchor = 'nw',window = duplicate_btns_frame)
canvas.create_window(818,425, anchor = 'nw',window = search_btns_frame)
canvas.create_window(50,60, anchor = 'nw',window = upload_bysingle_btn)
canvas.create_window(50,160, anchor = 'nw',window = upload_bycsv_btn)
canvas.create_window(50,260, anchor = 'nw',window = edit_bycsv_btn)
canvas.create_window(1400,880, anchor = 'nw',window = edit_one_btn)
canvas.create_window(1400,425, anchor = 'nw',window = DB_path_btn)
canvas.create_window(1260,425, anchor = 'nw',window = DB_backup_btn)
canvas.create_text(100,80, anchor = 'nw', text = ">>>UPLOAD ONE",font=("Helvetica", 14),fill = 'white', tags=('canvas_text_1',))
#canvas.create_text(100,180, anchor = 'nw', text = ">>>UPLOAD by CSV",font=("Helvetica", 14),fill = 'white', tags=('canvas_text_2',))
#canvas.create_text(100,280, anchor = 'nw', text = ">>>EDIT by CSV",font=("Helvetica", 14),fill = 'white', tags=('canvas_text_3',))
#--------------------------------------Buttons------------------------------------#
#--------------------------------------Entry,labels-------------------------------#
search_frame = Frame(canvas)
lable_ID = Label(search_frame,text= "0", width = 6, borderwidth=4, bg = 'white',fg = 'black',font=("Helvetica", 14), relief=RAISED)
entry_ASIN = Entry(search_frame, width = 13, borderwidth=5, bg = 'silver',font=("Helvetica", 14))
entry_SKU = Entry(search_frame, width = 14, borderwidth=5, bg = 'silver',font=("Helvetica", 14))
entry_UPC = Entry(search_frame, width = 30, borderwidth=5, bg = 'silver',font=("Helvetica", 14))
lable_ID.pack(side="left", fill="y")
entry_ASIN.pack(side="left", fill="y")
entry_SKU.pack(side="left", fill="y")
entry_UPC.pack(side="left", fill="y")
entry_edit = Entry(canvas, width = 120, borderwidth=5, bg = 'silver',font=("Helvetica", 14))
entry_edit.insert(0,'ID | ASIN | SKU | UPC | Store | SLC | Status')
label_ID_inDB = Label(canvas,text=rows_label_text, width = 6, borderwidth=4, bg = 'white',fg = 'black',font=("Helvetica", 14), relief=RAISED)
label_loading = Label(canvas,text='loading label', width = 43, borderwidth=4, bg = 'white',fg = 'black',font=("Helvetica", 14), relief=RAISED)
canvas.create_window(50,390, anchor = 'nw',window = label_ID_inDB)
canvas.create_text(150,355, anchor = 'nw', text = ">>SHOW DUPLICATES",font=("Helvetica", 14),fill = 'white', tags=('canvas_text_8',))
canvas.create_text(130,393, anchor = 'nw', text = ">>>>RECORDS IN DB",font=("Helvetica", 14),fill = 'white', tags=('canvas_text_8',))
canvas.create_window(50,425, anchor = 'nw',window=search_frame)
canvas.create_window(50,880, anchor = 'nw',window = entry_edit)
canvas.create_window(350,110, anchor = 'nw',window = label_loading)
#--------------------------------------Entry,labels-----------------------------------#
mainloop()
#--------------------------------------End of program--------------------------------#
#get the filter values befor closeing the app
get_filter_val()
save_json_file(BASE_DIR)
DB_filepath_write(BASE_DIR,DB_path)
try:
sqlite3_backup()
if con:
con.close()
except:
pass
#"""
| [
"noreply@github.com"
] | noreply@github.com |
2c415f71901095e71620872ae59a2132fcf32c17 | 8860414fc5e9f62d190897fedf8422fb727aab65 | /cakeclub/cakepage/admin.py | d41472a28f1606c5e965fe6f3788c575b804cc6a | [] | no_license | gislenelima/cakeclub | f58cdfe80bbc2a521292e60aabc0a2cdf0a7849a | 67ee72b4dbc51c6c5cd37c23a5ce54019c82c76c | refs/heads/master | 2020-03-17T07:35:10.543008 | 2018-05-28T17:16:35 | 2018-05-28T17:16:35 | 133,404,505 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 588 | py | from django.contrib import admin
from .models import DuplaDaVez, CakePool, Cakes, Dicas
# Register your models here.
#mostra insformações no admin no formato abaixo
@admin.register(DuplaDaVez)
class DuplaDaVezAdmin(admin.ModelAdmin):
    """Admin change list for DuplaDaVez rows: id, both names and the pair date."""
    list_display = ('id','nome_um','nome_dois','date_da_dupla')
@admin.register(Cakes)
class CakesAdmin(admin.ModelAdmin):
    """Admin change list for Cakes rows, showing only the cake name."""
    list_display = ('name',)
@admin.register(CakePool)
class CakePoolAdmin(admin.ModelAdmin):
    """Admin change list for CakePool rows, showing the entry date."""
    list_display = ('entry_date',)
@admin.register(Dicas)
class DicasAdmin(admin.ModelAdmin):
    """Admin change list for Dicas ("tips") rows, showing the title."""
    list_display = ('titulo',)
| [
"vanessa@fiberlink.net.br"
] | vanessa@fiberlink.net.br |
91447ef9280413cee1102ffc1bc42da40377bca5 | 43c8f49b9a6d357435d8ac96f8769c4b7ce9ed4a | /code/geometry_plotters_2D.py | c5aabcc6f0d43b91e4614ab1e9758476f056b195 | [] | no_license | Bojarov/08_OPTI_WELD_FH | c9e3c2e47911324eeeed57846db58c9e36c5a598 | 7a06914d222253c7153a4820203dab8a0e254b38 | refs/heads/master | 2023-03-28T22:31:19.642534 | 2021-04-12T14:52:34 | 2021-04-12T14:52:34 | 335,680,353 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,708 | py | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as patches
import code.geometry_builders as gb
from matplotlib import rc, font_manager
import matplotlib.patches as patches
# Global matplotlib text configuration: 12 pt sans-serif (Helvetica) and
# LaTeX rendering (usetex) for all figure text.
sizeOfFont = 12
fontProperties = {'family': 'sans-serif', 'sans-serif': ['Helvetica'],
                  'weight': 'normal', 'size': sizeOfFont}
# NOTE(review): ticks_font is defined but not referenced by the functions
# below — presumably used elsewhere or left over; confirm before removing.
ticks_font = font_manager.FontProperties(family='Helvetica', style='normal',
                                         size=sizeOfFont, weight='normal', stretch='normal')
rc('text', usetex=True)
rc('font', **fontProperties)
def wire_mesh_2D_plot(params, damage_params, segment_list, pts):
    """Plot the meshed cross section of the wire/tube.

    Draws the outer and inner pipe surfaces as circles plus one square
    patch per filament.  A filament whose segment conductivity equals the
    damage conductivity is filled red, a healthy one blue; the filament
    centre points are overlaid as green dots.

    Args:
        params: (ro_t, ri_t, flen, node_dens, sigma, mu_0, mu_r, freq).
        damage_params: (rd_i, rd_o, d_weld, l_weld, phi_c_weld, sigma_damage).
        segment_list: per-filament segment tuples, aligned 1:1 with *pts*;
            index 6 holds the segment conductivity (compared to
            sigma_damage below).
        pts: 2D centre points of the filaments in the cross section.
    """
    ro_t, ri_t, flen, node_dens, sigma, mu_0, mu_r, freq = params  # unpack
    rd_i, rd_o, d_weld, l_weld, phi_c_weld, sigma_damage = damage_params

    fig_mesh2d, ax_mesh2D = plt.subplots()
    ax_mesh2D.set_aspect('equal')
    plt.xlim(-(ro_t + 0.1 * ro_t), (ro_t + 0.1 * ro_t))
    plt.ylim(-(ro_t + 0.1 * ro_t), (ro_t + 0.1 * ro_t))

    # plot outer and inner surface of the pipe
    circle_o2 = plt.Circle((0, 0), ro_t, color='b', fill=False)
    circle_i2 = plt.Circle((0, 0), ri_t, color='b', fill=False)
    ax_mesh2D.add_patch(circle_o2)
    ax_mesh2D.add_patch(circle_i2)

    # enumerate() instead of the original pts.index(i): the index lookup was
    # O(n) per point (O(n^2) total) and, for duplicated points, always
    # returned the first occurrence, pairing the patch with the wrong
    # segment conductivity.
    for j, pt in enumerate(pts):
        sigma_segment = segment_list[j][6]
        if sigma_segment == sigma_damage:
            color = 'red'   # damaged filament
        else:
            color = 'blue'  # healthy filament
        rect = patches.Rectangle(
            tuple(np.subtract(pt, (0.5 * ro_t / (node_dens - 1), 0.5 * ro_t / (node_dens - 1)))),
            ro_t / (node_dens - 1), ro_t / (node_dens - 1), linewidth=1.5 * (ro_t - ri_t) / node_dens,
            edgecolor='black', facecolor=color, alpha=0.5
        )
        ax_mesh2D.add_patch(rect)

    plt.scatter(*zip(*pts), marker='o', s=10, color='green', zorder=2)
    ax_mesh2D.title.set_text(r'Conductor cross section and filaments')
    ax_mesh2D.set_xlabel(r'$x[m]$')
    ax_mesh2D.set_ylabel('$y[m]$')
    plt.tight_layout()
    plt.grid()
    plt.show()
def wire_mesh_2D_plot_dyn(ro_t, ri_t, r_sub_vec, node_dens_vec, params,
                          filament_params, damage_params, l_sub_vec):
    """Build the tube geometry dynamically and plot its cross-section mesh.

    Delegates geometry construction to gb.tube_builder, then draws every
    sub-shell radius as a circle and one square patch per filament.
    Damaged filaments (segment conductivity == sigma_damage, when that
    differs from the bulk sigma) are red; healthy ones alternate
    blue/green per shell.

    Args:
        ro_t, ri_t: outer/inner tube radius.
        r_sub_vec: radii of the sub-shells.
        node_dens_vec: node density per shell.
        params: (ro_t, ri_t, flen, node_dens, sigma, mu_0, mu_r, freq).
        filament_params, damage_params, l_sub_vec: forwarded to tube_builder;
            damage_params[5] is sigma_damage.
    """
    # hierarchy of geometry objects: tube -> shell -> segments -> nodes
    tube_node_lists = []     # filled per shell by tube_builder
    tube_segment_lists = []  # per-shell segment tuples (index 2: width, 6: sigma)
    tube_pts_lists = []      # per-shell 2D filament centre points

    # build the geometry in python from the input params
    gb.tube_builder(ro_t, ri_t, r_sub_vec, l_sub_vec, node_dens_vec,
                    params, damage_params, filament_params, tube_node_lists,
                    tube_segment_lists, tube_pts_lists)

    ro_t, ri_t, flen, node_dens, sigma, mu_0, mu_r, freq = params
    rd_i, rd_o, d_weld, l_weld, phi_c_weld, sigma_damage = damage_params
    n_segments = (len(tube_segment_lists[0]) + len(tube_segment_lists[1])
                  + len(tube_segment_lists[2]))

    fig2, ax2 = plt.subplots()
    ax2.set_aspect('equal')
    plt.xlim(-(ro_t + 0.1 * ro_t), (ro_t + 0.1 * ro_t))
    plt.ylim(-(ro_t + 0.1 * ro_t), (ro_t + 0.1 * ro_t))

    shell_colors = ['blue', 'green']

    # draw each sub-shell radius
    for r_shell in r_sub_vec:
        ax2.add_patch(plt.Circle((0, 0), r_shell, color='b', fill=False))

    # enumerate() replaces the original list.index() lookups, which were
    # O(n) per element and broke for duplicated entries (always resolving
    # to the first occurrence).
    for i, pts in enumerate(tube_pts_lists):
        segment_list = tube_segment_lists[i]
        node_dens = node_dens_vec[i]
        for j, pt in enumerate(pts):
            # index of the first segment of this filament along the tube axis
            fil_ind = j * (len(l_sub_vec) - 1)
            sigma_segment = segment_list[fil_ind][6]
            w = segment_list[fil_ind][2]
            if sigma_segment == sigma_damage and sigma_damage != sigma:
                color = 'red'  # damaged filament
            else:
                color = shell_colors[i % 2]
            rect = patches.Rectangle(
                tuple(np.subtract(pt, (0.5 * w, 0.5 * w))),
                w, w, linewidth=1.5 * (ro_t - ri_t) / node_dens,
                edgecolor='black', facecolor=color, alpha=0.5
            )
            ax2.add_patch(rect)
        if len(pts) > 0:
            plt.scatter(*zip(*pts), marker='o', s=10, color=shell_colors[i % 2], zorder=2)

    ax2.title.set_text(r'Conductor cross section and filaments')
    ax2.set_xlabel(r'$x[m]$')
    ax2.set_ylabel('$y[m]$')
    plt.tight_layout()
    plt.grid()
    print("The geometry is made of " + str(n_segments) + " segments.")
def loop_plane_2d(a, b, n_a, n_b):
    """Plot a 2D sketch of an a-by-b loop plane with its node grid.

    Draws the rectangular plane outline, the (n_a+1) x (n_b+1) grid of
    loop nodes (blue), every other contact node shifted by +0.1 in x
    (yellow) and the plane corners (red).

    NOTE(review): explicitly unfinished (see TODO below) — several
    alternative node selections are left commented out.
    """
    #TODO finish the loop surface
    # corner points of the a x b plane, centred on the origin
    x_p = np.linspace(-a / 2, a / 2, 2)
    y_p = np.linspace(-b / 2, b / 2, 2)
    plane_crnrs = np.array(np.meshgrid(x_p, y_p)).T.reshape(-1, 2)
    print(plane_crnrs)
    rect = patches.Rectangle((-a / 2, -b / 2), a, b, linewidth=1, edgecolor='r', facecolor='none')
    # loop nodes
    # cell widths, so nodes sit half a cell away from the plane edge
    w_a = b / (n_b + 1)
    w_b = a / (n_a + 1)
    x = np.linspace((-a + w_b) / 2, (a - w_b) / 2, n_a + 1)
    y = np.linspace((-b + w_a) / 2, (b - w_a) / 2, n_b + 1)
    # 3D node coordinates (z fixed at 0)
    loop_nodes = np.array(np.meshgrid(x, y, [0])).T.reshape(-1, 3)
    # contact nodes
    # drop the last column of nodes, then take every second one, offset in x
    x_c = loop_nodes[0:-(n_b+1), 0]
    y_c = loop_nodes[0:-(n_b+1), 1]
    #x_c_f = loop_nodes[1::2, 0]+0.1
    x_c_f = x_c[1::2]+0.1
    #y_c_f = loop_nodes[1::2, 1]
    y_c_f = y_c[1::2]
    #x_c_f = x_c[:-2] + 0.1
    #y_c_f = y_c[:-2]
    # loop segments
    #mask = np.ones(x_c.size, dtype=bool)
    #mask[::n_b + 1] = 0
    x_links = []  # NOTE(review): placeholder for segment links, never filled
    ##print(x_c[mask])
    # exit()
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.scatter(loop_nodes[:, 0], loop_nodes[:, 1], color='blue')
    # ax.scatter(loop_nodes[::2, 0], loop_nodes[::2, 1], color='yellow')
    ax.scatter(x_c_f, y_c_f, color='yellow')
    # ax.plot(loop_nodes[::2, 0], loop_nodes[::2, 1], color='orange')
    ax.plot(loop_nodes[:, 0], loop_nodes[:, 1], color='green', linestyle='--')
    ax.scatter(plane_crnrs[:, 0], plane_crnrs[:, 1], color='red')
    # ax.plot(plane_crnrs[:, 0], plane_crnrs[:, 1], color='red')
    # ax.plot(x_c[mask], y_c[mask], color='black')
    ax.add_patch(rect)
    ax.axis('equal')
    plt.show()
| [
"bogusz.bujnowski@gmail.com"
] | bogusz.bujnowski@gmail.com |
d7494662981ee5a36d8738b3a31e9f10a40eafe1 | f4a65213980f30daad5f6c8a8fb7103840a77e84 | /tealiumtest/pageParser.py | 8d4e86d8092a10669fb27ce9401c36a963abf4f1 | [] | no_license | Zenodia/Python | ebbf27b049322a5ccf6db4076767bf81767a2fc2 | eb94233b6879157d9fa4e68f3ac6fd5efd1e0ee0 | refs/heads/master | 2021-01-10T14:46:19.943866 | 2016-02-17T20:00:29 | 2016-02-17T20:00:29 | 51,947,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,116 | py | import httpagentparser
#print httpagentparser.simple_detect(s)
#print httpagentparser.detect(s)
import json
from pprint import pprint
import itertools
import operator
from operator import itemgetter
import math
import datetime
# NOTE(review): Python 2 script (print statements, dict.iteritems below).
# import tealium.json file to python and extract sample data
# Each line of the input file is one JSON event record.
data = []
with open('./data/tealium.json') as f:
    for line in f:
        data.append(json.loads(line))
print " length of the data : %d" % len(data)
#extract sample data by name of the key , ex : "visitorid"
#print data[0]["visitorid"]
#sample=data[0]["useragent"]
#print type(s)
#print httpagentparser.simple_detect(s)
#parse all browser data from useragent
#print "before"
#print data[0]["useragent"]
#convert useragent into simple browser version
# Replace every raw user-agent string in place with the pair returned by
# httpagentparser.simple_detect().
l=len(data)
for i in range(0,l):
    s=httpagentparser.simple_detect(data[i]["useragent"])
    data[i]["useragent"]=s
#print "after"
#print data[0]["useragent"]
# count unique users
num_visitorid=[]
for i in range(0,l):
    num_visitorid.append(data[i]["visitorid"])
num_visitorid=list(set(num_visitorid))
print " number of unique visitorid : %d" % len(num_visitorid)
# get list of event per_visitorid
# Group events by visitorid; groupby needs the input sorted by the same key.
gpby_vid={}
for key,group in itertools.groupby(sorted(data),operator.itemgetter("visitorid")):
    gpby_vid[key]=list(group)
#print type(gpby_vid["015014ec67df001848a322999e9e0606d0033065009dc"][0])
#print gpby_vid["015014ec67df001848a322999e9e0606d0033065009dc"][0]["eventtime"]
#print type(gpby_vid)
# see how many visitor id has only one event
# For each visitor keep (eventtime, parsed useragent) of the FIRST event only.
shortdict={}
for vid in num_visitorid:
    ls=[]
    for k,v in gpby_vid[vid][0].iteritems():
        if k=="useragent":
            temp=(gpby_vid[vid][0]["eventtime"], gpby_vid[vid][0]["useragent"])
            ls.append(temp)
    shortdict[vid]=ls
#print shortdict["015014ec67df001848a322999e9e0606d0033065009dc"][0][1][0]
#print shortdict["015014ec67df001848a322999e9e0606d0033065009dc"][0][1][1]
#print len(shortdict["015014ec67df001848a322999e9e0606d0033065009dc"]
# get list of devices used by visitor
# shortdict[vid][0][1] is the simple_detect pair; [0] is used as the
# device/platform and [1] as the browser (per the variable names below).
# NOTE(review): the inner loop appends the same pair once per tuple element,
# so each value is appended twice — harmless only because set() dedupes.
# NOTE(review): if an event lacked a "useragent" key, shortdict[vid] is []
# and shortdict[vid][0] raises IndexError — confirm the data guarantees it.
device=[]
browser=[]
for vid in num_visitorid:
    for item in shortdict[vid][0]:
        device.append(shortdict[vid][0][1][0])
        browser.append(shortdict[vid][0][1][1])
device=sorted(list(set(device)))
#print device
browser=sorted(list(set(browser)))
#print browser
# Count, per device and per browser, how many visitors used it.
device_d={}
browser_d={}
for dev in device:
    count=0
    for vid in num_visitorid:
        if shortdict[vid][0][1][0]==dev:
            count+=1
    device_d[dev]=count
for bro in browser:
    cnt=0
    for vid in num_visitorid:
        if shortdict[vid][0][1][1]==bro:
            cnt+=1
        #if shortdict[vid][0][1][1]=="Microsoft Internet Explorer 9.0":
            #print vid
    browser_d[bro]=cnt
#print device_d
print browser_d
#print shortdict
"""
for key,values in gpby_vid.iteritems():
    ls=[]
    #print type(values[0])
    for v in values[0].iteritems():
        if v=="useragent" or v=="eventtime":
            temp=(v["useragent"], v["eventtime"])
            ls.append(temp)
    shortdict[key]=ls
"""
#print groupby_visitorid[0][0]
#print len(groupby_visitorid[0][0])
| [
"zenodia@MachineL"
] | zenodia@MachineL |
28f1d23c14a48f7b747382b0a39c605d7dc7eddd | 41e0ce52db01bf288248d80d42b8316c75ea89a0 | /145. Binary Tree Postorder Traversal.py | 20f51c158afe6251a2290e2a408a4d6dd6a0b141 | [] | no_license | Luca2460/Imeneo-Leetcodes-Solutions-in-Python | e088eb5f420a34c3a19e38b0c94ba5d71a44adda | 3bc3ab555ab5b33c645891012d81f3ad96d5b386 | refs/heads/master | 2023-09-01T06:53:54.785342 | 2021-09-28T20:04:52 | 2021-09-28T20:04:52 | 398,030,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,454 | py | # Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
    def postorderTraversal(self, root):
        """Return the postorder (left, right, root) traversal of a binary tree.

        Iterative: run a root -> right -> left pre-order with an explicit
        stack, then reverse the output, which is exactly the
        left -> right -> root post-order.  O(n) time and space.

        NOTE: the original annotations (Optional[TreeNode] -> List[int])
        referenced names never imported in this file and raised NameError
        at import time outside the LeetCode sandbox, so the contract is
        documented here instead.

        Args:
            root: root TreeNode of the tree, or None for an empty tree.

        Returns:
            List of node values in postorder (empty list for an empty tree).
        """
        if not root:
            return []
        out = []
        stack = [root]
        while stack:
            node = stack.pop()
            out.append(node.val)
            if node.left:
                stack.append(node.left)
            if node.right:
                stack.append(node.right)
        out.reverse()
        return out
"64102952+Luca2460@users.noreply.github.com"
] | 64102952+Luca2460@users.noreply.github.com |
fbb709c393ef9a43eabc3318d5a3ce251fca4ed3 | b114b48e52e50932788d4ad11525e0f2376836ff | /cell_renders/cell_renderer_progress.py | fdd778d29c85170061e1d1cbebabcbf743c6ded6 | [] | no_license | necrox87/python-gtk3-demos | 7a94eaf27f6680f6506c89d3b22bf6ed86c8c33d | 8f1723157baa835236f53fe196db5eedf66c47c6 | refs/heads/master | 2021-01-25T05:27:58.632541 | 2015-08-03T17:27:12 | 2015-08-03T17:27:12 | 40,137,093 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | from gi.repository import Gtk, GObject
class CellRendererProgressWindow(Gtk.Window):
    """Demo window for Gtk.CellRendererProgress.

    Three rows each hold a text label, a progress value (0-100) and an
    'inverted' flag; a 100 ms timeout keeps advancing the current row's
    progress, moving to the next row past 100% and restarting at the top
    once every row is done.
    """

    def __init__(self):
        Gtk.Window.__init__(self, title="CellRendererProgress Example")
        self.set_default_size(200, 200)

        # Model columns: 0 = label, 1 = progress value, 2 = inverted flag.
        self.liststore = Gtk.ListStore(str, int, bool)
        self.current_iter = self.liststore.append(["Sabayon", 0, False])
        self.liststore.append(["Zenwalk", 0, False])
        self.liststore.append(["SimplyMepis", 0, False])

        tree = Gtk.TreeView(model=self.liststore)

        text_cell = Gtk.CellRendererText()
        tree.append_column(Gtk.TreeViewColumn("Text", text_cell, text=0))

        progress_cell = Gtk.CellRendererProgress()
        tree.append_column(
            Gtk.TreeViewColumn("Progress", progress_cell, value=1, inverted=2))

        toggle_cell = Gtk.CellRendererToggle()
        toggle_cell.connect("toggled", self.on_inverted_toggled)
        tree.append_column(
            Gtk.TreeViewColumn("Inverted", toggle_cell, active=2))

        self.add(tree)
        # Call on_timeout every 100 ms for as long as it returns True.
        self.timeout_id = GObject.timeout_add(100, self.on_timeout, None)

    def on_inverted_toggled(self, widget, path):
        """Flip the 'inverted' flag of the clicked row."""
        row = self.liststore[path]
        row[2] = not row[2]

    def on_timeout(self, user_data):
        """Advance the active row's progress; step to the next row past 100."""
        bumped = self.liststore[self.current_iter][1] + 1
        if bumped > 100:
            self.current_iter = self.liststore.iter_next(self.current_iter)
            if self.current_iter == None:
                self.reset_model()
            bumped = self.liststore[self.current_iter][1] + 1
        self.liststore[self.current_iter][1] = bumped
        return True

    def reset_model(self):
        """Zero every row's progress and restart from the first row."""
        for row in self.liststore:
            row[1] = 0
        self.current_iter = self.liststore.get_iter_first()
# Build the demo window, quit the GTK main loop when it is closed, and
# show all its widgets (Gtk.main() below then starts the event loop).
win = CellRendererProgressWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main() | [
"necrox87@gmail.com"
] | necrox87@gmail.com |
e3769995216d97c609231c9536b08e0b5013ba05 | 64a4e3daf2a489226ce58b3d90bb4115cf80a065 | /ArticleSpider/utils/zhihu_login_request.py | b031d7d37304bb6038f65210a41da0bf1b85ed88 | [] | no_license | dreamkong/ArticleSpider | d1635f0e0624ff42c940be4cc8211bac53aeb24d | 9d82a1d15ef769fc4b02d8614dcbbcf05a40ed4c | refs/heads/master | 2021-09-11T15:21:29.957762 | 2018-04-09T06:22:43 | 2018-04-09T06:22:43 | 110,769,369 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,006 | py | # _*_ coding:utf-8 _*_
__author__ = 'dreamkong'
import requests
from ArticleSpider.utils import config
try:
# py2
import cookielib
except:
# py3
import http.cookiejar as cookielib
import re
# Shared HTTP session; cookies are persisted to cookies.txt so a login
# survives restarts of the script.
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename="cookies.txt")
try:
    # Reuse a previously saved login session, if the cookie file exists.
    session.cookies.load(ignore_discard=True)
except:
    print("cookie未能加载")
# Browser-like user agent (zhihu presumably rejects the default
# python-requests UA — the message string means "cookie failed to load").
agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) ' \
        'Chrome/61.0.3163.100 Safari/537.36'
# Default headers sent with every request to zhihu.com.
headers = {
    'HOST': 'www.zhihu.com',
    'Referer': 'https://www.zhihu.com',
    'User-Agent': agent
}
def is_login():
    """Return True when the saved session is still authenticated.

    Fetches the notifications page without following redirects: a
    logged-in session answers 200, an anonymous one gets redirected
    (non-200 status).
    """
    url = "https://www.zhihu.com/notifications"
    resp = session.get(url, headers=headers, allow_redirects=False)
    return resp.status_code == 200
def get_xsrf():
    """Fetch the zhihu homepage and extract the anti-CSRF "_xsrf" token.

    Returns:
        The token string from the hidden _xsrf input, or '' if not found.
    """
    response = session.get('https://www.zhihu.com', headers=headers)
    # re.search instead of the original re.match('.*name="_xsrf"...'):
    # match anchors at the start of the string and '.' never matches a
    # newline, so the token was only ever found when the hidden input sat
    # on the very first line of the page.
    match_obj = re.search('name="_xsrf" value="(.*?)"', response.text)
    if match_obj:
        return match_obj.group(1)
    return ''
def get_index():
    """Download the zhihu front page and save it to index_page.html."""
    resp = session.get("https://www.zhihu.com", headers=headers)
    with open("index_page.html", "wb") as page_file:
        page_file.write(resp.text.encode("utf-8"))
    print("ok")
def get_captcha():
    """Download the current login captcha, show it, and read the answer.

    Saves the image to captcha.jpg, best-effort displays it with PIL, and
    asks the user to type the captcha text on stdin.

    Returns:
        The captcha text entered by the user.
    """
    import time
    # Millisecond timestamp as a cache-busting query parameter, matching
    # what the zhihu web client sends.
    timestamp = str(int(time.time() * 1000))
    captcha_url = "https://www.zhihu.com/captcha.gif?r={0}&type=login".format(timestamp)
    response = session.get(captcha_url, headers=headers)
    # with-block closes the file; the original also called f.close() inside
    # it (redundant) and reused the name `t` for both timestamp and response.
    with open("captcha.jpg", "wb") as f:
        f.write(response.content)
    from PIL import Image
    try:
        im = Image.open('captcha.jpg')
        im.show()
        im.close()
    except Exception:
        # Best effort only: if the image viewer fails, the user can still
        # open captcha.jpg manually before answering the prompt.
        pass
    captcha = input("输入验证码\n>")
    return captcha
def zhihu_login(account, password):
    """Log in to zhihu with a phone number or an e-mail address.

    The account type is detected from its shape: an 11-digit number
    starting with 1 is treated as a phone number, anything containing '@'
    as an e-mail.  On completion the session cookies are saved to disk.

    Args:
        account: phone number or e-mail address.
        password: account password.

    Raises:
        ValueError: if *account* is neither a phone number nor an e-mail
            (the original fell through with post_url/post_data undefined
            globals, producing a confusing NameError at the POST below).
    """
    # raw string: "^1\d{10}" is an invalid escape sequence warning in py3
    if re.match(r"^1\d{10}", account):
        print("手机号码登录")
        post_url = "https://www.zhihu.com/login/phone_num"
        post_data = {
            "_xsrf": get_xsrf(),
            "phone_num": account,
            "password": password,
            "captcha": get_captcha()
        }
    elif "@" in account:
        # treat the account as an e-mail login
        print("邮箱方式登录")
        post_url = "https://www.zhihu.com/login/email"
        post_data = {
            "_xsrf": get_xsrf(),
            "email": account,
            "password": password,
            "captcha": get_captcha()
        }
    else:
        raise ValueError("account must be a phone number or an e-mail address")
    response_text = session.post(post_url, data=post_data, headers=headers)
    print(response_text)
    session.cookies.save()
# Script entry: reuse the saved cookie session if it is still valid,
# otherwise perform a fresh login with the credentials from config.
if not is_login():
    zhihu_login(config.USER_NAME, config.USER_PASSWORD)
| [
"dreamkong0113@gmail.com"
] | dreamkong0113@gmail.com |
0afa2a4ed820926dbd73e9559f8d0d27c22d61a6 | 25333481c3225f8481bc313dcd86baedffa0dd18 | /users/models.py | d9723bfa99e6089c552507f2be3ac7bdcb412b25 | [] | no_license | ansh-saini/file-sharing | 89d00c47be58a2bf84ab1910ccf51ec186a2abe1 | 4bb80e7e18aaff7f862ac2a14794f84f6be45e5b | refs/heads/master | 2021-06-29T02:18:21.224392 | 2019-03-23T20:16:17 | 2019-03-23T20:16:17 | 176,897,726 | 0 | 4 | null | 2020-10-20T17:37:20 | 2019-03-21T08:02:47 | Python | UTF-8 | Python | false | false | 1,048 | py | from django.db import models
from django.contrib.auth.models import User
class Document(models.Model):
    """A file uploaded by a user, stored under MEDIA_ROOT/files."""
    user = models.ForeignKey(User, on_delete=models.CASCADE)  # owner; cascade-deleted with the user
    file = models.FileField(upload_to='files', blank=True)
    name = models.CharField(max_length=50, default='default')  # display name, used by __str__
    def __str__(self):
        return self.name
class Profile(models.Model):
    """Per-user profile holding a set of associated documents.

    A Profile row is created automatically for each new User by the
    post_save signal handler below.
    """
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    # NOTE(review): presumably "documents shared with this user" as opposed
    # to Document.user (the uploader) — confirm against the views.
    docs = models.ManyToManyField(Document, blank=True)
    def __str__(self):
        return f'{self.user.username} Profile'
#Signals
from django.db.models.signals import post_save, post_delete
from django.contrib.auth.models import User
from django.dispatch import receiver
from .models import Profile
import os
@receiver(post_save, sender=User)
def create_profile(sender, instance, created, **kwargs):
    """post_save hook: give every newly created User an empty Profile."""
    if created:
        Profile.objects.create(user=instance)
@receiver(post_delete, sender=Document)
def auto_delete_file_on_delete(sender, instance, **kwargs):
    """post_delete hook: remove the file on disk when its Document row is deleted."""
    if instance.file:
        # guard: the FileField may reference a path that no longer exists
        if os.path.isfile(instance.file.path):
            os.remove(instance.file.path)
| [
"caseymccray201@gmail.com"
] | caseymccray201@gmail.com |
726dc4c74212efe7c5717e5df649cc620e6fcfbd | cba30b3cc59d2264bdc8ddc3fb0d3ffc116c3f07 | /intern/admin.py | bb19bd0cbbcf7cc64acd7e8b4494263349490769 | [] | no_license | ankitm27/web_scaraper | 3d73ec5c44bd513e24e16daa63fa7f6e0d988653 | c62748841630f559569759c1435299750d920101 | refs/heads/master | 2020-12-25T14:14:05.726422 | 2016-06-07T16:11:38 | 2016-06-07T16:11:38 | 60,627,552 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from django.contrib import admin
from .models import Profile,Domain,Database
# Register your models here.
class ProfileModelAdmin(admin.ModelAdmin):
    """Admin change list for Profile rows."""
    # Columns shown in the admin change list.
    list_display = ["username", "firstname", "lastname", "gender", "dob"]
    # The original inner "class Meta: model = Profile" was removed: a
    # ModelAdmin ignores an inner Meta (that idiom belongs to ModelForm);
    # the model is bound by admin.site.register() at the bottom of the file.
class DomainModelAdmin(admin.ModelAdmin):
    """Admin change list for Domain rows."""
    # Columns shown in the admin change list.
    list_display = ["username", "domain"]
    # The original inner "class Meta: model = Domain" was removed: a
    # ModelAdmin ignores an inner Meta; the model is bound via
    # admin.site.register() at the bottom of the file.
class DatabaseModelAdmin(admin.ModelAdmin):
    """Admin change list for Database rows."""
    # Columns shown in the admin change list.  "categary" matches the
    # (misspelled) field name on the Database model — do not "fix" the
    # spelling here without renaming the model field too.
    list_display = ["domain", "categary", "company"]
    # The original inner "class Meta: model = Database" was removed: a
    # ModelAdmin ignores an inner Meta; the model is bound via
    # admin.site.register() at the bottom of the file.
# Register each model with its custom admin class on the default admin site.
admin.site.register(Profile, ProfileModelAdmin)
admin.site.register(Domain, DomainModelAdmin)
admin.site.register(Database, DatabaseModelAdmin)
| [
"ankit@debian"
] | ankit@debian |
1a275190f68b9bc189ddc9096a9f624f93f9e569 | 9c58c0736f8cf996cd9835a1cec7c51f63d8546b | /neutron_lib/api/definitions/base.py | c530b150a3fc622a9ff5aa44976773ee4b70af09 | [] | no_license | itendtolosealot/odl_scale_test | d14550a681b00167e0633300d729051bc447512e | 140b2ca87c3ff5b333ac3563b6fd05ae93886ca0 | refs/heads/master | 2020-12-05T23:20:51.271658 | 2020-01-17T09:52:58 | 2020-01-17T09:52:58 | 232,274,893 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,009 | py | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib import constants
KNOWN_ATTRIBUTES = (
'admin_state_up',
'allocation_pools',
'cidr',
'default_prefixlen',
'default_quota',
'description',
'device_id',
'device_owner',
'dns_nameservers',
'enable_dhcp',
'fixed_ips',
'gateway_ip',
'host_routes',
'id',
'ip_version',
'ipv6_address_mode',
'ipv6_ra_mode',
'is_default',
'mac_address',
'max_prefixlen',
'min_prefixlen',
'name',
'network_id',
'port_id',
'prefixes',
'prefixlen',
'project_id',
'qos_policy_id',
constants.SHARED,
'status',
'subnets',
'subnetpool_id',
'tenant_id'
)
KNOWN_RESOURCES = (
'networks',
'ports',
'routers',
'subnets',
'subnetpools'
)
KNOWN_HTTP_ACTIONS = (
'DELETE',
'GET',
'POST',
'PUT',
)
KNOWN_ACTION_STATUSES = (
200,
201,
202,
203,
204,
205,
206,
)
KNOWN_EXTENSIONS = (
'address-scope',
'agent',
'agent-resources-synced',
'allowed-address-pairs',
'auto-allocated-topology',
'availability_zone',
'binding',
'data-plane-status',
'project-default-networks',
'default-subnetpools',
'dhcp_agent_scheduler',
'dns-domain-ports',
'dns-integration',
'dvr',
'empty-string-filtering',
'expose-port-forwarding-in-fip',
'ext-gw-mode',
'external-net',
'extra_dhcp_opt',
'extraroute',
'filter-validation',
'fip-port-details',
'flavors',
'floating-ip-port-forwarding',
'floatingip-autodelete-internal',
'floatingip-pools',
'ip-substring-filtering',
'l3-ha',
'l3_agent_scheduler',
'l3-port-ip-change-not-allowed',
'logging',
'metering',
'multi-provider',
'net-mtu',
'network-ip-availability',
'network-segment-range',
'network_availability_zone',
'pagination',
'port-resource-request',
'port-security',
'project-id',
'provider',
'qos',
'qos-bw-limit-direction',
'qos-gateway-ip',
'qos-rules-alias',
'quotas',
'rbac-policies',
'router',
'router_availability_zone',
'security-group',
'segment',
'service-type',
'sort-key-validation',
'sorting',
'standard-attr-description',
'standard-attr-revisions',
'standard-attr-segment',
'standard-attr-timestamp',
'subnet_allocation',
'subnet_onboard',
'subnet-segmentid-enforce',
'subnet-segmentid-writable',
'tag',
'trunk',
'trunk-details',
# Add here list of extensions with pointers to the project repo, e.g.
# 'bgp', # http://git.openstack.org/cgit/openstack/neutron-dynamic-routing
# http://git.openstack.org/cgit/openstack/neutron-fwaas
'fwaas',
'fwaasrouterinsertion',
'fwaas_v2',
'bgpvpn', # https://git.openstack.org/cgit/openstack/networking-bgpvpn
'bgpvpn-routes-control',
'bgpvpn-vni',
# git.openstack.org/cgit/openstack/neutron-vpnaas
'vpnaas',
'vpn-endpoint-groups',
'vpn-flavors',
# http://git.openstack.org/cgit/openstack/networking-sfc:
'flow_classifier',
'sfc',
)
KNOWN_KEYWORDS = (
'allow_post',
'allow_put',
'convert_to',
'convert_list_to',
'default',
'enforce_policy',
'is_filter',
'is_sort_key',
'is_visible',
'primary_key',
'required_by_policy',
'validate',
'default_overrides_none',
'dict_populate_defaults',
)
| [
"ashvin213@gmail.com"
] | ashvin213@gmail.com |
2fb1b1fd1a6319058f7953478c369bd942b755c8 | 0f6d6d4c8cc4d5c4a669b3d5bf2308b593899fdb | /store/views.py | ed9d685ded1e9ab2f229077cd4bcaf2234f7de35 | [] | no_license | abhisljh/Tshop | e1346bc982fc6ad0867b6f87c215b9616bd560ed | a6c03028cc8724ef324e3b4242d20850e8ee8e52 | refs/heads/master | 2023-02-22T05:27:16.469239 | 2021-01-15T08:26:15 | 2021-01-15T08:26:15 | 329,845,103 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,455 | py | from django.shortcuts import render , redirect
from django.http import HttpResponse
from store.forms.authforms import CustomerCreationForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth import authenticate, login as loginUser
from django.contrib.auth import logout
from store.models import Tshirt, SizeVariant , Cart
from math import floor
# Create your views here.
def show_product(request, slug):
tshirt = Tshirt.objects.get(slug=slug)
size = request.GET.get('size') #holding the size on click of size
if size is None:
size = tshirt.sizevariant_set.all().order_by('price').first()#retriving the size from tshirt model
else:
size = tshirt.sizevariant_set.get(size=size) #holding the size on click of size
size_price = floor(size.price) #holding teh size price
sell_price = size_price - (size_price *(tshirt.discount/100)) #fundinto for discount
sell_price = floor(sell_price)#price which is coming on details page
context = {'tshirt': tshirt, 'price': size_price,'sell_price': sell_price,'active_size': size} #passing all the value in dictionary
return render(request, template_name ='store/product_details.html', context = context)
def home(request):
tshirts = Tshirt.objects.all()
print(tshirts)
print(len(tshirts))
cart = request.session.get('cart')
print(cart)
context = {
"tshirts": tshirts
}
return render(request, template_name='store/home.html', context = context)
def cart(request):
cart = request.session.get('cart')
if cart is None:
cart = [] #asigning empy list
for c in cart:
tshirt_id = c.get('tshirt')
tshirt = Tshirt.objects.get(id=tshirt_id)
c['size'] = SizeVariant.objects.get(tshirt=tshirt_id, size=c['size'])
c['tshirt'] = tshirt
print(cart)
return render(request, template_name='store/cart.html' , context ={'cart': cart })
def orders(request):
return render(request, template_name='store/orders.html')
def login(request):
if request.method == 'GET':
form = AuthenticationForm() #created object
return render(request, template_name='store/login.html', context={
'form' : form
})
else:
form = AuthenticationForm(data = request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username = username , password = password)
if user:
loginUser(request, user)
#{ size, tshirt, quantity}
cart = Cart.objects.filter(user = user)
session_cart =[]
for c in cart:
obj = {
'size': c.sizeVariant.size,
'tshirt': c.sizeVariant.tshirt.id
,'quantity': c.quantity
}
session_cart.append(obj)
request.session['cart'] = session_cart
return redirect('homepage')
else:
return render(request, template_name='store/login.html', context={
'form' : form
})
#staring of signup form
def signup(request):
if(request.method == 'GET'):
form = CustomerCreationForm()
context = {
"form": form
}
return render(request, template_name='store/signup.html', context=context)
else:
form = CustomerCreationForm(request.POST)
if form.is_valid():
user = form.save();
user.email = user.username
user.save()
print(user)
return render(request, template_name='store/login.html')
context = {
"form": form
}
return render(request, template_name='store/signup.html', context=context)
def signout(request):
logout(request)
return render(request, template_name='store/home.html')
def add_to_cart(request, slug, size):
user = None
if request.user.is_authenticated:
user = request.user
cart = request.session.get('cart')
if cart is None:
cart = []
tshirt = Tshirt.objects.get(slug = slug)
size_temp = SizeVariant.objects.get(size = size , tshirt = tshirt) #we are storing the size and tshirt in size_temp variable
flag = True
for cart_obj in cart:
t_id = cart_obj.get('tshirt')
size_short = cart_obj.get('size')
if t_id == tshirt.id and size == size_short:
flag = False
cart_obj['quantity'] = cart_obj['quantity']+1
if flag:
cart_obj = {
'tshirt': tshirt.id,
'size': size,
'quantity': 1
}
cart.append(cart_obj)
if user is not None:
existing = Cart.objects.filter(user = user, sizeVariant = size_temp)
if len(existing) > 0:
obj = existing[0]
obj.quantity = obj.quantity+1
obj.save()
else:
c = Cart()
c.user = user
c.sizeVariant = size_temp
c.quantity = 1
c.save()
request.session['cart'] = cart
return_url = request.GET.get('return_url')
print(slug, size)
return redirect(return_url ) | [
"abhi.sljh@gmail.com"
] | abhi.sljh@gmail.com |
1ec3fb4cd0fabf1c806c99529f51f488eb32da09 | e19fecb69b2615fc65b5ae0aedc12b09190ea142 | /tvpdownloader/tvpdownloader.py | 7b2c23c74745ac4eb51faf110f40e45536a10526 | [
"MIT"
] | permissive | patkub/tvpdownloader | 6f63edbcab60de7b81d39e01c4365f5abb4c49c1 | 1abe7a6486ac3bc40d37c52314eebe9fa9aa43a9 | refs/heads/master | 2020-03-19T04:40:57.161123 | 2018-06-05T19:06:48 | 2018-06-05T19:06:48 | 135,853,321 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,117 | py | from bs4 import BeautifulSoup
import requests
import re
import urllib.request
import shutil
import os
import filetype
from mimetypes import guess_extension
class TVPDownloader:
def __init__(self, url, dest="downloads/", quality=5):
self.url = url
self.dest = dest
# quality can be 2 to 7
self.quality = str(quality)
# check destination
if not os.path.exists(self.dest):
os.makedirs(self.dest)
def find_player(self):
# find player
url_data = requests.get(self.url).text
url_soup = BeautifulSoup(url_data, 'html.parser')
self.player_src = url_soup.find(id="JS-TVPlayer-Wrapper").get('data-src')
#print("Player = " + self.player_src)
return self.player_src
def find_episode(self):
# find episode url
player_data = requests.get(self.player_src).text
player_soup = BeautifulSoup(player_data, 'html.parser')
episode_src = player_soup.find(id="tvplayer").get('src')
self.episode_url = "https://vod.tvp.pl" + episode_src
#print("Episode url = " + self.episode_url)
return self.episode_url
def find_episode_link(self):
# find episode link
episode_data = requests.get(self.episode_url).text
episode_soup = BeautifulSoup(episode_data, 'html.parser')
episode_script = episode_soup.find_all("script")[9].text
self.episode_link = re.findall("src:\'(.*?)\', type: \'video/mp4\'", episode_script)[0]
# replace quality
self.episode_link = self.episode_link.replace("video-5", "video-" + self.quality)
#print("Episode link = " + self.episode_link)
return self.episode_link
def parse_output_name(self):
series = re.search("\/([^/,]+)\,", self.url).group(1)
episode = re.search("\,(.*?)\,", self.url).group(1)
self.out_name = series + " - " + episode + " - " + self.quality
#print("Output name = " + self.out_name)
return self.out_name
def get(self):
# download
self.find_player()
self.find_episode()
episode_link = self.find_episode_link()
output_name = self.parse_output_name()
# determine episode file type
mimetype = urllib.request.urlopen(episode_link).info().get_content_type()
#print("Content-Type = " + mimetype)
# guess episode extension
extension = guess_extension(mimetype)
#print("Guessed extension = " + extension)
# combine output path
output_path = os.path.join(self.dest, output_name + extension)
# write to file
print("Writing " + output_path)
# https://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3
with urllib.request.urlopen(episode_link) as response, open(output_path, 'wb') as out_file:
shutil.copyfileobj(response, out_file)
if __name__ == '__main__':
# list of urls to download
urls = [
{"url": "https://vod.tvp.pl/video/rodzinkapl,odc1,3994796", "quality": 5},
{"url": "https://vod.tvp.pl/video/rodzinkapl,odc-221,34842411", "quality": 5},
]
for u in urls:
# download each url
print("Downloading: " + u['url'] + " Quality: " + str(u['quality']))
TVPDownloader(url=u['url'], quality=u['quality']).get()
print("Done!")
| [
"patrick@antuple.net"
] | patrick@antuple.net |
f58c187b74b26d3390d310ce5c9f46d0fae6d42e | bcf45f0e853eb7610f25d52f7f7e639ecca8e6dc | /env/lib/python3.6/hashlib.py | 5640210ff778334ab011be7b6f3c3010d5db276d | [] | no_license | CuriousCat318/my-first-blog | 6eaeee00ff37527a0a3a954e5a18fd3ce6e5a864 | 94beb647221c6d63f1ab3106ced93d631c921345 | refs/heads/master | 2020-03-26T07:54:43.822201 | 2018-08-14T11:02:13 | 2018-08-14T11:02:13 | 144,677,403 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 44 | py | /home/blr/anaconda3/lib/python3.6/hashlib.py | [
"sharonleo318@gmail.com"
] | sharonleo318@gmail.com |
48f3945ce6fa12055d1619a17c0eaf14a3a41a62 | f508bc08786c49e8ef1c714a23782c1f83665599 | /Day5(2)/noise.py | f78bef2a10221df1c3a11bd4f75d23a486df7092 | [] | no_license | neha-swapnil/OpenCV | 3b0e787dcfbab32708ab463b1ca48f0e21d9eae0 | 0229960bee7ae5b1ac36f8b055b32e1a017d52e1 | refs/heads/master | 2020-04-25T17:57:30.830910 | 2019-03-10T14:49:39 | 2019-03-10T14:49:39 | 172,561,846 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 772 | py | import cv2
import random
import numpy as np
import matplotlib.pyplot as plt
def main():
path = "/home/neha/Desktop/jungkook/jk111.jpg"
img = cv2.imread(path, 1)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
rows, columns, channels = img.shape
p = 0.05 #5% probability of noise
output= np.zeros(img.shape, np.uint8)#creating a new black window
for i in range(rows):
for j in range(columns):
r = random.random()
if r < p/2:#0-p/2
#pepper sprinkle(black)
output[i][j] = [0, 0, 0]
elif r < p:#p/2-p
#salt sprinkle(white)
output[i][j] = [255, 255, 255]
else:
#keeping the image the same
output[i][j] = img[i][j]
plt.imshow(output)
plt.title('Image with Salt and Pepper noise')
plt.show()
if __name__ == "__main__":
main()
| [
"noreply@github.com"
] | noreply@github.com |
7b0b570f4e45b070be1107afdd0f07623726becb | 93603a9fa81bd1722ee92f29acab164c4bd82603 | /day.py | e8ce8bd806673aa3b95a95f57610d9982442b70a | [] | no_license | YOGESH-TECH/python-basic-problems | 4160f090d78cb1b11d10f774375014a2f6c27cf8 | db3b57e7732e00339788a09f2e4e1296bea70790 | refs/heads/master | 2022-03-05T12:31:23.583636 | 2019-09-05T10:35:21 | 2019-09-05T10:35:21 | 199,827,465 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 273 | py | day=int(input("enter the day here :"))
if day==1:
print('monday')
elif day==2:
print('tue')
elif day==3:
print('wed')
elif day==4:
print('thu')
elif day==5:
print('fri')
elif day==6:
print('sat.')
elif day==7:
print('sun')
else:
print('invalid day') | [
"noreply@github.com"
] | noreply@github.com |
b1ef1d27160d7876720034008e23c14d70be8c22 | ec5a4806d6b18bf941d9b1897a5079983b988062 | /test/interview.py | 0291d9dcba03e7131a47037599593bc7954c6508 | [] | no_license | Topaz1618/Snow | c11bdceefab7c57059955f3848fcfc3b33541c8f | 036afec1e7cf999eb95cd7034bb599266fe53c34 | refs/heads/master | 2021-05-06T19:08:01.358090 | 2017-11-27T07:45:10 | 2017-11-27T07:45:10 | 112,159,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,839 | py | #_*_coding:utf-8_*_
# Author:Topaz
import socket
import select
import time
sock = socket.socket(socket.AF_INET)
sock.bind(('127.0.0.1',1233))
sock.setblocking(0)
sock.listen(128)
inputs = []
inputs.append(sock) #套接字加入列表,有人发来请求inputs会发生变化
print("初始input\n %s" %inputs)
count = 0
while True:
# print("select 监听的列表\n %s" %inputs)
''' 1.调用select函数对套接字进行监视之前,必须将要监视的套接字分配给数组(readfds,writefds,exceptfds)中的一个
2.inputs列表加入readfds集合,有描述符(fd)就绪后select返回
3.怎么叫就绪呢,readsfs满足四种条件之一就就绪,这里只说一种就是大于接受缓存区最低水位1,也就是有数据就就绪可读
)'''
print(count,inputs )
time.sleep(2)
count += 1
rlist,wlist,elist = select.select(inputs,[],[],0.1)
for r in rlist: #r的种类有很多种
if r == sock: #如果r == sock 就是有人发来请求
print('wtr', r)
# a = sock.accept()
# print("accpet",a)
conn,addr = sock.accept() #accpet函数返回 conn新的套接字,addr存放客户端的地址
conn.setblocking(0)
inputs.append(conn) #把新来的连接也加到input,让select监测它
else:
data = b""
while True:
try:
chunk = r.recv(1024) #接收数据
data = data + chunk
except Exception as e:
chunk = None
if not chunk:
print("收完了88")
break
r.sendall(b'biu')
inputs.remove(r)
r.close()
#就绪 ==> http://www.leoox.com/?p=82
| [
"topaz@163.com"
] | topaz@163.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.