blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2 values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23 values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220 values | src_encoding stringclasses 30 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 2 10.3M | extension stringclasses 257 values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1d4ff5552c38e2033ad74010a713ece92f2d92f8 | 1fa8b6a8a11277d0d1807f98efaca40ea5e37b24 | /BOJ/연구소 3.py | f4486a7319ab0259cccd708a70f2fff720144fc7 | [] | no_license | wontaechoi/algorithm | c41c5857df5a0fdc2b8c3226aa51e113b667efc5 | a92261f3e6ac8fa515e2c2332e34d6a2176f243d | refs/heads/master | 2022-12-31T06:48:55.941394 | 2020-10-19T14:42:18 | 2020-10-19T14:42:18 | 287,288,841 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,787 | py | def combination(array, m):
def combi(arr, m):
if len(arr) == m:
all_comb.append(arr.copy())
return
start = array.index(arr[-1]) +1 if arr else 0
for i in range(start, len(array)):
combi(arr + [array[i]], m)
combi([], m)
return
def bfs(virus):
    """BFS the lab grid from the chosen virus cells.

    virus -- list of (x, y) starting coordinates.
    Reads module-level N (grid size), lab (grid: 0 empty, 1 wall, 2 virus)
    and dx/dy (4-neighbourhood offsets).

    Returns the time at which the last *empty* cell is infected, or
    float('inf') if some empty cell is unreachable from this placement.
    """
    from collections import deque  # O(1) popleft; list.pop(0) is O(n) per pop

    queue = deque()
    visited = [[float('inf') for _ in range(N)] for __ in range(N)]
    for x, y in virus:
        queue.append((x, y, 0))
        visited[x][y] = 0
    highest = 0
    while queue:
        x, y, t = queue.popleft()
        # Cells that already contain a virus (value 2) never need to be
        # infected, so they must not extend the answer.
        if lab[x][y] != 2:
            highest = max(highest, t)
        for i in range(4):
            new_x = x + dx[i]
            new_y = y + dy[i]
            if new_x < 0 or new_x >= N or new_y < 0 or new_y >= N:
                continue
            if lab[new_x][new_y] == 1:  # wall
                continue
            if visited[new_x][new_y] <= t + 1:
                continue
            queue.append((new_x, new_y, t + 1))
            visited[new_x][new_y] = t + 1
    # Any empty cell that was never reached makes this placement invalid.
    for i in range(N):
        for j in range(N):
            if lab[i][j] == 0:
                if visited[i][j] == float('inf'):
                    return float('inf')
    return highest
# Driver: read the N x N grid, try every combination of M active-virus
# placements, and keep the minimum time to infect every empty cell.
N, M = map(int ,input().split())
lab = []
viruses = []
dx = [-1, 0 ,1, 0]  # 4-direction row offsets (up, right, down, left)
dy = [0, 1, 0, -1]  # matching column offsets
for i in range(N):
    row = list(map(int, input().split()))
    for j in range(N):
        if row[j] == 2:
            viruses.append((i,j))  # remember every candidate virus cell
    lab.append(row)
all_comb =[]  # filled (as a side effect) by combination() with index tuples
combination([i for i in range(len(viruses))], M)
answer = float('inf')
for com in all_comb:
    virus = []
    for i in com:
        virus.append(viruses[i])
    ans = bfs(virus)
    answer = min(ans, answer)
# inf means no placement reaches every empty cell; the problem wants -1 then.
answer = answer if answer != float('inf') else -1
print(answer) | [
"35357369+wontaechoi@users.noreply.github.com"
] | 35357369+wontaechoi@users.noreply.github.com |
6fd7c178a26f5ee9680985d398bfcacdfed91699 | 0d7d129e975050f754a60a0a64ec9d0658c2213c | /rsscraper/users/tests/test_forms.py | dbcaf9ee5c1fcc191eb1880febc3ba7120c88a41 | [
"MIT"
] | permissive | Sunno/rsscraper | 1781daaf22655961809527992e58ce3d5094b813 | a9897d507980ec4525e8521188cf76203829caca | refs/heads/master | 2020-11-27T14:44:20.760498 | 2020-01-10T15:40:52 | 2020-01-10T15:40:52 | 229,495,438 | 0 | 0 | MIT | 2020-06-06T01:45:52 | 2019-12-21T23:29:29 | Python | UTF-8 | Python | false | false | 1,117 | py | import pytest
from rsscraper.users.forms import UserCreationForm
from rsscraper.users.tests.factories import UserFactory
pytestmark = pytest.mark.django_db
class TestUserCreationForm:
    """Verify that UserCreationForm enforces username uniqueness."""

    def test_clean_username(self):
        # Build (but do not persist) a prototype user to get valid form data.
        candidate = UserFactory.build()
        payload = {
            "username": candidate.username,
            "password1": candidate._password,
            "password2": candidate._password,
        }

        # First submission: the username is still free, so the form validates
        # and clean_username() echoes the username back.
        form = UserCreationForm(payload)
        assert form.is_valid()
        assert form.clean_username() == candidate.username

        # Persist the user, then attempt to register the same username again:
        # validation must now fail with exactly one error, on "username".
        form.save()
        form = UserCreationForm(payload)
        assert not form.is_valid()
        assert len(form.errors) == 1
        assert "username" in form.errors
| [
"thesunno@gmail.com"
] | thesunno@gmail.com |
4737339fcf6dfa728fd3cdf54ddc19cdc0f34bfc | 6d4509dd7945cc7d0bb666c775f52a0a9a6cb139 | /tests/demoqa/test_async.py | c3319f3c4bfdba7580b0afdf9e225b531d44ee98 | [] | no_license | N1addict/python-playwright | c1038a8890aa236035dd0c90d26ccb2344821539 | c8bc02936a1c8b573c96a614d92af3e359ab939e | refs/heads/main | 2023-03-17T08:37:51.686158 | 2021-03-02T13:54:06 | 2021-03-02T13:54:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,442 | py | import pytest
from playwright.async_api import async_playwright
from playwright.async_api._generated import ElementHandle
from pytest import fixture
base_url: str = "https://www.demoqa.com"
@pytest.mark.asyncio
class TestBase:
    """Smoke tests for the demoqa "Elements" section (async Playwright)."""

    async def test_visit_elements_page(self) -> None:
        """The Elements page is reachable and shows its header."""
        async with async_playwright() as pw:
            browser = await pw.chromium.launch()
            page = await browser.new_page()
            await page.goto(f"{base_url}/elements")
            header: str = await page.inner_text(".main-header")
            assert "Elements" in header

    async def test_collapse_elements_container(self) -> None:
        """Clicking the group header collapses the Elements container."""
        async with async_playwright() as pw:
            browser = await pw.chromium.launch()
            page = await browser.new_page()
            await page.goto(f"{base_url}/elements")
            group: ElementHandle = await page.wait_for_selector(
                ".element-group"
            )
            await page.click(".header-right")
            # When collapsed, the Bootstrap "show" class is removed.
            css_classes: str = await group.eval_on_selector(
                ".element-list", "el => el.className"
            )
            assert "show" not in css_classes
@pytest.mark.asyncio
class TestTextBox:
    """Form-submission tests for the demoqa Text Box page."""

    # Canonical form data; keys intentionally match the ids of the output
    # elements rendered after submission (#name, #email, ...).
    user: dict = {
        "name": "Test Tester",
        "email": "test@test.com",
        "currentAddress": "3930 N Pine Grove Ave, Chicago, IL 60613",
        "permanentAddress": "24 Girard St, Rochester, NY 14610",
    }

    async def test_submit_valid_data(self):
        """Submitting valid data echoes every field back in the #output box."""
        async with async_playwright() as playwright:
            browser = await playwright.chromium.launch()
            page = await browser.new_page()
            await page.goto(f"{base_url}/text-box")
            user_form: ElementHandle = await page.wait_for_selector(
                "#userForm"
            )
            username_field: ElementHandle = await user_form.wait_for_selector(
                "#userName"
            )
            email_field: ElementHandle = await user_form.wait_for_selector(
                "#userEmail"
            )
            current_address_field: ElementHandle = (
                await user_form.wait_for_selector("#currentAddress")
            )
            permanent_address_field: ElementHandle = (
                await user_form.wait_for_selector("#permanentAddress")
            )
            # Fill all four fields, then submit once.
            await username_field.fill(self.user["name"])
            await email_field.fill(self.user["email"])
            await current_address_field.fill(self.user["currentAddress"])
            await permanent_address_field.fill(self.user["permanentAddress"])
            await page.click("#submit")
            output_field: ElementHandle = await page.wait_for_selector(
                "#output"
            )
            # Each submitted value must appear in its matching output element.
            for key, value in self.user.items():
                ele_value: str = await output_field.eval_on_selector(
                    f"#{key}", "el => el.innerText"
                )
                assert value in ele_value

    async def test_error_when_invalid_email(self):
        """A malformed email gets the field-error class and blocks submission."""
        async with async_playwright() as playwright:
            browser = await playwright.chromium.launch()
            page = await browser.new_page()
            await page.goto(f"{base_url}/text-box")
            user_form: ElementHandle = await page.wait_for_selector(
                "#userForm"
            )
            email_field: ElementHandle = await user_form.wait_for_selector(
                "#userEmail"
            )
            await email_field.fill("test")  # not a valid email address
            await page.click("#submit")
            email_class: str = await user_form.eval_on_selector(
                "#userEmail", "el => el.className"
            )
            assert "field-error" in email_class
@pytest.mark.asyncio
class TestButtons:
    """Click-interaction tests for the demoqa Buttons page."""

    @pytest.mark.parametrize(
        "button_type",
        [
            ("Double Click", "doubleClickMessage"),
            ("Right Click", "rightClickMessage"),
            ("Click", "dynamicClickMessage"),
        ],
    )
    async def test_click_types(self, button_type: fixture):
        """Each click action (double, right, plain) shows its result message.

        :param button_type: tuple of (click action, id of the expected
            result element).
        """
        click_action, result = button_type
        async with async_playwright() as playwright:
            browser = await playwright.chromium.launch()
            page = await browser.new_page()
            await page.goto(f"{base_url}/buttons")
            if click_action == "Double Click":
                await page.dblclick("#doubleClickBtn")
            elif click_action == "Right Click":
                await page.click("#rightClickBtn", button="right")
            else:
                # The plain-click button has a dynamic id; locate it by text.
                await page.click('button >> text="Click Me"')
            # Fix: page.is_visible returns a bool, not an ElementHandle.
            message: bool = await page.is_visible(f"#{result}")
            assert message
| [
"thompson.jonm@gmail.com"
] | thompson.jonm@gmail.com |
79652b0f0f24d659472eca84f4e185eca3bbabe8 | 4870d1009e25783ad8c394fe6f09031647896ff7 | /quartus_cadence_netlist_merger/main.py | 231d8e5794e6b358e51b8548af632566e7dcbe29 | [] | no_license | yuravg/quartus_cadence_netlist_merger | 5c5b4006b05a169593ab4548f1878e901d4949a7 | 637c44a409e2343f688b8e0f8d95a246d1fb557d | refs/heads/master | 2021-07-03T16:46:43.669145 | 2020-12-03T07:05:02 | 2020-12-03T07:16:28 | 204,642,819 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 216 | py | """Run point"""
from .qp_cnl_merger import QuartusCadenceMerger
from .commandlinearg import get_args
def main():
    """Entry point for the netlist-merger application script."""
    # Parse/validate the command-line arguments (side effect only; the
    # return value is discarded here).
    get_args()
    # Start the GUI main loop — presumably a Tk-style event loop that
    # blocks until the window is closed (confirm in qp_cnl_merger).
    QuartusCadenceMerger().mainloop()
| [
"yuravg@gmail.com"
] | yuravg@gmail.com |
4d7cfd0ffccbfd283de36c33f18f12867f841c60 | 480ba90ae43f6ef5072f163fe195f83408cc8842 | /Projects/csbh_stat/csbh_stat/modules/idcs.py | 7a0a6538f74e63c00036a91081bf1bd020e52478 | [] | no_license | raystyle/Myporjects | 04478480700f54038b34edb0f6daeec31078b1a5 | 1892b74fb390c6c61a77d86cc781ee64f512f0b0 | refs/heads/master | 2021-05-21T08:07:31.743139 | 2020-04-02T13:05:25 | 2020-04-02T13:05:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,950 | py | #encoding:utf-8
import time
import datetime
from dbutils import MySQLConnection as SQL
def get_idc_info_date():
    """Aggregate this year's IDC spending from the idc_data table.

    Returns a list shaped for charting: the first element is the monthly
    grand total ({'name': '总支出', 'data': [...]}), followed by one
    {'name': <idcname>, 'data': [...]} series per data centre, with the
    monthly values in date order.
    """
    now_year = datetime.datetime.now().strftime('%Y')
    # One query for monthly totals across all IDCs, one for per-IDC rows.
    _sql_all = 'select sum(combined) from idc_data where date like %s group by date order by date;'
    _sql_alone = 'select date,idcname,combined from idc_data where date like %s order by date'
    _args = (now_year+'%',)
    _sql_count_all, rt_list_all = SQL.excute_sql(_sql_all,_args)
    _sql_count, rt_list = SQL.excute_sql(_sql_alone,_args)
    '''
    计算每月总收入-------------
    '''
    # Monthly grand totals, converted to float for JSON serialisation.
    rt = []
    # print rt_list_all,_sql_count
    for x in rt_list_all:
        rt.append(float(x[0]))
    all_date = []
    all_date.append({'data': rt ,'name': '总支出'})
    '''
    计算每个机房每个月的收入
    '''
    # Group the per-IDC rows into one series per idcname. The inner
    # for/else sets status=1 only when no existing series matched (the
    # else runs when the loop finishes without break), so a new series is
    # created exactly once per idcname.
    rs = []
    for x in rt_list:
        if len(rs) != 0:
            for y in rs:
                if y['name'] == x[1]:
                    y['data'].append(float(x[2]))
                    status = 0
                    break
            else:
                status = 1
        else:
            status = 1
        if status == 1:
            rs.append({'name': x[1], 'data': [float(x[2])]})
    # Return the grand-total series first, then the per-IDC series.
    return all_date + rs
def month_get():
    """Return the previous month as a 'YYYY-MM' string (e.g. '2013-04')."""
    today = datetime.datetime.now()
    # Subtracting today's day-of-month always lands on the last day of the
    # previous month.
    prev_month_day = today - datetime.timedelta(days=today.day)
    first_of_prev = datetime.datetime(prev_month_day.year, prev_month_day.month, 1)
    # str(datetime) is 'YYYY-MM-DD HH:MM:SS'; keep only the year and month.
    return '-'.join(str(first_of_prev).split('-')[:2])
def get_new_idcinfo(_local_date=None):
    """Fetch all idc_data rows for one month.

    _local_date -- 'YYYY-MM' month key; defaults to the previous month,
    computed *per call*. (The old default ``=month_get()`` was evaluated
    once at import time, so a long-running process kept serving the month
    that was current when the module was loaded.)

    Returns (month_key, [row_dict, ...]) where each row dict maps the
    idc_data column names to the row values.
    """
    if _local_date is None:
        _local_date = month_get()
    colloens = ('id', 'date', 'idcname', 'cabinet', 'cabinet_price','host_amount','bandwidth','bandwidth_price','bandwidth_amount','combined','status','info')
    _sql = 'select * from idc_data where date = %s'
    _args = (_local_date,)
    rt = []
    _sql_count, rt_list = SQL.excute_sql(_sql,_args)
    for i in rt_list:
        rt.append(dict(zip(colloens,i)))
    return _local_date,rt
def add_new_before_select(params):
    """Check whether an (idcname, date) record already exists.

    Returns (False, "<idcname> <date>已存在") when a matching row exists,
    otherwise (True, '进行入库操作'), meaning the insert may proceed.
    """
    name = params.get('idcname')
    month = params.get('date')
    query = 'select * from idc_data where idcname = %s and date = %s'
    row_count, _rows = SQL.excute_sql(query, (name, month))
    if row_count != 0:
        return False, name + ' ' + month + '已存在'
    return True, '进行入库操作'
def add_new_idcinfo(params):
    """Insert one monthly billing record into idc_data.

    Derived fields are recomputed server-side rather than trusted from the
    client: bandwidth_amount = bandwidth * bandwidth_price and
    combined = host_amount + bandwidth_amount.
    Returns (True, '添加成功') on success, (False, '添加失败') otherwise.
    """
    idcname = params.get('idcname')
    date = params.get('date')
    cabinet = params.get('cabinet')
    cabinet_price = params.get('cabinet_price')
    host_amount = params.get('host_amount')
    bandwidth = params.get('bandwidth')
    bandwidth_price = params.get('bandwidth_price')
    bandwidth_amount = float(bandwidth) * float(bandwidth_price)
    combined = float(host_amount) + float(bandwidth_amount)
    status = params.get('status')
    info = params.get('info')
    # print date,idcname,cabinet,cabinet_price,host_amount,bandwidth,bandwidth_price,bandwidth_amount,combined,status,info
    # Parameterised insert — values are bound, not string-formatted.
    _sql = 'insert into idc_data(date,idcname,cabinet,cabinet_price,host_amount,bandwidth,bandwidth_price,bandwidth_amount,combined,status,info) values(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)'
    _args = (date,idcname,cabinet,cabinet_price,host_amount,bandwidth,bandwidth_price,bandwidth_amount,combined,status,info)
    _sql_count,rt_list = SQL.excute_sql(_sql,_args,fetch=False)
    if _sql_count != 0:
        return True ,'添加成功'
    return False ,'添加失败'
def update_idcinfo(params):
    """Update an existing idc_data record identified by params['id'].

    Recomputes the derived bandwidth_amount and combined totals the same
    way add_new_idcinfo does, then rewrites every column of the row.
    Returns (True, '更新成功') on success, (False, '更新失败') otherwise.
    """
    id = params.get('id')
    idcname = params.get('idcname')
    date = params.get('date')
    cabinet = params.get('cabinet')
    cabinet_price = params.get('cabinet_price')
    host_amount = params.get('host_amount')
    bandwidth = params.get('bandwidth')
    bandwidth_price = params.get('bandwidth_price')
    # Derived totals, recomputed server-side.
    bandwidth_amount = float(bandwidth) * float(bandwidth_price)
    combined = float(host_amount) + float(bandwidth_amount)
    status = params.get('status')
    info = params.get('info')
    _sql = 'update idc_data set date = %s, idcname = %s , cabinet = %s , cabinet_price = %s, host_amount = %s, ' \
           'bandwidth = %s, bandwidth_price = %s, bandwidth_amount = %s,combined = %s,status = %s,info = %s where id = %s'
    _args = (date,idcname,cabinet,cabinet_price,host_amount,bandwidth,bandwidth_price,bandwidth_amount,combined,status,info,id)
    _sql_count, rt_list = SQL.excute_sql(_sql, _args, fetch=False)
    if _sql_count != 0:
        return True ,'更新成功'
    return False ,'更新失败'
def delete_idcinfo(params):
id = params.get('id')
idcname = params.get('idcname')
date = params.get('date')
_sql = 'delete from idc_data where id = %s and date = %s and idcname = %s'
_args = (id,date,idcname)
_sql_count, rt_list = SQL.excute_sql(_sql,_args, fetch=False)
if _sql_count != 0:
return True, '删除成功'
return False, '删除失败' | [
"cui6522123@gmail.com"
] | cui6522123@gmail.com |
b9e01fd5c696231a6b883b2817e73b84b476dbaa | 1646b3fe9000c3109695e99b4bb75679577906ff | /236.LowestCommonAncestorOfABinaryTree.py | 78bdf6a542e4686bb190fc7b9d792fdb40e9fbeb | [] | no_license | yao9208/lc | 5ecf6720886beb951c9a70433f53a0ec0bcb74dc | 024c1b5c98a9e85706e110fc2be8dcebf0f460c3 | refs/heads/master | 2020-04-03T20:55:40.199637 | 2017-02-10T08:30:46 | 2017-02-10T08:30:46 | 56,478,941 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 554 | py | class Solution(object):
def lowestCommonAncestor(self, root, p, q):
"""
:type root: TreeNode
:type p: TreeNode
:type q: TreeNode
:rtype: TreeNode
"""
if not root:
return root
if root==p or root==q:
return root
left = self.lowestCommonAncestor(root.left, p, q)
right = self.lowestCommonAncestor(root.right, p, q)
if left:
if right:
return root
else:
return left
return right
| [
"yao9208@foxmail.com"
] | yao9208@foxmail.com |
38bb3f3e1a953585991861cdece43f807fb18e8a | 267f639b360d1125d89cda14e5c6b699d57f82b6 | /app/app.py | e470c06958a3187b1831a47cb417fc0aaa5e5845 | [] | no_license | DoritaSuarez/DashExamples | cb1a086d792c0bc019f86984ff97b4d9f419557d | c9ab03d29528b9682bf669bfb54266c7b5d520d9 | refs/heads/master | 2021-07-17T03:47:31.689429 | 2020-10-27T14:32:48 | 2020-10-27T14:32:48 | 223,849,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 833 | py | import dash
import dash_html_components as html
class CustomDash(dash.Dash):
def interpolate_index(self, **kwargs):
# Inspect the arguments by printing them
print(kwargs)
return '''
<!DOCTYPE html>
<html lang="en">
<head>
<script src="https://cdn.plot.ly/plotly-latest.min.js"></script>
<script src="assets/custom_script.js"></script>
</head>
<body>
<div id="graph"></div>
</body>
{app_entry}
{config}
{scripts}
{renderer}
</html>
'''.format(
app_entry=kwargs['app_entry'],
config=kwargs['config'],
scripts=kwargs['scripts'],
renderer=kwargs['renderer'])
app = CustomDash()
app.layout = html.Div('Simple Dash App')
if __name__ == '__main__':
app.run_server(debug=True) | [
"dmsuarezv@gmail.com"
] | dmsuarezv@gmail.com |
a92e766b88a066359142d4605bea2f9125c68b2d | 0acd8226bb830f291acc5f9ee24b5ef04c51b401 | /mongo-search/main.py | a38702e110fc7b3f5e1d86255b7ee63c65088aab | [] | no_license | jiafangtao/web_programming | f97eb884487afdbe5a0477f7b54d36546ce670db | 33f8943c91330c30677aa0518f21eb823441e344 | refs/heads/master | 2023-08-31T18:11:45.335547 | 2023-08-08T06:36:44 | 2023-08-08T06:36:44 | 90,827,978 | 0 | 0 | null | 2023-08-08T06:36:45 | 2017-05-10T06:19:59 | JavaScript | UTF-8 | Python | false | false | 4,064 | py | import pymongo
import jieba
import pprint
server = 'localhost'
port = 27017
def tokenize_chinese(s):
    """Segment a Chinese string into space-separated tokens.

    Uses jieba's accurate mode (cut_all=False) so each word appears once.
    """
    tokens = jieba.cut(s, cut_all=False)
    return ' '.join(tokens)
# Demo script: MongoDB full-text search over a 'stores' collection,
# including Chinese text made searchable by pre-tokenising it (jieba)
# into the reserved "_tr" fields.
client = pymongo.MongoClient(server, port)
db = client.stores
print(db.name)
db.stores.drop()  # start from a clean collection on every run
ret = db.stores.insert_many(
    [
        { "uid": 1, "name": "Java Hut", "description": "Coffee and cakes" },
        { "uid": 2, "name": "Burger Buns", "description": "Gourmet hamburgers" },
        { "uid": 3, "name": "Coffee Shop", "description": "Just coffee" },
        { "uid": 4, "name": "Clothes Clothes Clothes", "description": "Discount clothing", "additions": "free coffee" },
        { "uid": 5, "name": "Java Shopping", "description": "Indonesian goods" },
        { "uid": 6, "name": "星巴克java中文", "description": "测试中文的搜索", "_tr": { "name": [], "description": []}},
        { "uid": 7, "name": "星巴克是卖糖水的地产公司", "description": "如果没有空格也没有分词搜索不出来", "_tr": { "name": [], "description": []}},
        { "uid": 8, "name": "星巴克 是卖糖水的 地产公司", "description": "如果有空格, 没有分词那就碰巧能够搜出来", "_tr": { "name": [], "description": []}}
    ]
)
print(f"insert many:: {ret}")
ret = db.stores.insert_one({"uid": 999, "name": "Thinking in JAVA", "revision": 2, "description": "A great JAVA programming book by Bruce Eckel", "price": 49.99})
print(f"insert_one: {ret}")
db.stores.insert_one({"name": "Thinking in C++", "revision": 2, "description": "A great C++ programming book by Bruce Eckel. sister of thinking in java", "price": 49.99})
db.stores.insert_one({"name": "PS Pet Shop", "openning": ["Monday", "Tuesday", "Friday"]})
# A typical case is that user knows which fields to index. e.g.
# Create a 'text' index for field 'description'
#ret =db.stores.create_index( [("description", pymongo.TEXT), ("name", pymongo.TEXT), ("_tr.name", pymongo.TEXT)])
#print(f"index is created: {ret}")
# In our case we need to index all fields -
# Create a 'text' index for all fields (wildcard index)
db.stores.create_index([("$**", pymongo.TEXT)])
# search for an exact phrase
ret = db.stores.find( { "$text": { "$search": "\"coffee shop\"" } } )
print("\n>>>>>>>>>>>>>>> Search result for phrase - [\"coffee shop\"]")
for store in ret:
    pprint.pprint(store)
# search for non-exact phrase
ret = db.stores.find( { "$text": { "$search": "coffee shop" } } )
print("\n>>>>>>>>>>>>>>> Search result for phrase - [coffee OR shop]")
for store in ret:
    pprint.pprint(store)
# exclude one with minus operator (-)
ret = db.stores.find( { "$text": { "$search": "java shop -coffee" } } )
print("\n>>>>>>>>>>>>>>> Search result for phrase - [java shop -coffee]")
for store in ret:
    pprint.pprint(store)
# Update reserved fields to save tokenized Chinese fields
store = db.stores.find_one({"uid": 6})
print("============= store =============")
print(store["name"])
print(tokenize_chinese(store["name"]))
print(store["description"])
print(tokenize_chinese(store["description"]))
# Store the space-separated token strings under "_tr" so the text index
# can match individual Chinese words for this document.
db.stores.update_one({"uid": 6}, {
    "$set": { "_tr": {"name": tokenize_chinese(store["name"]), "description": tokenize_chinese(store["description"]) }}
})
ret = db.stores.find( { "$text": { "$search": "中文" } } )
print("\n>>>>>>>>>>>>>>> Search result for phrase - [中文]")
for store in ret:
    pprint.pprint(store)
ret = db.stores.find( { "$text": { "$search": "星巴克" } } )
print("\n>>>>>>>>>>>>>>> Search result for phrase - [星巴克]")
for store in ret:
    pprint.pprint(store)
ret = db.stores.find( { "$text": { "$search": "公司" } } )
print("\n>>>>>>>>>>>>>>> Search result for phrase - [公司]")
for store in ret:
    pprint.pprint(store)
ret = db.stores.find( { "$text": { "$search": "地产公司" } } )
print("\n>>>>>>>>>>>>>>> Search result for phrase - [地产公司]")
for store in ret:
    pprint.pprint(store)
# TODO: sort by relevance score
#
#db.stores.find(
#    { "$text": { "$search": "java coffee shop" } },
#    { "score": { "$meta": "textScore" } }
#).sort( { "score": { "$meta": "textScore" } } )
#
"jiafangtao@gmail.com"
] | jiafangtao@gmail.com |
d554077d56003ee2515bec8fa3d8516a12b6c863 | 20a803a781be4c905c562b041bcd36d3bfb30631 | /google_auth.py | 0325f98582bbf15ae5195d3e16f8f14a88a79a09 | [] | no_license | OnlyInSpace/wtCal | e699260abfc37a9ec9b0d8427d4c7a99ac0781c1 | c846c3758cb8cd3ae620cbf4efd89ada34acd2f9 | refs/heads/master | 2023-04-28T03:16:39.456401 | 2021-05-04T18:55:13 | 2021-05-04T18:55:13 | 243,627,003 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,285 | py | import functools
import os
import flask
from authlib.client import OAuth2Session
import google.oauth2.credentials
import googleapiclient.discovery
ACCESS_TOKEN_URI = 'https://www.googleapis.com/oauth2/v4/token'
AUTHORIZATION_URL = 'https://accounts.google.com/o/oauth2/v2/auth?access_type=offline&prompt=consent'
AUTHORIZATION_SCOPE ='openid email profile'
AUTH_REDIRECT_URI = 'http://localhost:5000/google/auth'
BASE_URI = 'http://localhost:5000'
CLIENT_ID = ''
CLIENT_SECRET = ''
AUTH_TOKEN_KEY = 'auth_token'
AUTH_STATE_KEY = 'auth_state'
app = flask.Blueprint('google_auth', __name__)
def is_logged_in():
return True if AUTH_TOKEN_KEY in flask.session else False
def build_credentials():
if not is_logged_in():
raise Exception('User must be logged in')
oauth2_tokens = flask.session[AUTH_TOKEN_KEY]
return google.oauth2.credentials.Credentials(
oauth2_tokens['access_token'],
refresh_token=oauth2_tokens['refresh_token'],
client_id=CLIENT_ID,
client_secret=CLIENT_SECRET,
token_uri=ACCESS_TOKEN_URI)
def get_user_info():
credentials = build_credentials()
oauth2_client = googleapiclient.discovery.build(
'oauth2', 'v2',
credentials=credentials)
return oauth2_client.userinfo().get().execute()
def no_cache(view):
@functools.wraps(view)
def no_cache_impl(*args, **kwargs):
response = flask.make_response(view(*args, **kwargs))
response.headers['Cache-Control'] = 'no-store, no-cache, must-revalidate, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '-1'
return response
return functools.update_wrapper(no_cache_impl, view)
@app.route('/google/login')
@no_cache
def login():
session = OAuth2Session(CLIENT_ID, CLIENT_SECRET,
scope=AUTHORIZATION_SCOPE,
redirect_uri=AUTH_REDIRECT_URI)
uri, state = session.authorization_url(AUTHORIZATION_URL)
flask.session[AUTH_STATE_KEY] = state
flask.session.permanent = True
return flask.redirect(uri, code=302)
@app.route('/google/auth')
@no_cache
def google_auth_redirect():
req_state = flask.request.args.get('state', default=None, type=None)
if req_state != flask.session[AUTH_STATE_KEY]:
response = flask.make_response('Invalid state parameter', 401)
return response
session = OAuth2Session(CLIENT_ID, CLIENT_SECRET,
scope=AUTHORIZATION_SCOPE,
state=flask.session[AUTH_STATE_KEY],
redirect_uri=AUTH_REDIRECT_URI)
oauth2_tokens = session.fetch_access_token(
ACCESS_TOKEN_URI,
authorization_response=flask.request.url)
flask.session[AUTH_TOKEN_KEY] = oauth2_tokens
return flask.redirect(BASE_URI, code=302)
@app.route('/google/logout')
@no_cache
def logout():
flask.session.pop(AUTH_TOKEN_KEY, None)
flask.session.pop(AUTH_STATE_KEY, None)
return flask.redirect(BASE_URI, code=302)
| [
"lsfite1@buffs.wtamu.edu"
] | lsfite1@buffs.wtamu.edu |
0d397a0c9a42ee5901823546ee716140764467c4 | 74d8510e4c06a015fcd10b4024e8a04cb918fce3 | /video.py | 95a1ae393f87b454070f15c76f577afc99be14c0 | [] | no_license | liuzhida/video_search | d4b0e8092300ef70058034ab9082fbef314639c5 | d0449caf4f2e2d4808118f6ef164ccb6b74a6d7a | refs/heads/master | 2021-01-16T18:31:06.247940 | 2013-05-10T11:21:13 | 2013-05-10T11:21:13 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 36,598 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import tornado.httpserver
import tornado.ioloop
import tornado.web
import tornado.options
from tornado import gen
from tornado import httpclient
from tornado.options import define, options
from tornado.escape import json_encode
import database
from mmseg import seg_txt
import redis
import json
import random
import re
import time
from jsonphandler import JSONPHandler
from BeautifulSoup import BeautifulSoup
from datetime import timedelta, date
def sha1(src):
    """Return the hexadecimal SHA-1 digest of src."""
    import hashlib
    return hashlib.sha1(src).hexdigest()
class BackupVideoHandler(tornado.web.RequestHandler):
    """POST handler: the raw request body is a video id.

    Responds with a JSON list of fallback download URLs for that id, looked
    up first in redis db4 (curated records), then db3 (raw crawl records);
    404s when the id is unknown. A "failed" marker is also appended to the
    log file so primary-URL failures can be analysed later.
    """

    def post(self):
        video_id = self.request.body  # avoid shadowing the id() builtin
        curated = redis.Redis(host='192.168.0.32', port=6379, db=4)
        raw = redis.Redis(host='192.168.0.32', port=6379, db=3)
        arg = self.request.arguments
        if curated.exists("id:%s" % video_id):
            # SECURITY: the stored value is eval()'d as a Python expression;
            # anyone with write access to redis can execute code here.
            # Should be json.loads over json-encoded data instead.
            down = eval(curated.hget("id:%s" % video_id, "down"))
            urls = list(down.values())
            # Record that this client version fell back for this id.
            data = "%s##%s##failed" % (video_id, arg['version'][0])
            with open("/home/work/video/log/data", "a") as f:
                f.write("%s\n" % data)
            self.finish(json.dumps(urls))
            return
        elif raw.exists("id:%s" % video_id):
            # db3 keeps the whole record as a single JSON blob keyed by URL.
            record = raw.hgetall("id:%s" % video_id)
            item = json.loads(record.values()[0])
            self.finish(json.dumps(item['down']))
            return
        else:
            raise tornado.web.HTTPError(404)
class ImgHandler(tornado.web.RequestHandler):
    """GET /<id>: permanent-redirect to the poster image stored for <id>."""
    def get(self,id):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)  # curated items
        d = redis.Redis(host='192.168.0.32', port=6379, db=3)  # raw crawl items
        e = redis.Redis(host='192.168.0.32', port=6379, db=5)  # singers/shows
        if id.startswith("singer") or id.startswith("show"):
            img = e.hget("id:%s"%id,"img")
            self.redirect(img, permanent=True)
            return
        elif c.exists("id:%s"%id):
            img = c.hget("id:%s"%id,"img")
            self.redirect(img, permanent=True)
            return
        elif d.exists("id:%s"%id):
            # db3 stores the whole record as one JSON blob keyed by URL.
            data = d.hgetall("id:%s"%id)
            item = json.loads(data.values()[0])
            img = item['img']
            self.redirect(img, permanent=True)
            return
        else:
            raise tornado.web.HTTPError(404)
        # Unreachable: every branch above returns or raises.
        self.finish()
        return
class SeturlHandler(tornado.web.RequestHandler):
    """POST /<id>?downurl=...&size=...: record a wandoujia mirror URL
    (and optional file size) on an existing db4 record."""
    def post(self,id):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        flag = c.exists("id:%s"%id)
        if flag:
            if id.startswith('T'):
                # 'T' ids are whole TV series: there is no single download
                # URL to attach, so reject the request.
                raise tornado.web.HTTPError(404)
                self.finish()  # unreachable after raise
                return
            else:
                #c.hset("id:%s"%id,url,json.dumps(item))
                downurl = self.get_argument("downurl")
                size = self.get_argument("size", None)
                #self.write(downurl)
                # NOTE(review): eval() of data read back from redis — this
                # assumes only trusted writers; json.loads would be safer.
                down = c.hget("id:%s"%id,"down")
                down = eval(down)
                down['wandoujia'] = downurl
                c.hset("id:%s"%id,"down",json.dumps(down))
                if size:
                    c.hset("id:%s"%id,"size",int(size))
                self.finish("ok")
                return
        else:
            raise tornado.web.HTTPError(404)
            self.finish()  # unreachable after raise
            return
class GeturlHandler(tornado.web.RequestHandler):
    """GET /<id>: permanent-redirect to the best download URL for <id>.

    Preference order per record: the 'wandoujia' mirror if present,
    otherwise the first stored URL. TV-series ids ('T' prefix / type 'tv')
    have no single download URL and return 404.
    """
    def get(self,id):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)  # curated items
        d = redis.Redis(host='192.168.0.32', port=6379, db=3)  # raw crawl items
        e = redis.Redis(host='192.168.0.32', port=6379, db=5)  # singers/shows
        if id.startswith("show"):
            # NOTE(review): eval() of redis data — trusted-writer assumption.
            down = e.hget("id:%s"%id,"down")
            down = eval(down)
            if down.has_key("wandoujia"):
                downurl = down['wandoujia']
            else:
                downurl = down.values()[0]
            self.redirect(downurl, permanent=True)
            return
        elif c.exists("id:%s"%id):
            if id.startswith('T'):
                # Whole TV series: nothing to redirect to.
                raise tornado.web.HTTPError(404)
                self.finish()  # unreachable after raise
                return
            else:
                down = c.hget("id:%s"%id,"down")
                down = eval(down)
                if down.has_key("wandoujia"):
                    downurl = down['wandoujia']
                else:
                    downurl = down.values()[0]
                if "itc.cn" in downurl:
                    # Strip the trailing &name=... query part of itc.cn URLs.
                    downurl = downurl.split("&name")[0]
                self.redirect(downurl, permanent=True)
                return
        elif d.exists("id:%s"%id):
            # db3 stores the whole record as one JSON blob keyed by URL.
            data = d.hgetall("id:%s"%id)
            item = json.loads(data.values()[0])
            if item['type'] == 'tv':
                raise tornado.web.HTTPError(404)
                self.finish()  # unreachable after raise
                return
            else:
                downurl = item['down'][0]
                self.redirect(downurl, permanent=True)
                return
        else:
            raise tornado.web.HTTPError(404)
        # Unreachable: every branch above returns or raises.
        self.finish()
        return
class VideoDownHandler(tornado.web.RequestHandler):
    """GET /<id>: return the full db4 record (including download URLs) as JSON.

    Stored fields are serialized Python/JSON strings, so they are decoded
    before re-encoding the whole record with json.dumps.
    """
    def get(self,id):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        if c.exists("id:%s"%id):
            #if c.sismember("list:display",id) and c.exists("id:%s"%id):
            item = c.hgetall("id:%s"%id)
            # NOTE(review): eval() of values read from redis — assumes only
            # trusted writers; json would be the safe encoding.
            if item.has_key("urls"):
                item['urls'] = eval(item['urls'])
            if id.startswith('T'):
                # TV series: episode list plus metadata lists.
                #item['show'] = eval(item['show'])
                item['show'] = json.loads(item['show'])
                item['director'] = eval(item['director'])
                item['actor'] = eval(item['actor'])
                item['cate'] = eval(item['cate'])
            elif id.startswith("M"):
                # Movie: metadata lists plus download-URL mapping.
                item['director'] = eval(item['director'])
                item['actor'] = eval(item['actor'])
                item['cate'] = eval(item['cate'])
                item['down'] = eval(item['down'])
            else:
                item['down'] = eval(item['down'])
            self.finish(json.dumps(item))
            return
        else:
            raise tornado.web.HTTPError(404)
        # Unreachable: both branches above return or raise.
        self.finish()
        return
class DetailHandler(JSONPHandler):
    """GET /<id>: JSON(P) detail view of a db4 record, with download URLs
    stripped and TV episode titles normalised to '第N集'."""
    def get(self,id):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        flag = c.exists("id:%s"%id)
        if flag:
            item = c.hgetall("id:%s"%id)
            id = item['id']
            # Derive a human-readable source label from the original URL.
            if "sohu" in item['url']:
                item['from'] = u'搜狐视频'
            elif "letv" in item['url']:
                item['from'] = u'乐视'
            #del item['url']
            if id.startswith('M') or id.startswith("T"):
                # NOTE(review): eval() of redis data — trusted-writer
                # assumption; json would be the safe encoding.
                item['director'] = eval(item['director'])
                item['actor'] = eval(item['actor'])
                item['cate'] = eval(item['cate'])
            #del item['img']
            # Download URLs are not part of the detail payload.
            if item.has_key("down"):
                del item['down']
            if id.startswith('T'):
                # TV series: strip per-episode URLs and normalise titles.
                show = []
                shows = json.loads(item['show'])
                for i in shows:
                    #i = json.loads(i)
                    del i['url']
                    if i.has_key("urls"):
                        del i['urls']
                    del i['down']
                    # Try to reduce titles like '<series>第12集' (optionally
                    # with a season part '…季第12集') to just '第12集'; fall
                    # back to stripping the series name and extracting the
                    # first digit run.
                    try:
                        if u"季第" in i['title']:
                            titlenum = re.findall(ur"季第(.+?)集$", i['title'])
                            i['title'] = u"第"+titlenum[0]+u"集"
                        elif u"集" in i['title']:
                            titlenum = re.findall(ur"第(.+?)集$", i['title'])
                            i['title'] = u"第"+titlenum[0]+u"集"
                        else:
                            i['title'] = i['title'].replace(item['title'].decode("utf-8"), "")
                            titlenum = re.findall(ur"(\d+)", i['title'])
                            i['title'] = u"第"+str(titlenum[0]).lstrip(u"0")+u"集"
                    except:
                        # Bare except: any parse failure just leaves the
                        # series name stripped from the raw title.
                        i['title'] = i['title'].replace(item['title'].decode("utf-8"), "")
                    show.append(i)
                item['show'] = show
            self.finish(json.dumps(item))
            return
        else:
            raise tornado.web.HTTPError(404)
        # Unreachable: both branches above return or raise.
        self.finish()
        return
def getlist(page,size,setname,flag):
    """Page through a redis sorted set of video ids and build a list payload.

    page/size -- 1-based page number and page size (strings or ints).
    setname   -- redis sorted-set key holding the ids.
    flag      -- passed to zrange as desc= (True for reverse order).

    Returns {'total': n, 'next': remaining_count, 'items': [...]} with
    download URLs, images and raw URLs stripped from each item.
    """
    c = redis.Redis(host='192.168.0.32', port=6379, db=4)
    # Requested [m, n) slice of the sorted set.
    m = (int(page) - 1) * int(size)
    n = int(page) * int(size)
    length = c.zcard(setname)
    nexts = length - n
    # Clamp the window to the set size (zrange end index is inclusive).
    if n > length:
        n = length - 1
        if m > length:
            m = n - int(size) +1
            nexts = 0
    else:
        n = n -1
    if m < 0:
        m = 0
    ids = c.zrange(setname,m,n,desc=flag)
    items = []
    for id in ids:
        # Skip ids that were deleted or are not flagged for display.
        if not c.exists("id:%s"%id):
            continue
        if not c.sismember("list:display",id):
            continue
        item = c.hgetall("id:%s"%id)
        # NOTE(review): eval() of redis data — trusted-writer assumption.
        item['director'] = eval(item['director'])
        item['actor'] = eval(item['actor'])
        item['cate'] = eval(item['cate'])
        # Strip fields not wanted in list payloads.
        if item.has_key("urls"):
            del item['urls']
        del item['url']
        del item['img']
        if item['id'].startswith("M"):
            del item['down']
        else:
            del item['show']
        # Display tweak: '...集更新' -> '...集|更新至'.
        item['time'] = item['time'].replace("集更新", "集|更新至")
        items.append(item)
    data = {}
    data['total'] = len(items)
    data['next'] = nexts
    data['items'] = items
    return data
class SHandler(JSONPHandler):
    """GET ?word=...&page=...&size=...: keyword search over the inverted
    index stored in redis ('indexs:<token>' sets of ids), returning a paged
    JSON(P) payload shaped like getlist()'s."""
    def get(self):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        page = self.get_argument("page")
        size = self.get_argument("size")
        word = self.get_argument("word", None)
        self.word = word
        items = []
        # Empty query: return an empty result set instead of erroring.
        if word == "" or word == None:
            data = {}
            data['total'] = 0
            data['next'] = 0
            data['items'] = []
            self.finish(json.dumps(data))
            return
        word = word.encode("utf-8")
        # Segment the query (mmseg) and keep only tokens that exist in the
        # inverted index.
        words = []
        indexs = []
        for i in seg_txt(word):
            if c.exists("indexs:"+i):
                words.append(i)
                _i = "indexs:"+i
                indexs.append(_i)
        new = "".join(words)
        if len(indexs) ==1:
            ids = c.smembers(indexs[0])
        elif len(indexs) == 0:
            # No known token at all -> empty result.
            data = {}
            data['total'] = 0
            data['next'] = 0
            data['items'] = []
            self.finish(json.dumps(data))
            return
        else:
            # Multiple tokens: intersect their id sets (AND semantics),
            # caching the intersection under a key for the joined tokens.
            c.sinterstore("indexs:%s"%new, *indexs)
            ids = c.smembers("indexs:%s"%new)
        for id in ids:
            # Skip deleted or non-display items.
            if not c.exists(id):
                continue
            if not c.sismember("list:display",id.replace("id:","")):
                continue
            _item = c.hgetall(id)
            # NOTE(review): eval() of redis data — trusted-writer assumption.
            if _item.has_key("director"):
                _item['director'] = eval(_item['director'])
            if _item.has_key("actor"):
                _item['actor'] = eval(_item['actor'])
            if _item.has_key("cate"):
                _item['cate'] = eval(_item['cate'])
            # Strip fields not wanted in search-result payloads.
            if _item.has_key("urls"):
                del _item['urls']
            del _item['info']
            #del _item['cate']
            if _item.has_key("url"):
                del _item['url']
            if _item.has_key("img"):
                del _item['img']
            if _item['id'].startswith('M'):
                del _item['down']
            else:
                del _item['show']
            # Display tweak: '...集更新' -> '...集|更新至'.
            _item['time'] = _item['time'].replace("集更新", "集|更新至")
            items.append(_item)
        # In-memory pagination over the matched items, clamping the window
        # to the result count (mirrors getlist()'s logic).
        long = len(items)
        m = (int(page) - 1) * int(size)
        n = int(page) * int(size)
        hot = len(items)
        nexts = 0
        if m < hot < n:
            items = items[m:hot]
            nexts = 0
        elif n <= hot:
            items = items[m:n-1]
            nexts = hot -n
        elif m > hot:
            n = hot
            m = n - (int(size)) + 1
            if m < 0:
                m = 0
            items = items[m:n-1]
            nexts = 0
        data = {}
        data['total'] = len(items)
        data['next'] = nexts
        data['items'] = items
        self.finish(json.dumps(data))
        return
class SearchHandler(JSONPHandler):
    """Search endpoint (/api/s): either lists a category via getlist() when
    ``cate`` is given, or performs the same token-intersection word search
    as SHandler and returns a paged JSON result."""

    def get(self):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        page = self.get_argument("page")
        size = self.get_argument("size")
        word = self.get_argument("word", None)
        self.word = word
        cate = self.get_argument("cate", None)
        # Category browsing takes precedence over free-text search.
        if cate:
            flag = True
            setname = "cate:%s"%cate
            data = getlist(page,size,setname,flag)
            self.finish(json.dumps(data))
            return
        items = []
        # Empty query: answer an empty page immediately.
        if word == "" or word == None:
            data = {}
            data['total'] = 0
            data['next'] = 0
            data['items'] = []
            self.finish(json.dumps(data))
            return
        word =word.encode("utf-8")
        words = []
        indexs = []
        # Segment the query and keep only tokens that have an index set.
        for i in seg_txt(word):
            if c.exists("indexs:"+i):
                words.append(i)
                _i = "indexs:"+i
                indexs.append(_i)
        new = "".join(words)
        if len(indexs) ==1:
            ids = c.smembers(indexs[0])
        elif len(indexs) == 0:
            # No token matched any index: empty result page.
            data = {}
            data['total'] = 0
            data['next'] = 0
            data['items'] = []
            self.finish(json.dumps(data))
            return
        else:
            # Multiple tokens: store their intersection under a combined key.
            c.sinterstore("indexs:%s"%new, *indexs)
            ids = c.smembers("indexs:%s"%new)
        for id in ids:
            # Drop ids without a backing hash or not whitelisted for display.
            if not c.exists(id):
                continue
            if not c.sismember("list:display",id.replace("id:","")):
                continue
            _item = c.hgetall(id)
            # NOTE(review): eval() on redis-stored strings (self-written data).
            if _item.has_key("director"):
                _item['director'] = eval(_item['director'])
            if _item.has_key("actor"):
                _item['actor'] = eval(_item['actor'])
            if _item.has_key("cate"):
                _item['cate'] = eval(_item['cate'])
            if _item.has_key("urls"):
                del _item['urls']
            if _item.has_key("url"):
                del _item['url']
            if _item.has_key("img"):
                del _item['img']
            # Movies ("M...") carry 'down'; series carry 'show'.
            if _item['id'].startswith('M'):
                del _item['down']
            else:
                del _item['show']
            _item['time'] = _item['time'].replace("集更新", "集|更新至")
            items.append(_item)
        # NOTE(review): 'long' shadows the Python 2 builtin and is never used.
        long = len(items)
        m = (int(page) - 1) * int(size)
        n = int(page) * int(size)
        hot = len(items)
        nexts = 0
        # Slice the requested page out of the full result list.
        if m < hot < n:
            items = items[m:hot]
            nexts = 0
        elif n <= hot:
            items = items[m:n-1]
            nexts = hot -n
        elif m > hot:
            # Page starts past the end: slide the window back to the tail.
            n = hot
            m = n - (int(size)) + 1
            if m < 0:
                m = 0
            items = items[m:n-1]
            nexts = 0
        data = {}
        data['total'] = len(items)
        data['next'] = nexts
        data['items'] = items
        self.finish(json.dumps(data))
        return
class TvnewHandler(JSONPHandler):
    """Newest TV shows (/api/tv/new): pages through the 'new:tv' sorted set."""

    def get(self):
        page = self.get_argument("page")
        size = self.get_argument("size")
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        now = time.strftime("%Y%m%d", time.localtime())
        m = (int(page) - 1) * int(size)   # first index of the page
        n = int(page) * int(size)         # one past the last index
        # NOTE(review): 'list' shadows the builtin in this method.
        list = c.zcard("new:tv")
        nexts = list -n
        items = []
        if n <= list:
            # Whole page fits inside the set.
            x1 = m
            x2 = n -1
            _items = c.zrange("new:tv",x1,x2)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                # NOTE(review): eval() on redis-stored strings (self-written data).
                _item['director'] = eval(_item['director'])
                _item['actor'] = eval(_item['actor'])
                _item['cate'] = eval(_item['cate'])
                del _item['info']
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['cate']
                del _item['url']
                del _item['year']
                del _item['img']
                del _item['show']
                _item['time'] = _item['time'].replace("集更新", "集|更新至")
                # Keep only the "updated to ..." part of the time field.
                if "|" in _item['time']:
                    _item['time'] = _item['time'].split("|")[1]
                items.append(_item)
        elif n > list:
            # Page overlaps the end of the set.
            x2 = list - 1
            # NOTE(review): when m <= list here, x1 is never assigned and the
            # zrange call below raises NameError — latent bug.
            if m > list:
                x1 = x2 -int(size) +1
                if x1 <0:
                    x1 = 0
                nexts = 0
            _items = c.zrange("new:tv",x1,x2)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                if _item.has_key("img"):
                    _item['director'] = eval(_item['director'])
                    _item['actor'] = eval(_item['actor'])
                    _item['cate'] = eval(_item['cate'])
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['info']
                del _item['cate']
                del _item['url']
                del _item['show']
                del _item['year']
                del _item['img']
                items.append(_item)
        data = {}
        data['total'] = len(items)
        data['next'] = nexts
        data['items'] = items
        self.finish(json.dumps(data))
        return
class TvhotHandler(JSONPHandler):
    """Weekly-hot TV shows (/api/tv/hot): pages through the 'week:tv' sorted set."""

    def get(self):
        page = self.get_argument("page")
        size = self.get_argument("size")
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        now = time.strftime("%Y%m%d", time.localtime())
        m = (int(page) - 1) * int(size)   # first index of the page
        n = int(page) * int(size)         # one past the last index
        # NOTE(review): 'list' shadows the builtin in this method.
        list = c.zcard("week:tv")
        nexts = list -n
        items = []
        if n <= list:
            # Whole page fits inside the set.
            x1 = m
            x2 = n -1
            _items = c.zrange("week:tv",x1,x2,desc=False)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                # NOTE(review): eval() on redis-stored strings (self-written data).
                _item['director'] = eval(_item['director'])
                _item['actor'] = eval(_item['actor'])
                _item['cate'] = eval(_item['cate'])
                del _item['info']
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['cate']
                del _item['url']
                del _item['year']
                del _item['img']
                del _item['show']
                _item['time'] = _item['time'].replace("集更新", "集|更新至")
                # Keep only the "updated to ..." part of the time field.
                if "|" in _item['time']:
                    _item['time'] = _item['time'].split("|")[1]
                items.append(_item)
        elif n > list:
            # Page overlaps the end of the set.
            x2 = list - 1
            # NOTE(review): when m <= list here, x1 is never assigned and the
            # zrange call below raises NameError — latent bug.
            if m > list:
                x1 = x2 -int(size) +1
                if x1 < 0:
                    x1 = 0
                nexts = 0
            _items = c.zrange("week:tv",x1,x2,desc=False)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                if _item.has_key("img"):
                    _item['director'] = eval(_item['director'])
                    _item['actor'] = eval(_item['actor'])
                    _item['cate'] = eval(_item['cate'])
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['info']
                del _item['cate']
                del _item['url']
                del _item['show']
                del _item['year']
                del _item['img']
                items.append(_item)
        data = {}
        data['total'] = len(items)
        data['next'] = nexts
        data['items'] = items
        self.finish(json.dumps(data))
        return
class MvnewHandler(JSONPHandler):
    """Newest movies (/api/movie/new): pages through the 'new:movie' sorted set."""

    def get(self):
        page = self.get_argument("page")
        self.page = page
        size = self.get_argument("size")
        self.size = size
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        now = time.strftime("%Y%m%d", time.localtime())
        m = (int(page) - 1) * int(size)   # first index of the page
        n = int(page) * int(size)         # one past the last index
        # NOTE(review): 'list' shadows the builtin in this method.
        list = c.zcard("new:movie")
        nexts = list -n
        items = []
        if n <= list:
            # Whole page fits inside the set.
            x1 = m
            x2 = n -1
            _items = c.zrange("new:movie",x1,x2)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                # NOTE(review): eval() on redis-stored strings (self-written data).
                _item['director'] = eval(_item['director'])
                _item['actor'] = eval(_item['actor'])
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['info']
                del _item['cate']
                del _item['url']
                del _item['down']
                del _item['type']
                del _item['year']
                del _item['area']
                del _item['img']
                items.append(_item)
        elif n > list:
            # Page overlaps the end of the set.
            x2 = list - 1
            # NOTE(review): when m <= list here, x1 is never assigned and the
            # zrange call below raises NameError — latent bug.
            if m > list:
                x1 = x2 -int(size) +1
                if x1 < 0:
                    x1 = 0
            nexts = 0
            items = []
            _items = c.zrange("new:movie",x1,x2)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['info']
                del _item['cate']
                del _item['url']
                del _item['down']
                del _item['type']
                del _item['year']
                del _item['area']
                del _item['img']
                _item['director'] = eval(_item['director'])
                _item['actor'] = eval(_item['actor'])
                _item['cate'] = eval(_item['cate'])
                items.append(_item)
        data = {}
        data['total'] = len(items)
        data['next'] = nexts
        data['items'] = items
        self.finish(json_encode(data))
        return
class MvhotHandler(JSONPHandler):
    """Weekly-hot movies (/api/movie/hot): pages through the 'week:movie' sorted set."""

    def get(self):
        page = self.get_argument("page")
        self.page = page
        size = self.get_argument("size")
        self.size = size
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        now = time.strftime("%Y%m%d", time.localtime())
        m = (int(page) - 1) * int(size)   # first index of the page
        n = int(page) * int(size)         # one past the last index
        # NOTE(review): 'list' shadows the builtin in this method.
        list = c.zcard("week:movie")
        nexts = list -n
        items = []
        if n <= list:
            # Whole page fits inside the set.
            x1 = m
            x2 = n -1
            _items = c.zrange("week:movie",x1,x2)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                # NOTE(review): eval() on redis-stored strings (self-written data).
                _item['director'] = eval(_item['director'])
                _item['actor'] = eval(_item['actor'])
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['info']
                del _item['cate']
                del _item['url']
                del _item['down']
                del _item['type']
                del _item['year']
                del _item['area']
                del _item['img']
                items.append(_item)
        elif n > list:
            # Page overlaps the end of the set.
            x2 = list - 1
            # NOTE(review): when m <= list here, x1 is never assigned and the
            # zrange call below raises NameError — latent bug.
            if m > list:
                x1 = x2 -int(size) +1
                if x1 < 0:
                    x1 = 0
            nexts = 0
            items = []
            _items = c.zrange("week:movie",x1,x2)
            for i in _items:
                id = i
                if not c.exists("id:%s"%id):
                    continue
                if not c.sismember("list:display",id):
                    continue
                _item = c.hgetall("id:%s"%id)
                if _item.has_key("urls"):
                    del _item['urls']
                del _item['info']
                del _item['cate']
                del _item['url']
                del _item['down']
                del _item['type']
                del _item['year']
                del _item['area']
                del _item['img']
                _item['director'] = eval(_item['director'])
                _item['actor'] = eval(_item['actor'])
                _item['cate'] = eval(_item['cate'])
                items.append(_item)
        data = {}
        data['total'] = len(items)
        data['next'] = nexts
        data['items'] = items
        self.finish(json_encode(data))
        return
class CateHandler(JSONPHandler):
    """Serve the static movie/TV category taxonomy (/api/cate) as JSON."""

    def get(self):
        # (display title, category id) pairs, rendered below into the
        # [{"title": ..., "id": ...}, ...] shape the client expects.
        movie_pairs = [
            ("传记片", "m_zhuanji"), ("伦理片", "m_lunli"),
            ("剧情片", "m_juqing"), ("动作片", "m_dongzuo"),
            ("动画片", "m_donghua"), ("历史片", "m_lishi"),
            ("喜剧片", "m_xiju"), ("恐怖片", "m_kongbu"),
            ("悬疑片", "m_xuanyi"), ("惊悚片", "m_jingsong"),
            ("战争片", "m_zhanzheng"), ("歌舞片", "m_gewu"),
            ("武侠片", "m_wuxia"), ("灾难片", "m_zainan"),
            ("爱情片", "m_aiqing"), ("短片", "m_duan"),
            ("科幻片", "m_kehuan"), ("纪录片", "m_jilu"),
            ("警匪片", "m_jingfei"), ("风月片", "m_fengyue"),
            ("魔幻片", "m_mohuan"), ("青春片", "m_qingchun"),
            ("文艺片", "m_wenyi"), ("谍战片", "m_diezhan"),
        ]
        tv_pairs = [
            ("伦理剧", "t_lunli"), ("偶像剧", "t_ouxiang"),
            ("军旅剧", "t_junlv"), ("刑侦剧", "t_xingzhen"),
            ("剧情片", "t_juqing"), ("动作剧", "t_dongzuo"),
            ("历史剧", "t_lishi"), ("古装剧", "t_guzhuang"),
            ("喜剧片", "t_xiju"), ("家庭剧", "t_jiating"),
            ("悬疑剧", "t_xuanyi"), ("情景剧", "t_qingjing"),
            ("战争剧", "t_zhanzheng"), ("武侠剧", "t_wuxia"),
            ("科幻剧", "t_kehuan"), ("谍战剧", "t_diezhan"),
            ("都市剧", "t_dushi"), ("神话剧", "t_shenhua"),
            ("言情剧", "t_yanqing"), ("年代剧", "t_niandai"),
            ("农村剧", "t_nongcun"), ("惊悚剧", "t_jingsong"),
            ("传记剧", "t_zhuanji"), ("灾难剧", "t_zainan"),
            ("犯罪剧", "t_fanzui"), ("生活剧", "t_shenghuo"),
            ("经典剧", "t_jingdian"),
        ]
        cate = {}
        cate['movie'] = [{"title": title, "id": cid} for title, cid in movie_pairs]
        cate['tv'] = [{"title": title, "id": cid} for title, cid in tv_pairs]
        self.finish(json.dumps(cate))
        return
class WordHandler(JSONPHandler):
    """Hot search words (/api/hot/word): serves a random sample of titles,
    rebuilding the per-day 'word:YYYYMMDD' redis set from mysql on a miss."""

    def get(self):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        now = time.strftime("%Y%m%d", time.localtime())
        size = self.get_argument("size")
        if c.exists("word:%s"%now):
            titles = c.smembers("word:%s"%now)
        else:
            # Cache miss: pull candidate titles from mysql and keep only
            # those that map to an existing, displayable movie/TV record.
            db = database.MysqlHander()
            title = db.getword()
            for ti in title:
                # Record ids embed the first 7 hex chars of the title's sha1.
                key = sha1(ti.encode("utf-8"))[:7]
                for i in c.keys("id:M%s*"%key):
                    if not c.sismember("list:display",i.replace("id:","")):
                        continue
                    if c.exists(i):
                        c.sadd("word:%s"%now,ti)
                for i in c.keys("id:T%s*"%key):
                    if not c.sismember("list:display",i.replace("id:","")):
                        continue
                    if c.exists(i):
                        c.sadd("word:%s"%now,ti)
            titles = c.smembers("word:%s"%now)
        # Return a random subset of at most ``size`` titles.
        titles = list(titles)
        random.shuffle(titles)
        titles = titles[:int(size)]
        return self.finish(json.dumps(titles))
class UrlHandler(tornado.web.RequestHandler):
    """Redirect /api/url/<id> to the playback URL stored in redis; 404 when
    the record does not exist."""

    def get(self, id):
        conn = redis.Redis(host='192.168.0.32', port=6379, db=4)
        key = "id:%s" % id
        # Unknown id: fail fast with 404.
        if not conn.exists(key):
            raise tornado.web.HTTPError(404)
        url = conn.hget(key, "url")
        # TV records hosted on sohu keep the playable link on the first episode.
        if id.startswith("T") and "tv.sohu.com" in url:
            episodes = eval(conn.hget(key, "show"))
            url = episodes[0]['url']
        self.redirect(url, permanent=True)
        return
class CheckHandler(tornado.web.RequestHandler):
    """Liveness probe (/api/proxycheck): always answers the literal body "ok"."""

    def get(self):
        body = "ok"
        self.finish(body)
        return
class MiniHandler(tornado.web.RequestHandler):
    """Render the mini home page: newest/hottest TV shows and movies pulled
    from the redis 'new:*' and 'week:*' sorted sets."""

    def get(self):
        c = redis.Redis(host='192.168.0.32', port=6379, db=4)
        # Newest 8 TV shows with their "updated to ..." label.
        newtv = []
        _items = c.zrange("new:tv",0,7,desc=False)
        for i in _items:
            id = i
            if not c.exists("id:%s"%id):
                continue
            if not c.sismember("list:display",id):
                continue
            _item = {}
            _item['id'] = id
            _item['title'] = c.hget("id:%s"%id,"title")
            _item['time'] = c.hget("id:%s"%id,"time")
            _item['time'] = _item['time'].replace("集更新", "集|更新至")
            if "更新" in _item['time']:
                _item['time'] = "更"+ _item['time'].split("更")[1]
            newtv.append(_item)
        # Top-10 weekly TV; only the first entry carries the 'info' blurb.
        toptv = []
        _items = c.zrange("week:tv",0,9,desc=False)
        x = 0
        for i in _items:
            id = i
            if not c.exists("id:%s"%id):
                continue
            if not c.sismember("list:display",id):
                continue
            _item = {}
            if x == 0:
                _item['info'] = c.hget("id:%s"%id,"info")
                x += 1
            _item['id'] = id
            _item['title'] = c.hget("id:%s"%id,"title")
            _item['score'] = c.hget("id:%s"%id,"score")
            toptv.append(_item)
        # Newest 8 movies, same shape as newtv.
        newmovie = []
        _items = c.zrange("new:movie",0,7,desc=False)
        for i in _items:
            id = i
            if not c.exists("id:%s"%id):
                continue
            if not c.sismember("list:display",id):
                continue
            _item = {}
            _item['id'] = id
            _item['title'] = c.hget("id:%s"%id,"title")
            _item['time'] = c.hget("id:%s"%id,"time")
            _item['time'] = _item['time'].replace("集更新", "集|更新至")
            if "更新" in _item['time']:
                _item['time'] = "更"+ _item['time'].split("更")[1]
            newmovie.append(_item)
        # Top-10 weekly movies; only the first entry carries the 'info' blurb.
        topmovie = []
        _items = c.zrange("week:movie",0,9,desc=False)
        x = 0
        for i in _items:
            id = i
            if not c.exists("id:%s"%id):
                continue
            if not c.sismember("list:display",id):
                continue
            _item = {}
            if x == 0:
                _item['info'] = c.hget("id:%s"%id,"info")
                x += 1
            _item['id'] = id
            _item['title'] = c.hget("id:%s"%id,"title")
            _item['score'] = c.hget("id:%s"%id,"score")
            topmovie.append(_item)
        self.render("index.html",newtv=newtv,toptv=toptv,topmovie=topmovie,newmovie=newmovie)
def main():
    """Parse command-line options and serve the API on the configured port."""
    define("port", default=9990, help="run on the given port", type=int)
    settings = {"debug": False, "template_path": "templates",
                "cookie_secret": "z1DAVh+WTvyqpWGmOtJCQLETQYUznEuYskSF062J0To="}
    tornado.options.parse_command_line()
    # URL routing table: one (pattern, handler) pair per endpoint.
    routes = [
        (r"/api/detail/(.*)", DetailHandler),
        (r"/api/url/(.*)", UrlHandler),
        (r"/api/downdetail/(.*)", VideoDownHandler),
        (r"/api/down", BackupVideoHandler),
        (r"/api/down/(.*)", GeturlHandler),
        (r"/api/set/(.*)", SeturlHandler),
        (r"/api/s", SearchHandler),
        (r"/api/ss", SHandler),
        (r"/api/movie/hot", MvhotHandler),
        (r"/api/movie/new", MvnewHandler),
        (r"/api/tv/new", TvnewHandler),
        (r"/api/tv/hot", TvhotHandler),
        (r"/api/cate", CateHandler),
        (r"/api/img/(.*)", ImgHandler),
        (r"/api/hot/word", WordHandler),
        (r"/api/mini", MiniHandler),
        (r"/api/proxycheck", CheckHandler),
    ]
    application = tornado.web.Application(routes, **settings)
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(options.port)
    tornado.ioloop.IOLoop.instance().start()
# Script entry point: start the Tornado HTTP server.
if __name__ == "__main__":
    main()
| [
"liuzhida@me.com"
] | liuzhida@me.com |
f41245cd7b6c4c6f51cc20060895c335835d5426 | 893ea9ae53aca0c4aac0ded55a8feb79105aad86 | /Program6.py | b96cc1dac7eb6215950b44914c1f80c539f068ee | [] | no_license | Saifullahshaikh/LAB-05 | 11aa0ad1f1529d08fd190fa21fc39f62c900f17c | 3b98b0f2c95e68d22f93c70ad1c0ca7396413894 | refs/heads/master | 2020-04-16T06:42:23.957993 | 2019-01-12T06:59:19 | 2019-01-12T06:59:19 | 165,357,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 212 | py | print('saifullah, 18B-092-CS, A')
print('LAB-05, Program 6')
def CubeValues():
    """Print the first six and the last six cubes of the integers 1..30."""
    cubes = [n ** 3 for n in range(1, 31)]
    print(cubes[:6])
    print(cubes[-6:])
CubeValues()
| [
"noreply@github.com"
] | Saifullahshaikh.noreply@github.com |
eb428a52af1d0fa78308e55321bf38abd6dd1f8b | be0c2a4b3258e6e2e43285ef02ddce837260d61c | /2020_03_10/byroncbr_226.py | e01970246fd752e251fbc8df1f02c5baab579725 | [] | no_license | university-of-southampton-1920/qiuzhao2020 | 7da11d87c7ba90b38025d35efde146a32597c875 | c9118186ebdd905607e69d62f9961a6dccf31eeb | refs/heads/master | 2020-12-19T15:42:58.400641 | 2020-06-07T19:33:18 | 2020-06-07T19:33:18 | 235,778,110 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 394 | py | """
Name: byroncbr_226.py
Author: bangrenc
Time: 10/3/2020 11:16 PM
"""
class TreeNode:
    """A binary-tree node holding a value and optional left/right children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution:
    """LeetCode 226: invert (mirror) a binary tree in place."""

    def invertTree(self, root: TreeNode) -> TreeNode:
        """Recursively swap every node's children and return the root."""
        if root is not None:
            inverted_right = self.invertTree(root.right)
            inverted_left = self.invertTree(root.left)
            root.left, root.right = inverted_right, inverted_left
        return root
| [
"noreply@github.com"
] | university-of-southampton-1920.noreply@github.com |
1f2424d5b24baaab7fe1c1ce30f92fcfc2ec1dd1 | 7ea5517353f325fc0bcc0e552233b103335bc9ec | /QUANTAXIS/QAMarket/common.py | 143b325f56b9ba86e312c9a8f7bbeee7f3dbd0fd | [
"MIT"
] | permissive | lxqjswa/QUANTAXIS | 304f20c3ba957d51664a8736faca6a777a658583 | a5f89b28a75d1a5094630a4ed166f596840528b1 | refs/heads/master | 2020-03-28T08:45:47.064394 | 2018-09-08T07:15:26 | 2018-09-08T07:15:26 | 147,987,895 | 1 | 0 | MIT | 2018-09-09T02:52:34 | 2018-09-09T02:52:34 | null | UTF-8 | Python | false | false | 5,031 | py |
# shipane
# "申报时间", "证券代码", "证券名称", "操作", "委托状态", "委托数量", "成交数量", "撤消数量", , "委托价格", "成交均价", "合同编号", "委托子业务", "约定号", "对方账户", "参考汇率", "结算币种", "交易币种", "证券中文名", "出错信息
from QUANTAXIS.QAUtil.QAParameter import ORDER_DIRECTION, TRADE_STATUS, ORDER_STATUS
# Chinese -> English field-name mapping for broker/simulated-trading records.
# NOTE(review): several keys are repeated later in the literal (e.g.
# '场外撤单', '场内撤单', '未成交', '全部撤单'); the last occurrence wins.
cn_en_compare = {'明细': 'id',
                 '证券代码': 'code',
                 '市场代码': 'market_code',
                 '证券名称': 'name',
                 '股票余额': 'amount',
                 '可用余额': 'sell_available',
                 '冻结数量': 'frozen',
                 '买卖标志': 'towards',
                 '撤消数量': 'cancel_amount',
                 '撤单数量': 'cancel_amount',
                 '订单类型': 'order_type',
                 '操作': 'towards',  # buy/sell flag in simulated trading
                 '委托价格': 'order_price',
                 '委托数量': 'order_amount',
                 '成交价格': 'trade_price',
                 '成交数量': 'trade_amount',
                 '状态说明': 'status',
                 '备注': 'status',  # status field in simulated trading
                 '场外撤单': 'cancel_outside',
                 '场内撤单': 'cancel_inside',
                 '未成交': 'pending',
                 '全部撤单': 'cancel_all',
                 '委托时间': 'order_time',
                 '合同编号': 'realorder_id',  # order id in simulated trading
                 '撤销数量': 'cancel_amount',
                 '委托编号': 'realorder_id',
                 '批次号': 'pc_id',
                 '盈亏': 'pnl',
                 "": 'None',
                 '成本金额': 'cost',
                 '盈亏估算': 'pnl_prob',
                 '成本价': 'hold_price',
                 '实现盈亏': 'pnl_money_already',
                 '盈亏比例(%)': 'pnl_ratio',
                 '市价': 'price',
                 '市值': 'market_value',
                 '交易市场': 'SSE',
                 '股东帐户': 'shareholders',
                 '实际数量': 'total_amount',
                 '可申赎数量': 'redemption_number',
                 '资讯': 'message',
                 '汇率': 'exchange_rate',
                 '沪港深港市场': 'hkmarket',
                 '成本价港币': 'hold_price_hk',
                 '买入成本价港币': 'buy_price_hk',
                 '买入在途数量': 'buy_onway',
                 '卖出在途数量': 'sell_onway',
                 '场内废单': 'failled',
                 '场外撤单': 'cancel_outside',
                 '场内撤单': 'cancel_inside',
                 '未成交': 'pending',
                 '已成交': 'finished',
                 '全部撤单': 'cancel_all',
                 '成交均价': 'trade_price',  # average fill price
                 '成交金额': 'trade_money',
                 '成交编号': 'trade_id',
                 '委托状态': 'status',
                 '申报时间': 'order_time',
                 '委托日期': 'order_date',
                 '委托子业务': 'order_subjob',
                 '约定号': 'yd_id',
                 '对方账户': 'other_account',
                 '参考汇率': 'refer_exchange',
                 '结算币种': 'settlement_currency',
                 '交易币种': 'trade_currency',
                 '证券中文名': 'CNname',
                 '出错信息': 'error',
                 '成交时间': 'trade_time'}
# Chinese trade-direction labels -> ORDER_DIRECTION constants.
trade_towards_cn_en = {
    '买入': ORDER_DIRECTION.BUY,
    '买': ORDER_DIRECTION.BUY,
    '卖出': ORDER_DIRECTION.SELL,
    '卖': ORDER_DIRECTION.SELL,
    '申购': ORDER_DIRECTION.ASK,
    '申': ORDER_DIRECTION.ASK,
    '证券买入': ORDER_DIRECTION.BUY,
    '证券卖出': ORDER_DIRECTION.SELL,
    '派息': ORDER_DIRECTION.XDXR,
    '': ORDER_DIRECTION.OTHER
}
# Chinese order-status labels -> ORDER_STATUS constants.
order_status_cn_en = {
    '已报': ORDER_STATUS.QUEUED,  # order accepted by the exchange front end
    '未成交': ORDER_STATUS.QUEUED,
    '已确认': ORDER_STATUS.QUEUED,  # IPO subscription accepted by the front end
    '场内废单': ORDER_STATUS.FAILED,
    '废单': ORDER_STATUS.FAILED,  # order rejected for violating trading rules
    '未报': ORDER_STATUS.FAILED,  # order not yet accepted by the front end
    '场外废单': ORDER_STATUS.FAILED,
    '已成交': ORDER_STATUS.SUCCESS_ALL,
    '已成': ORDER_STATUS.SUCCESS_ALL,
    '全部成交': ORDER_STATUS.SUCCESS_ALL,
    '部成': ORDER_STATUS.SUCCESS_PART,  # order partially filled
    '已撤单': ORDER_STATUS.CANCEL_ALL,
    '全部撤单': ORDER_STATUS.CANCEL_ALL,
    '已撤': ORDER_STATUS.CANCEL_ALL,
    '已报待撤': ORDER_STATUS.QUEUED,  # cancel accepted, pending (e.g. outside trading hours)
    '场内撤单': ORDER_STATUS.CANCEL_ALL,
}
| [
"yutiansut@qq.com"
] | yutiansut@qq.com |
e53c50114defbb9001385514940c7f56071976fb | 20c20938e201a0834ccf8b5f2eb5d570d407ad15 | /abc032/abc032_b/8108449.py | b7cc2e87327a45202a747c78434008246dab432c | [] | no_license | kouhei-k/atcoder_submissions | 8e1a1fb30c38e0d443b585a27c6d134bf1af610a | 584b4fd842ccfabb16200998fe6652f018edbfc5 | refs/heads/master | 2021-07-02T21:20:05.379886 | 2021-03-01T12:52:26 | 2021-03-01T12:52:26 | 227,364,764 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 159 | py | S=input()
k=int(input())
import collections
# Count occurrences of every length-k substring of S.
table=collections.defaultdict(int)
for i in range(len(S)-k+1):
    table[S[i:k+i]]+=1
# Answer: the number of distinct length-k substrings.
print(len(list(table.keys())))
| [
"kouhei.k.0116@gmail.com"
] | kouhei.k.0116@gmail.com |
cb0e122b4c0e9234e1f0f0e11d6affdfaed10c6a | aa0270b351402e421631ebc8b51e528448302fab | /sdk/network/azure-mgmt-frontdoor/azure/mgmt/frontdoor/aio/operations/_experiments_operations.py | 3447700b37de7b3edc5130e30a769d21572d3f89 | [
"MIT",
"LGPL-2.1-or-later",
"LicenseRef-scancode-generic-cla"
] | permissive | fangchen0601/azure-sdk-for-python | d04a22109d0ff8ff209c82e4154b7169b6cb2e53 | c2e11d6682e368b2f062e714490d2de42e1fed36 | refs/heads/master | 2023-05-11T16:53:26.317418 | 2023-05-04T20:02:16 | 2023-05-04T20:02:16 | 300,440,803 | 0 | 0 | MIT | 2020-10-16T18:45:29 | 2020-10-01T22:27:56 | null | UTF-8 | Python | false | false | 38,260 | py | # pylint: disable=too-many-lines
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
import sys
from typing import Any, AsyncIterable, Callable, Dict, IO, Optional, TypeVar, Union, cast, overload
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.tracing.decorator_async import distributed_trace_async
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._experiments_operations import (
build_create_or_update_request,
build_delete_request,
build_get_request,
build_list_by_profile_request,
build_update_request,
)
if sys.version_info >= (3, 8):
from typing import Literal # pylint: disable=no-name-in-module, ungrouped-imports
else:
from typing_extensions import Literal # type: ignore # pylint: disable=ungrouped-imports
T = TypeVar("T")
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExperimentsOperations:
"""
.. warning::
**DO NOT** instantiate this class directly.
Instead, you should access the following operations through
:class:`~azure.mgmt.frontdoor.aio.FrontDoorManagementClient`'s
:attr:`experiments` attribute.
"""
models = _models
    def __init__(self, *args, **kwargs) -> None:
        # Accept the client plumbing either positionally (client, config,
        # serializer, deserializer) or as keyword arguments.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_profile(
        self, resource_group_name: str, profile_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.Experiment"]:
        """Gets a list of Experiments.

        Gets a list of Experiments.

        :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
        :type resource_group_name: str
        :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
        :type profile_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either Experiment or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.frontdoor.models.Experiment]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
        cls: ClsType[_models.ExperimentList] = kwargs.pop("cls", None)
        # Map HTTP status codes to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})

        # Closure: build either the first-page request or a follow-up for the
        # server-provided next_link.
        def prepare_request(next_link=None):
            if not next_link:
                request = build_list_by_profile_request(
                    resource_group_name=resource_group_name,
                    profile_name=profile_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_profile.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                request = HttpRequest("GET", next_link)
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request

        # Closure: deserialize one page into (next_link, items).
        async def extract_data(pipeline_response):
            deserialized = self._deserialize("ExperimentList", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)

        # Closure: fetch one page and raise mapped errors on non-200 responses.
        async def get_next(next_link=None):
            request = prepare_request(next_link)

            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(get_next, extract_data)

    list_by_profile.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments"
    }
    @distributed_trace_async
    async def get(
        self, resource_group_name: str, profile_name: str, experiment_name: str, **kwargs: Any
    ) -> _models.Experiment:
        """Gets an Experiment by ExperimentName.

        Gets an Experiment by ExperimentName.

        :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
        :type resource_group_name: str
        :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
        :type profile_name: str
        :param experiment_name: The Experiment identifier associated with the Experiment. Required.
        :type experiment_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: Experiment or the result of cls(response)
        :rtype: ~azure.mgmt.frontdoor.models.Experiment
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        # Map HTTP status codes to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
        cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
        request = build_get_request(
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            template_url=self.get.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize("Experiment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    get.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
    }
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        profile_name: str,
        experiment_name: str,
        parameters: Union[_models.Experiment, IO],
        **kwargs: Any
    ) -> _models.Experiment:
        """Issue the initial PUT of the create-or-update long-running operation
        and return the first response payload (200/201/202 accepted)."""
        # Map HTTP status codes to azure-core exception types.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
        content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
        cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
        content_type = content_type or "application/json"
        # Raw streams/bytes go out as-is; model objects are serialized to JSON.
        _json = None
        _content = None
        if isinstance(parameters, (IO, bytes)):
            _content = parameters
        else:
            _json = self._serialize.body(parameters, "Experiment")
        request = build_create_or_update_request(
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            subscription_id=self._config.subscription_id,
            api_version=api_version,
            content_type=content_type,
            json=_json,
            content=_content,
            template_url=self._create_or_update_initial.metadata["url"],
            headers=_headers,
            params=_params,
        )
        request = _convert_request(request)
        request.url = self._client.format_url(request.url)
        pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
            request, stream=False, **kwargs
        )
        response = pipeline_response.http_response
        if response.status_code not in [200, 201, 202]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize("Experiment", pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize("Experiment", pipeline_response)
        if response.status_code == 202:
            deserialized = self._deserialize("Experiment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})  # type: ignore
        return deserialized  # type: ignore

    _create_or_update_initial.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
    }
@overload
async def begin_create_or_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: _models.Experiment,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Creates or updates an Experiment.

    Creates or updates an Experiment.

    This overload accepts a deserialized ``Experiment`` model; see the ``IO``
    overload for sending a raw, pre-serialized payload.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment resource. Required.
    :type parameters: ~azure.mgmt.frontdoor.models.Experiment
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
async def begin_create_or_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Creates or updates an Experiment.

    Creates or updates an Experiment.

    This overload accepts a raw, pre-serialized ``IO`` payload; see the model
    overload for passing a deserialized ``Experiment`` instance.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment resource. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace_async
async def begin_create_or_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: Union[_models.Experiment, IO],
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Creates or updates an Experiment.

    Creates or updates an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment resource. Is either a model type or a IO type. Required.
    :type parameters: ~azure.mgmt.frontdoor.models.Experiment or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    # Only issue the initial PUT when not resuming from a continuation token;
    # ``raw_result`` is deliberately left unbound on the resume path (it is
    # never read there).
    if cont_token is None:
        raw_result = await self._create_or_update_initial(
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,  # hand the raw pipeline response to the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the LRO's final response into an Experiment model.
        deserialized = self._deserialize("Experiment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # polling: True -> default ARM poller; False -> no polling; anything else
    # is treated as a caller-provided polling method.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_create_or_update.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
async def _update_initial(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: Union[_models.ExperimentUpdateModel, IO],
    **kwargs: Any
) -> _models.Experiment:
    """Send the initial PATCH of the update long-running operation.

    Returns the ``Experiment`` deserialized from the first service response
    (the ``begin_update`` poller drives the rest of the LRO).
    """
    # ARM-specific 4xx codes mapped onto azure-core exception types; any
    # caller-supplied ``error_map`` entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None)) or "application/json"
    cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)

    # Raw IO/bytes payloads are forwarded untouched; model instances are
    # serialized to their JSON wire form.
    _json = None
    _content = None
    if isinstance(parameters, (IO, bytes)):
        _content = parameters
    else:
        _json = self._serialize.body(parameters, "ExperimentUpdateModel")

    request = build_update_request(
        resource_group_name=resource_group_name,
        profile_name=profile_name,
        experiment_name=experiment_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        content_type=content_type,
        json=_json,
        content=_content,
        template_url=self._update_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    http_response = pipeline_response.http_response

    if http_response.status_code not in (200, 202):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    # 200 and 202 both carry an Experiment body and are handled identically.
    deserialized = self._deserialize("Experiment", pipeline_response)

    if cls:
        return cls(pipeline_response, deserialized, {})  # type: ignore
    return deserialized  # type: ignore

_update_initial.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
@overload
async def begin_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: _models.ExperimentUpdateModel,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Updates an Experiment by Experiment id.

    Updates an Experiment.

    This overload accepts a deserialized ``ExperimentUpdateModel``; see the
    ``IO`` overload for sending a raw, pre-serialized payload.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment Update Model. Required.
    :type parameters: ~azure.mgmt.frontdoor.models.ExperimentUpdateModel
    :keyword content_type: Body Parameter content-type. Content type parameter for JSON body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@overload
async def begin_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: IO,
    *,
    content_type: str = "application/json",
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Updates an Experiment by Experiment id.

    Updates an Experiment.

    This overload accepts a raw, pre-serialized ``IO`` payload; see the model
    overload for passing a deserialized ``ExperimentUpdateModel`` instance.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment Update Model. Required.
    :type parameters: IO
    :keyword content_type: Body Parameter content-type. Content type parameter for binary body.
     Default value is "application/json".
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
@distributed_trace_async
async def begin_update(
    self,
    resource_group_name: str,
    profile_name: str,
    experiment_name: str,
    parameters: Union[_models.ExperimentUpdateModel, IO],
    **kwargs: Any
) -> AsyncLROPoller[_models.Experiment]:
    """Updates an Experiment by Experiment id.

    Updates an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :param parameters: The Experiment Update Model. Is either a model type or a IO type. Required.
    :type parameters: ~azure.mgmt.frontdoor.models.ExperimentUpdateModel or IO
    :keyword content_type: Body Parameter content-type. Known values are: 'application/json'.
     Default value is None.
    :paramtype content_type: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either Experiment or the result of
     cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.frontdoor.models.Experiment]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = case_insensitive_dict(kwargs.pop("headers", {}) or {})
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    content_type: Optional[str] = kwargs.pop("content_type", _headers.pop("Content-Type", None))
    cls: ClsType[_models.Experiment] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    # Only issue the initial PATCH when not resuming from a continuation token;
    # ``raw_result`` is deliberately left unbound on the resume path (it is
    # never read there).
    if cont_token is None:
        raw_result = await self._update_initial(
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            parameters=parameters,
            api_version=api_version,
            content_type=content_type,
            cls=lambda x, y, z: x,  # hand the raw pipeline response to the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):
        # Deserialize the LRO's final response into an Experiment model.
        deserialized = self._deserialize("Experiment", pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized

    # polling: True -> default ARM poller; False -> no polling; anything else
    # is treated as a caller-provided polling method.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_update.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
async def _delete_initial(  # pylint: disable=inconsistent-return-statements
    self, resource_group_name: str, profile_name: str, experiment_name: str, **kwargs: Any
) -> None:
    """Send the initial DELETE of the delete long-running operation.

    Raises on unexpected status codes; otherwise returns ``None`` (or the
    result of the caller-supplied ``cls`` callback).
    """
    # ARM-specific 4xx codes mapped onto azure-core exception types; any
    # caller-supplied ``error_map`` entries take precedence.
    error_map = {
        401: ClientAuthenticationError,
        404: ResourceNotFoundError,
        409: ResourceExistsError,
        304: ResourceNotModifiedError,
    }
    error_map.update(kwargs.pop("error_map", {}) or {})

    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    cls: ClsType[None] = kwargs.pop("cls", None)

    request = build_delete_request(
        resource_group_name=resource_group_name,
        profile_name=profile_name,
        experiment_name=experiment_name,
        subscription_id=self._config.subscription_id,
        api_version=api_version,
        template_url=self._delete_initial.metadata["url"],
        headers=_headers,
        params=_params,
    )
    request = _convert_request(request)
    request.url = self._client.format_url(request.url)

    pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
        request, stream=False, **kwargs
    )
    http_response = pipeline_response.http_response

    if http_response.status_code not in (200, 202, 204):
        map_error(status_code=http_response.status_code, response=http_response, error_map=error_map)
        error = self._deserialize.failsafe_deserialize(_models.ErrorResponse, pipeline_response)
        raise HttpResponseError(response=http_response, model=error, error_format=ARMErrorFormat)

    # DELETE has no body to deserialize; only invoke the custom callback, if any.
    if cls:
        return cls(pipeline_response, None, {})

_delete_initial.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
@distributed_trace_async
async def begin_delete(
    self, resource_group_name: str, profile_name: str, experiment_name: str, **kwargs: Any
) -> AsyncLROPoller[None]:
    """Deletes an Experiment.

    Deletes an Experiment.

    :param resource_group_name: Name of the Resource group within the Azure subscription. Required.
    :type resource_group_name: str
    :param profile_name: The Profile identifier associated with the Tenant and Partner. Required.
    :type profile_name: str
    :param experiment_name: The Experiment identifier associated with the Experiment. Required.
    :type experiment_name: str
    :keyword callable cls: A custom type or function that will be passed the direct response
    :keyword str continuation_token: A continuation token to restart a poller from a saved state.
    :keyword polling: By default, your polling method will be AsyncARMPolling. Pass in False for
     this operation to not poll, or pass in your own initialized polling object for a personal
     polling strategy.
    :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
    :rtype: ~azure.core.polling.AsyncLROPoller[None]
    :raises ~azure.core.exceptions.HttpResponseError:
    """
    _headers = kwargs.pop("headers", {}) or {}
    _params = case_insensitive_dict(kwargs.pop("params", {}) or {})

    api_version: Literal["2019-11-01"] = kwargs.pop("api_version", _params.pop("api-version", "2019-11-01"))
    cls: ClsType[None] = kwargs.pop("cls", None)
    polling: Union[bool, AsyncPollingMethod] = kwargs.pop("polling", True)
    lro_delay = kwargs.pop("polling_interval", self._config.polling_interval)
    cont_token: Optional[str] = kwargs.pop("continuation_token", None)
    # Only issue the initial DELETE when not resuming from a continuation token;
    # ``raw_result`` is deliberately left unbound on the resume path (it is
    # never read there).
    if cont_token is None:
        raw_result = await self._delete_initial(  # type: ignore
            resource_group_name=resource_group_name,
            profile_name=profile_name,
            experiment_name=experiment_name,
            api_version=api_version,
            cls=lambda x, y, z: x,  # hand the raw pipeline response to the poller
            headers=_headers,
            params=_params,
            **kwargs
        )
    kwargs.pop("error_map", None)

    def get_long_running_output(pipeline_response):  # pylint: disable=inconsistent-return-statements
        # DELETE yields no body; only invoke the custom callback, if any.
        if cls:
            return cls(pipeline_response, None, {})

    # polling: True -> default ARM poller; False -> no polling; anything else
    # is treated as a caller-provided polling method.
    if polling is True:
        polling_method: AsyncPollingMethod = cast(AsyncPollingMethod, AsyncARMPolling(lro_delay, **kwargs))
    elif polling is False:
        polling_method = cast(AsyncPollingMethod, AsyncNoPolling())
    else:
        polling_method = polling
    if cont_token:
        return AsyncLROPoller.from_continuation_token(
            polling_method=polling_method,
            continuation_token=cont_token,
            client=self._client,
            deserialization_callback=get_long_running_output,
        )
    return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)  # type: ignore

begin_delete.metadata = {
    "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/NetworkExperimentProfiles/{profileName}/Experiments/{experimentName}"
}
| [
"noreply@github.com"
] | fangchen0601.noreply@github.com |
f085ce927f914d7feac5640390c519859f1df241 | 53e3e676b66e4ed6bbf7941c7e78c2820fcbed59 | /build_isolated/rospack/test/test/utest.py | c5cf969afefe057c5a2c2fca0f9db7480002ba66 | [] | no_license | daichi08/catkin_ws_atPi | 0bdc3e5f2c7073d888a2f6109c0842521c99104e | 9690697e1d432f06c5ee4570a0e7d1a2cc7c44ed | refs/heads/master | 2020-03-22T21:09:04.291933 | 2018-08-02T06:10:41 | 2018-08-02T06:10:41 | 140,661,209 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 38,742 | py | # Software License Agreement (BSD License)
#
# Copyright (c) 2008, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# Author: Brian Gerkey/Ken Conley
import os
import unittest
import tempfile
import shutil
import sys
import platform
from subprocess import Popen, PIPE
ROS_PACKAGE_PATH = 'ROS_PACKAGE_PATH'
ROS_LANG_DISABLE = 'ROS_LANG_DISABLE'
ROSPACK_PATH = "/home/pi/catkin_ws/devel_isolated/rospack/bin/rospack"
# Set the initial CWD, so that we can set it back later.
# Go up one directory. Have to do this because nosetests doesn't seem to
# run properly from the parent directory, even with --where= passed in.
initial_cwd = os.path.dirname(os.path.dirname(__file__))
os.chdir(initial_cwd)
_structure_test_p = os.path.abspath('structure_test')
# expected layout of the structure_test directory, used for rospack find and list tests
structure_test = {
'package1': 'package1',
'package2': 'package2',
'package3': 'subdir1/package3',
'package4': 'subdir1/subdir1_1/package4',
'package5': 'subdir1/subdir1_1/package5',
'package6': 'subdir3/package6',
'package7': 'subdir3/package7',
}
# canonicalize
for k in structure_test.keys():
structure_test[k] = os.path.abspath(os.path.join(_structure_test_p, structure_test[k]))
aliases = {
'deps': 'depends',
'deps1': 'depends1',
'deps-manifests': 'depends-manifests',
'deps-indent': 'depends-indent',
'rosdep': 'rosdeps',
'rosdep0': 'rosdeps0'
}
## Process-level tests of rospack executable
class RospackTestCase(unittest.TestCase):
def setUp(self):
# Some tests change CWD
os.chdir(initial_cwd)
## runs rospack with ROS_PACKAGE_PATH set to ./test
## @return int, str: return code, stdout
def _run_rospack(self, ros_package_path, pkgname, command):
env = os.environ.copy()
if ros_package_path is not None:
env[ROS_PACKAGE_PATH] = ros_package_path
elif ROS_PACKAGE_PATH in env:
del env[ROS_PACKAGE_PATH]
# Must split up the command string into its whitespace separated
# components; otherwise you get multiple words as one element of
# argv.
#args = ["rospack", command, pkgname]
args = [ROSPACK_PATH]
if command:
for s in command.split():
args.append(s)
if pkgname is not None:
args.append(pkgname)
p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
stdout, stderr = p.communicate()
# Also test command aliases, verifying that they give the same
# return code and console output
if command:
cmd = command.split()[-1]
if cmd in aliases:
args[-2] = aliases[cmd]
alias_p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
alias_stdout, alias_stderr = alias_p.communicate()
self.assertEquals(p.returncode, alias_p.returncode)
self.assertEquals(stdout, alias_stdout)
#self.assertEquals(stderr, alias_stderr)
# rospack should only yield non-negative return codes. A negative
# return code indicates a crash (e.g., SIGSEGV, SIGABORT), which is
# never ok.
if p.returncode < 0:
self.fail('rospack returned non-zero exit code (%d), indicating a crash'%(p.returncode))
return p.returncode, stdout.strip().decode('ascii'), stderr.decode('ascii')
################################################################################
# HELPER ROUTINES
# NOTE: helpers with the 'e' prefix take in environment parameters
## @return str: stdout
def run_rospack(self, pkgname, command):
rpp = os.path.abspath('test')
return self._run_rospack(rpp, pkgname, command)[1]
## @return str: stdout
def erun_rospack(self, ros_package_path, pkgname, command):
return self._run_rospack(ros_package_path, pkgname, command)[1]
## runs rospack with ROS_PACKAGE_PATH set to ./test
## @return int: status code
def run_rospack_status(self, pkgname, command):
rpp = os.path.abspath('test')
return self._run_rospack(rpp, pkgname, command)[0]
## @return int: status code
def erun_rospack_status(self, ros_package_path, pkgname, command):
return self._run_rospack(ros_package_path, pkgname, command)[0]
## assert that rospack fails on the specified args
def rospack_fail(self, package, command):
rpp = os.path.abspath('test')
code, stdout, stderr = self._run_rospack(rpp, package, command)
self.assertNotEquals(0, code, "rospack [%s %s] should have failed. \n\nstdout[%s] \n\nstderr[%s]"%(command, package, stdout, stderr))
## assert that rospack fails on the specified args. includes ROS_PACKAGE_PATH
def erospack_fail(self, ros_package_path, package, command):
code, stdout, stderr = self._run_rospack(ros_package_path, package, command)
self.assertNotEquals(0, code, "rospack [%s %s] should have failed instead of returning status code 0. \n\nstdout[%s] \n\nstderr[%s]"%(command, package, stdout, stderr))
## assert that rospack succeeds on the specified args
def rospack_succeed(self, package, command):
rpp = os.path.abspath('test')
status_code, stdout, stderr = self._run_rospack(rpp, package, command)
self.assertEquals(0, status_code, '"rospack %s %s" failed with status code [%s] instead of succeeding with [0]. \n\nstdout[%s] \n\nstderr[%s]'%(command, package, status_code, stdout, stderr))
## assert that rospack succeeds on the specified args
def erospack_succeed(self, ros_package_path, package, command):
status_code, stdout, stderr = self._run_rospack(ros_package_path, package, command)
self.assertEquals(0, status_code, "rospack [%s %s, env ROS_PACKAGE_PATH=%s] failed with status code [%s] instead of succeeding with [0]. \n\nstdout[%s] \n\nstderr[%s]"%(command, package, ros_package_path, status_code, stdout, stderr))
# helper routine that does return value validation where the return value from
# rospack is an unordered, line-separated list
def check_ordered_list(self, command, tests):
for retlist, package in tests:
expected = set(retlist)
self.rospack_succeed(package, command)
retval = self.strip_opt_ros(self.run_rospack(package, command))
retactual = [v for v in retval.split('\n') if v]
self.failIf(set(retlist) ^ set(retactual), "rospack %s %s failed: [%s] vs [%s]"%(command, package, retlist, retactual))
self.assertEquals('\n'.join(retlist), '\n'.join(retactual))
# variant of check_ordered_list that allows specifying ros_package_path.
# helper routine that does return value validation where the return value from
# rospack is an unordered, line-separated list
def echeck_ordered_list(self, command, tests):
for retlist, ros_package_path, package in tests:
expected = set(retlist)
self.erospack_succeed(ros_package_path, package, command)
retval = self.erun_rospack(ros_package_path, package, command)
retactual = [v for v in retval.split('\n') if v]
self.failIf(set(retlist) ^ set(retactual), "[env %s] rospack %s %s failed: [%s] vs [%s]"%(ros_package_path, command, package, retlist, retactual))
# variant that does not require ordering among the return values
def check_unordered_list(self, command, tests):
for retlist, package in tests:
expected = set(retlist)
self.rospack_succeed(package, command)
retval = self.run_rospack(package, command)
retactual = [v for v in retval.split('\n') if v]
self.failIf(set(retlist) ^ set(retactual), "rospack %s %s failed: [%s] vs [%s]"%(command, package, retlist, retactual))
#self.assertEquals('\n'.join(retlist), '\n'.join(retactual))
    # variant that does not require ordering among the return values
    def echeck_unordered_list(self, command, tests):
        """Like check_unordered_list, but each case supplies its own
        ROS_PACKAGE_PATH: tests are (expected_list, ros_package_path, package)."""
        for retlist, ros_package_path, package in tests:
            expected = set(retlist)
            self.erospack_succeed(ros_package_path, package, command)
            retval = self.erun_rospack(ros_package_path, package, command)
            retactual = [v for v in retval.split('\n') if v]
            self.failIf(set(retlist) ^ set(retactual), "rospack %s %s failed: [%s] vs [%s]"%(command, package, retlist, retactual))
        #self.assertEquals('\n'.join(retlist), '\n'.join(retactual))
    ################################################################################
    ## ARG PARSING
    def test_no_option(self):
        """rospack with no command and no package must still succeed."""
        self.rospack_succeed(None, None)
    def test_fake_option(self):
        """An unrecognized option must make rospack fail."""
        self.rospack_fail("deps", "--fake deps")
def test_invalid_option(self):
self.rospack_fail("deps", "deps --lang=cpp --attrib=flags")
self.rospack_fail("deps", "deps --lang=cpp")
self.rospack_fail("deps", "deps --attrib=lflags")
self.rospack_fail("base", "export --lang=cpp --attrib=cflags --top=")
self.rospack_fail(None, "profile --length=")
self.rospack_fail(None, "deps --length=10")
self.rospack_fail(None, "deps --zombie-only")
self.rospack_fail(None, "profile --deps-only")
    def test_ros_cache_timeout(self):
        """'profile' must succeed for zero, negative, and tiny cache timeouts."""
        env = os.environ.copy()
        os.environ['ROS_CACHE_TIMEOUT'] = '0'
        self.rospack_succeed(None, "profile")
        os.environ['ROS_CACHE_TIMEOUT'] = '-1'
        self.rospack_succeed(None, "profile")
        import time
        # sleep so the cache is definitely older than the .001s timeout below
        time.sleep(0.1)
        os.environ['ROS_CACHE_TIMEOUT'] = '.001'
        self.rospack_succeed(None, "profile")
        # restore the environment mutated above
        os.environ = env
    def test_profile(self):
        """'profile' must accept its --zombie-only and --length options."""
        # TODO: test that the output is correct
        self.rospack_succeed(None, "profile --zombie-only")
        # TODO: test that the output is correct
        self.rospack_succeed(None, "profile --length=10")
    def test_ros_home(self):
        """rospack must write its cache under ROS_HOME and survive a missing,
        unwritable, corrupted, or non-directory ROS_HOME, and an unset HOME."""
        env = os.environ.copy()
        # Make sure we write to ROS_HOME, #2812.
        d = tempfile.mkdtemp()
        self.assertEquals(0, len(os.listdir(d)))
        os.environ['ROS_HOME'] = d
        self.rospack_succeed(None, "profile")
        self.assertEquals(1, len(os.listdir(d)))
        cache_path = os.path.join(d, os.listdir(d)[0])
        self.assertEquals(True, os.path.exists(cache_path))
        # Make sure we auto-create ROS_HOME
        shutil.rmtree(d)
        self.rospack_succeed(None, "profile")
        self.assertEquals(True, os.path.exists(cache_path))
        # Test with a corrupted cache
        f = open(cache_path, 'w')
        f.write('#SOMETHING\n')
        f.close()
        self.rospack_succeed(None, "list")
        # Make sure we proceed when we can't write to ROS_HOME
        os.chmod(d, 0000)
        self.rospack_succeed(None, "profile")
        # Delete the .ros directory, just in case this test is being run as
        # root, in which case the above call will cause .ros to be created,
        # despite the restrictive permissions that were set.
        if os.path.exists(d):
            os.chmod(d, 0o700)
            shutil.rmtree(d)
        # Make sure we proceed when we HOME/.ros isn't a directory
        f = open(d, 'w')
        f.close()
        os.chmod(d, 0o700)
        self.rospack_succeed(None, "profile")
        # Make sure we proceed when neither HOME nor ROS_HOME is set
        del os.environ['ROS_HOME']
        del os.environ['HOME']
        self.rospack_succeed(None, "profile")
        # Clean up
        os.unlink(d)
        os.environ = env
def test_no_package_allowed(self):
self.rospack_succeed(None, "help")
self.rospack_succeed(None, "profile")
self.rospack_succeed(None, "list")
self.rospack_succeed(None, "list-names")
self.rospack_succeed(None, "list-duplicates")
self.rospack_succeed(None, "langs")
def test_no_package_allowed_bad(self):
self.rospack_fail("deps", "profile")
self.rospack_fail("deps", "list")
self.rospack_fail("deps", "list-names")
self.rospack_fail("deps", "list-duplicates")
self.rospack_fail("deps", "langs")
def test_export_bad(self):
self.rospack_fail("base", "export --lang= --attrib=lflags")
self.rospack_fail("base", "export --lang=cpp --attrib=")
self.rospack_fail("base", "export --attrib=lflags")
self.rospack_fail("base", "export --lang=cpp")
self.rospack_fail("base", "export --lang=cpp --lang=python --attrib=lflags")
self.rospack_fail("base", "export --lang=cpp --attrib=lflags --attrib=cflags")
self.rospack_fail("base", "export --lang=cpp --attrib=cflags --top=foo")
def test_plugins_bad(self):
self.rospack_fail("base", "plugins")
self.rospack_fail("base", "plugins --lang=cpp")
self.rospack_fail("base", "plugins --attrib=")
self.rospack_fail("base", "plugins --top=foo")
    def test_rosdep(self):
        """'rosdep' must list this package's rosdeps; 'rosdep0' only direct ones."""
        self.rospack_succeed("base", "rosdep")
        self.assertEquals("name: foo", self.run_rospack("base", "rosdep"))
        self.rospack_succeed("deps", "rosdep0")
        self.assertEquals("name: bar", self.run_rospack("deps", "rosdep0"))
        # 'deps' pulls in base's rosdep transitively; order is unspecified
        self.check_unordered_list("rosdep", [(["name: foo", "name: bar"], "deps")])
    ################################################################################
    ## EXPORT
    def test_export_cpp(self):
        """'export --lang=cpp' must return the manifest's cflags/lflags,
        regardless of option order, and honor --deps-only."""
        package = 'base'
        tests = [("-lfoo", "export --lang=cpp --attrib=lflags"),
                 ("-lfoo", "export --attrib=lflags --lang=cpp"),
                 ("-Isomething", "export --lang=cpp --attrib=cflags"),
                 ("-Isomething", "export --attrib=cflags --lang=cpp"),
                 ]
        for retval, arg in tests:
            self.rospack_succeed(package, arg)
            self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
        # --deps-only excludes the package itself, keeping only its deps' flags
        self.assertEquals("-lfoo -lbar", self.strip_opt_ros(self.run_rospack("deps", "export --lang=cpp --attrib=lflags --deps-only")))
        #TODO: test export with $prefix
    def test_export_roslang(self):
        """'export --lang=roslang --attrib=cmake' must return the cmake export."""
        package = 'base'
        tests = [("something.cmake", "export --lang=roslang --attrib=cmake")]
        for retval, arg in tests:
            self.rospack_succeed(package, arg)
            self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
    def test_export_non_existent_attrib(self):
        """Exporting an unknown attribute must succeed but print nothing."""
        self.rospack_succeed("base", "export --lang=cpp --attrib=fake")
        self.failIf(self.run_rospack("base", "export --lang=cpp --attrib=fake"))
    ################################################################################
    ## Plugins
    def test_plugins(self):
        """'plugins --attrib=cmake' must list plugin exports of dependent
        packages, and --top must restrict the search to one dependency tree."""
        tests = [(["deps foo.cmake", "plugins bat.cmake"], "base")]
        self.check_unordered_list("plugins --attrib=cmake", tests)
        package = 'base'
        tests = [("deps foo.cmake", "plugins --attrib=cmake --top=deps")]
        for retval, arg in tests:
            self.rospack_succeed(package, arg)
            self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
        package = 'base_two'
        tests = [("deps bar.cmake", "plugins --attrib=cmake")]
        for retval, arg in tests:
            self.rospack_succeed(package, arg)
            self.assertEquals(retval, self.strip_opt_ros(self.run_rospack(package, arg)))
    ################################################################################
    ## ENVIRONMENT TEST
    ## test rospack with ROS_PACKAGE_PATH set
    def test_ros_package_path(self):
        """'deps' must resolve packages through ROS_PACKAGE_PATH."""
        testp = os.path.abspath('test')
        tests = [
            (["base", "base_two"], testp, "deps"),
            ]
        self.echeck_ordered_list("deps", tests)
    ## tests internal rpp precedence (#2854)
    def test_ros_package_path_precedence(self):
        """Earlier ROS_PACKAGE_PATH entries must win, whether an entry names a
        tree or points directly at the package directory."""
        teste = os.path.abspath('test_empty')
        testp = os.path.abspath('test')
        test2p = os.path.abspath('test2')
        testp_roslang = os.path.join(testp, 'roslang')
        test2p_roslang = os.path.join(test2p, 'roslang')
        # every combination must resolve 'roslang' to the copy under 'test'
        tests = [([testp_roslang], teste + ':' + ':'.join([testp, test2p]), "roslang"),
                 ([testp_roslang], teste + ':' + ':'.join([testp, test2p_roslang]), "roslang"),
                 ([testp_roslang], teste + ':' + ':'.join([testp_roslang, test2p]), "roslang"),
                 ([testp_roslang], teste + ':' + ':'.join([testp_roslang, test2p_roslang]), "roslang")]
        self.echeck_unordered_list('find', tests)
    ## tests rpp vs rr precedence
    def test_ros_package_path_precedence_1(self):
        """When the same package exists in several trees, the one from the
        earlier ROS_PACKAGE_PATH entry must be used (checked via its lflags)."""
        testp = os.path.abspath('test')
        test2p = os.path.abspath('test2')
        test3p = os.path.abspath('test3')
        tests = [
            (["test"], testp + ':' + test2p, "precedence1"),
            (["test2"], test2p + ':' + testp, "precedence1"),
            (["test2"], testp + ':' + "%s:%s"%(test2p, test3p), "precedence2"),
            (["test3"], testp + ':' + "%s:%s"%(test3p, test2p), "precedence2"),
            ]
        self.echeck_ordered_list('libs-only-l', tests)
    ## tests list-duplicates
    def test_list_duplicates(self):
        """'list-duplicates' must succeed with one tree and with trees that
        contain duplicated packages."""
        testp = os.path.abspath('test')
        test2p = os.path.abspath('test2')
        test3p = os.path.abspath('test3')
        self.erospack_succeed(testp, None, 'list-duplicates')
        self.erospack_succeed(testp + ':' + '%s:%s'%(test2p,test3p), None, 'list-duplicates')
    # test ability to point ros_package_path directly at package
    def test_ros_package_path_direct_package(self):
        """A ROS_PACKAGE_PATH entry may name a package directory itself."""
        testp = os.path.abspath('test')
        test2p = os.path.abspath('test2')
        test3p = os.path.abspath('test3')
        # point directly at precedence 2/3
        rpp = ':'.join([os.path.join(test2p, 'precedence2'),os.path.join(test3p, 'precedence3')])
        tests = [
            (["test2"], testp + ':' + rpp, "precedence2"),
            (["test3"], testp + ':' + rpp, "precedence3"),
            ]
        self.echeck_ordered_list('libs-only-l', tests)
    def test_ros_package_path_colons(self):
        """Empty entries (stray colons) in ROS_PACKAGE_PATH must be ignored."""
        # scatter some colons into ros package path to make sure rospack doesn't mind
        testp = os.path.abspath('test')
        test2p = os.path.abspath('test2')
        # Add a trailing slash, to make sure that it gets removed
        test3p = os.path.abspath('test3') + '/'
        tests = [
            (["base","base_two"], testp + ':' + "::%s:::"%testp, "deps"),
            (["base","base_two"], testp + ':' + "::", "deps"),
            ]
        self.echeck_ordered_list('deps', tests)
        tests = [
            (["test"], testp + ':' + ":::%s:"%test2p, "precedence1"),
            (["test2"],testp + ':' + "::%s::%s::"%(test2p,test3p), "precedence2"),
            ]
        self.echeck_ordered_list("libs-only-l", tests)
def test_ros_package_path_bad_paths(self):
testp = os.path.abspath('test')
test2p = os.path.abspath('test2')
non_existentp = os.path.abspath('test')
tests = [
(["test"], testp + ':' + non_existentp, "precedence1"),
(["test2"],testp + ':' + ":%s:%s"%(non_existentp, test2p), "precedence2"),
(["test2"],testp + ':' + ":%s:%s"%(test2p, non_existentp), "precedence2"),
]
self.echeck_ordered_list("libs-only-l", tests)
    # Test rospack from within a package
    def test_ros_in_package(self):
        """Running rospack from inside a package directory must work with no
        package argument; running from an unlinked cwd must fail."""
        pwd = os.getcwd()
        rpp = os.path.join(pwd, 'test')
        os.chdir(os.path.abspath(os.path.join('test', 'deps')))
        self.erospack_succeed(rpp, None, 'depends1')
        self.echeck_unordered_list('depends1', [(["base", "base_two"], rpp, None)])
        # Check what happens when we're in an unlinked directory
        d = tempfile.mkdtemp()
        os.chdir(d)
        os.rmdir(d)
        self.erospack_fail(rpp, None, 'depends1')
        # restore the working directory for subsequent tests
        os.chdir(pwd)
    ################################################################################
    ## rospack list
    def _rospack_list(self, ros_package_path):
        """Run 'rospack list' with ROS_PACKAGE_PATH set (or removed when None)
        and return (returncode, decoded stdout)."""
        env = os.environ.copy()
        if ros_package_path is not None:
            env[ROS_PACKAGE_PATH] = ros_package_path
        elif ROS_PACKAGE_PATH in env:
            del env[ROS_PACKAGE_PATH]
        args = [ROSPACK_PATH, 'list']
        p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
        retval = p.communicate()[0]
        return p.returncode, retval.strip().decode('ascii')
    def _check_rospack_list(self, expected, retval):
        """Assert that 'rospack list' output *retval* ("name path" per line)
        matches the *expected* {package: absolute_path} mapping."""
        lines = [l for l in retval.split('\n') if l]
        packages = [l[:l.find(' ')] for l in lines]
        # canonicalize paths
        paths = [os.path.abspath(l[l.find(' ')+1:]) for l in lines]
        result = {}
        for pack, path in zip(packages, paths):
            result[pack] = os.path.abspath(path)
        self.failIf(set(expected.keys()) ^ set(packages), "package lists do not match (expected vs. actual): %s vs %s"%(expected.keys(), packages))
        for pack,path in expected.items():
            self.assertEquals(path, result[pack])
    ## test rospack list on an empty tree
    def test_rospack_list_empty(self):
        """'list' over an empty tree must succeed and print nothing."""
        rpp = os.path.abspath('test_empty')
        retcode, retval = self._rospack_list(rpp)
        self.assertEquals(0, retcode)
        self.failIf(retval, "rospack list on empty directory returned value %s"%retval)
    ## test rospack depends-on1 in a directory that's not a package (#2556)
    def test_rospack_depends_on_not_a_package(self):
        """'depends-on1' with no package argument must fail when the cwd is
        not inside a package."""
        pwd = os.getcwd()
        rpp = os.path.abspath('test')
        os.chdir(os.path.abspath('/'))
        self.erospack_fail(rpp, None, 'depends-on1')
        os.chdir(pwd)
    # test that rospack list removes duplicates
    def test_rospack_list_dups(self):
        """'list' must succeed on a tree containing duplicated packages."""
        # make sure result is same if ROS_ROOT=ROS_PACKAGE_PATH
        rpp = os.path.abspath('structure_test')
        retcode, retval = self._rospack_list(rpp)
        # NOTE(review): only the exit code is checked here; no assertion
        # actually verifies duplicate removal — confirm intent.
        self.assertEquals(0, retcode)
    def test_rospack_list_no_rpp(self):
        """'list' must report exactly the packages of the structure_test tree."""
        rpp = os.path.abspath('structure_test')
        expected = structure_test.copy()
        retcode, retval = self._rospack_list(rpp)
        self.assertEquals(0, retcode)
        self._check_rospack_list(expected, retval)
    #TODO: symlink test
    #TODO: test with ros package path
    ################################################################################
    ## rospack list-names
    def _rospack_list_names(self, ros_package_path):
        """Run 'rospack list-names' with ROS_PACKAGE_PATH set (or removed when
        None) and return (returncode, decoded stdout)."""
        env = os.environ.copy()
        if ros_package_path is not None:
            env[ROS_PACKAGE_PATH] = ros_package_path
        elif ROS_PACKAGE_PATH in env:
            del env[ROS_PACKAGE_PATH]
        args = [ROSPACK_PATH, 'list-names']
        p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
        retval = p.communicate()[0]
        return p.returncode, retval.strip().decode('ascii')
    ## test rospack list-names on an empty tree
    def test_rospack_list_names_empty(self):
        """'list-names' over an empty tree must succeed and print nothing."""
        rpp = os.path.abspath('test_empty')
        retcode, retval = self._rospack_list_names(rpp)
        self.assertEquals(0, retcode)
        self.failIf(retval, "rospack list-names on empty directory returned value %s"%retval)
    # test that rospack list removes duplicates
    def test_rospack_list_names_dups(self):
        """'list-names' must be deterministic over a tree with duplicates."""
        # make sure result is same if ROS_ROOT=ROS_PACKAGE_PATH
        rpp = os.path.abspath('structure_test')
        retcode, retval = self._rospack_list_names(rpp)
        self.assertEquals(0, retcode)
        # run again and compare: duplicate removal must yield identical output
        retcode2, retval2 = self._rospack_list_names(rpp)
        self.assertEquals(0, retcode2)
        self.assertEquals(retval, retval2, "rospack list-names did not remove duplicates")
    def test_rospack_list_names_no_rpp(self):
        """'list-names' must report exactly the structure_test package names."""
        rpp = os.path.abspath('structure_test')
        expected = set(structure_test.copy().keys())
        retcode, retval = self._rospack_list_names(rpp)
        self.assertEquals(0, retcode)
        self.assertEquals(expected, set(retval.split()))
    #TODO: symlink test
    #TODO: test with ros package path
    ################################################################################
    ## rospack find
    ## test rospack find on non-existent package
    def test_rospack_find_fail(self):
        """'find' on a package that does not exist must fail."""
        rpp = os.path.abspath('test_empty')
        self.erospack_fail(rpp, 'package', 'find')
    ## test rospack find with ros_package_path set directly to a package
    def test_rospack_find_direct(self):
        """'find' must resolve a package whose directory is itself listed on
        ROS_PACKAGE_PATH."""
        testp = os.path.abspath('test')
        package1p = os.path.abspath(os.path.join('structure_test', 'package1'))
        self.erospack_succeed(testp + ':' + package1p, 'package1', 'find')
        self.assertEquals(package1p, self.erun_rospack(testp + ':' + package1p, 'package1', 'find'))
    ## test rospack find with ros_package_path set directly to a package,
    ## where that package contains a rospack_nosubdirs file, #3191.
    def test_rospack_find_direct_with_rospack_nosubdirs(self):
        """A direct-path entry must still be found when the package carries a
        rospack_nosubdirs marker file."""
        testp = os.path.abspath('test')
        package2p = os.path.abspath(os.path.join('structure_test', 'package2'))
        self.erospack_succeed(testp + ':' + package2p, 'package2', 'find')
        self.assertEquals(package2p, self.erun_rospack(testp + ':' + package2p, 'package2', 'find'))
    def test_rospack_find_no_rpp(self):
        """'find' must return the expected path for every structure_test package."""
        rpp = os.path.abspath('structure_test')
        expected = structure_test.copy()
        for package,path in expected.items():
            self.erospack_succeed(rpp, package, 'find')
            self.assertEquals(path, os.path.abspath(self.erun_rospack(rpp, package, 'find')))
    #TODO: symlink test
    #TODO: test with ros package path
    ################################################################################
    ## DEPENDENCIES
    def test_deps(self):
        """'deps' must list all transitive dependencies in order, including a
        100-deep linear chain."""
        depth_list = ['depth-%s'%i for i in range(1, 101)]
        depth_list.reverse()
        tests = [
            (["base","base_two"], "deps"),
            (["base","base_two","deps"], "deps_higher"),
            (["base","base_two","deps","deps_higher"],"deps_dup"),
            (depth_list, "depth-0")
            ]
        self.check_ordered_list('deps', tests)
    def test_deps1(self):
        """'deps1' must list only direct (first-level) dependencies."""
        tests = [
            (["base","base_two"], "deps"),
            (["deps"], "deps_higher"),
            (["depth-1"], "depth-0"),
            (["depth-99"], "depth-98"),
            ]
        self.check_ordered_list('deps1',tests)
    def test_deps_invalid(self):
        """'deps' must fail when a dependency's manifest is invalid."""
        self.rospack_fail("deps_invalid", "deps")
    def test_depends_on(self):
        """'depends-on' must list all packages that transitively depend on the
        target (order unspecified)."""
        depth_list = ['depth-%s'%i for i in range(0, 100)]
        depth_list.reverse()
        self.rospack_succeed("deps", "depends-on")
        tests = [
            (["plugins", "deps_dup", "deps", "deps_higher"], "base"),
            (["deps_higher","deps_dup"], "deps"),
            ([], "depth-0"),
            (depth_list, "depth-100"),
            ]
        self.check_unordered_list("depends-on", tests)
    def test_depends_on1(self):
        """'depends-on1' must list only direct reverse dependencies."""
        # sanity check first
        self.rospack_succeed("deps", "depends-on")
        tests = [
            (["deps_higher"], "deps"),
            (["deps", "deps_dup", "plugins"], "base"),
            (["deps", "deps_dup"], "base_two"),
            ]
        self.check_unordered_list("depends-on1", tests)
    def test_depends_on_nonexistent(self):
        """'depends-on' must still work when the target package is missing but
        something declares a dependency on it."""
        # NOTE(review): elsewhere the call convention is
        # rospack_fail(package, command); here the second argument is a
        # package name, not a command — confirm the intended argument order.
        self.rospack_fail("deps", "deps_nonexistent")
        self.rospack_fail("deps", "nonexistentpackage")
        tests = [
            (["deps_nonexistent"], "nonexistentpackage"),
            ]
        self.check_ordered_list("depends-on", tests)
    def test_lflags_base(self):
        """'libs-only-l' must return the package's own -l library names."""
        self.rospack_succeed("base", "libs-only-l")
        self.assertEquals("foo", self.run_rospack("base", "libs-only-l"))
def test_circular(self):
testp = os.path.abspath("test")
self.erospack_fail(testp + ':' + os.path.abspath("test_circular/cycle0"), "self_ref", "deps")
self.erospack_fail(testp + ':' + os.path.abspath("test_circular/cycle1"), "friend1", "deps")
self.erospack_fail(testp + ':' + os.path.abspath("test_circular/cycle1"), "friend2", "deps")
self.erospack_fail(testp + ':' + os.path.abspath("test_circular/cycle2"), "friend1", "deps")
self.erospack_fail(testp + ':' + os.path.abspath("test_circular/cycle2"), "friend2", "deps")
self.erospack_fail(testp + ':' + os.path.abspath("test_circular/cycle2"), "friend3", "deps")
self.erospack_fail(testp + ':' + os.path.abspath("test_circular/cycle2"), "friend3", "depends-on")
    def test_lflags_backquote(self):
        """Backquoted (shell-expanded) lflags in the manifest must be expanded."""
        self.rospack_succeed("backquote", "libs-only-l")
        self.assertEquals("loki foo backquote", self.run_rospack("backquote", "libs-only-l"))
    def test_backquote_invalid(self):
        """A backquote expression that fails to expand must make rospack fail."""
        self.rospack_fail("backquote_invalid", "libs-only-other")
# Strip out '/opt/ros' and friends from flags before checking them
def strip_opt_ros(self, flags):
prefix = '/opt/ros'
if 'ROS_BINDEPS_PATH' in os.environ:
prefix = os.environ['ROS_BINDEPS_PATH']
tostrip = [prefix + '/lib',
prefix + '/include',
'-L' + prefix + '/lib',
'-I' + prefix + '/include',
'-Wl,-rpath,' + prefix + '/lib']
res = ''
for f in flags.split(' '):
if f and f not in tostrip:
if len(res) > 0:
res += ' '
res += f
return res
    def test_Lflags_backquote(self):
        """Backquoted -L flags must be expanded (install-prefix paths stripped)."""
        self.rospack_succeed("backquote", "libs-only-L")
        self.assertEquals("odin", self.strip_opt_ros(self.run_rospack("backquote", "libs-only-L")))
    def test_cflags_backquote(self):
        """Backquoted -I flags must be expanded (install-prefix paths stripped)."""
        self.rospack_succeed("backquote", "cflags-only-I")
        self.assertEquals("blah backquote", self.strip_opt_ros(self.run_rospack("backquote", "cflags-only-I")))
def test_cflags_platform_specific(self):
self.rospack_succeed("platform_specific_exports", "cflags-only-other")
myos = platform.system()
if myos == 'Linux':
self.assertEquals("-DLINUX", self.run_rospack("platform_specific_exports", "cflags-only-other"))
elif myos == 'Darwin':
self.assertEquals("-DAPPLE", self.run_rospack("platform_specific_exports", "cflags-only-other"))
elif myos == 'Windows':
self.assertEquals("-DWINDOWS", self.run_rospack("platform_specific_exports", "cflags-only-other"))
else:
self.assertEquals("-DOTHER", self.run_rospack("platform_specific_exports", "cflags-only-other"))
self.assertEquals("blah backquote", self.strip_opt_ros(self.run_rospack("backquote", "cflags-only-I")))
    def test_lflags_archive(self):
        """A full .a archive path must appear in libs-only-l, and non-library
        lflags in libs-only-other."""
        self.rospack_succeed("lflags_with_archive_lib", "libs-only-l")
        self.assertEquals("/usr/lib/libfoo.a", self.run_rospack("lflags_with_archive_lib", "libs-only-l"))
        self.rospack_succeed("lflags_with_archive_lib", "libs-only-other")
        self.assertEquals("/a/bad/flag", self.run_rospack("lflags_with_archive_lib", "libs-only-other"))
    def test_lflags_deps(self):
        """libs-only-l must include the package's own and its deps' libraries."""
        self.rospack_succeed("deps", "libs-only-l")
        self.assertEquals("loki foo bar", self.run_rospack("deps", "libs-only-l"))
    def test_lflags_deps_only(self):
        """--deps-only must exclude the package's own libraries."""
        self.rospack_succeed("deps", "libs-only-l --deps-only")
        self.assertEquals("foo bar", self.run_rospack("deps", "libs-only-l --deps-only"))
    def test_empty_lflags(self):
        """All libs-only variants must print nothing for a package whose deps
        export no lflags."""
        tests = [([], "deps_empty")]
        commands = ["libs-only-l", "libs-only-L", "libs-only-other"]
        for c in commands:
            self.check_ordered_list(c, tests)
    def test_empty_cflags(self):
        """All cflags-only variants must print nothing for a package whose
        deps export no cflags."""
        tests = [([], "deps_empty")]
        commands = ["cflags-only-I", "cflags-only-other"]
        for c in commands:
            self.check_ordered_list(c, tests)
    def test_empty_vcs(self):
        """'vcs'/'vcs0' must print empty type/url fields when unset in manifests."""
        self.rospack_succeed("empty", "vcs0")
        self.assertEquals("type: \turl:", self.run_rospack("empty", "vcs0"))
        self.rospack_succeed("deps_empty", "vcs")
        self.assertEquals("type: svn\turl: \ntype: \turl:", self.run_rospack("deps_empty", "vcs"))
    def test_vcs_no_type_or_url(self):
        """'vcs0' must print nothing when the manifest has no versioncontrol tag."""
        self.rospack_succeed("vc_no_type_or_url", "vcs0")
        self.assertEquals("", self.run_rospack("vc_no_type_or_url", "vcs0"))
    def test_lflags_no_package_attrib(self):
        """A manifest missing the package attribute must make libs-only-l fail."""
        self.rospack_fail("no_package_attribute", "libs-only-l")
    def test_lflags_invalid(self):
        """An invalid manifest must make libs-only-l fail."""
        self.rospack_fail("invalid", "libs-only-l")
    def test_vcs_invalid(self):
        """An invalid manifest must make vcs fail."""
        self.rospack_fail("invalid", "vcs")
    def test_deps1_invalid(self):
        """An invalid manifest must make deps1 fail."""
        self.rospack_fail("invalid", "deps1")
    def test_vcs0_deps(self):
        """'vcs0' must print nothing for a package without its own vcs info."""
        self.rospack_succeed("deps", "vcs0")
        self.failIf(self.run_rospack("deps", "vcs0"))
    def test_vcs_deps(self):
        """'vcs' must aggregate the vcs entries of the dependency tree."""
        self.rospack_succeed("deps", "vcs")
        self.assertEquals("type: svn\turl: https://ros.svn.sourceforge.net/svnroot/ros/trunk\n"+
                          "type: svn\turl: https://ros.svn.sourceforge.net/svnroot/ros/branches", self.run_rospack("deps", "vcs"))
    def test_deps_manifests(self):
        """'deps-manifests' must print the manifest.xml paths of all deps."""
        self.rospack_succeed("deps", "deps-manifests")
        testp = os.path.abspath('test')
        expected = os.path.join(testp, 'base/manifest.xml') + ' ' + os.path.join(testp, 'base_two/manifest.xml')
        self.assertEquals(expected,
                          self.run_rospack("deps", "deps-manifests"))
    def test_deps_indent(self):
        """'deps-indent' must print the dependency tree with 2-space indents."""
        self.rospack_succeed("deps_higher", "deps-indent")
        testp = os.path.abspath('test')
        expected = 'deps\n  base\n  base_two'
        self.assertEquals(expected,
                          self.run_rospack("deps_higher", "deps-indent"))
    def _rospack_langs(self, ros_package_path, ros_lang_disable):
        """Run 'rospack langs' with ROS_PACKAGE_PATH and ROS_LANG_DISABLE set
        (each removed from the env when None); return (returncode, stdout)."""
        env = os.environ.copy()
        if ros_package_path is not None:
            env[ROS_PACKAGE_PATH] = ros_package_path
        elif ROS_PACKAGE_PATH in env:
            del env[ROS_PACKAGE_PATH]
        if ros_lang_disable is not None:
            env[ROS_LANG_DISABLE] = ros_lang_disable
        elif ROS_LANG_DISABLE in env:
            del env[ROS_LANG_DISABLE]
        args = [ROSPACK_PATH, 'langs']
        p = Popen(args, stdout=PIPE, stderr=PIPE, env=env)
        retval = p.communicate()[0]
        return p.returncode, retval.strip().decode('ascii')
    def test_langs(self):
        """'langs' must report all roslang packages in the tree."""
        rpp = os.path.abspath('test')
        retcode, retval = self._rospack_langs(rpp, None)
        self.assertEquals(0, retcode)
        # No guarantees on ordering of lang result
        l = retval.split()
        s = set(l)
        expected = set(['rosfoo', 'rosbar'])
        self.assertEquals(s, expected)
    def test_langs_disable(self):
        """Languages listed in ROS_LANG_DISABLE must be omitted from 'langs'."""
        rpp = os.path.abspath('test')
        disable = 'rosfoo'
        retcode, retval = self._rospack_langs(rpp, disable)
        self.assertEquals(0, retcode)
        # No guarantees on ordering of lang result
        l = retval.split()
        s = set(l)
        expected = set(['rosbar'])
        self.assertEquals(s, expected)
    def test_langs_empty(self):
        """'langs' over a tree without roslang packages must print nothing."""
        rpp = os.path.abspath('test2')
        retcode, retval = self._rospack_langs(rpp, None)
        self.assertEquals(0, retcode)
        self.failIf(retval, "rospack langs on empty directory returned value %s"%retval)
    # Test auto-inclusion of msg_gen include directories, #3018
    def test_msg_gen(self):
        """Packages with a msg_gen tree must get its include dir auto-added to
        cpp cflags, but NOT to other langs/attribs (#3884)."""
        test_path = os.path.abspath('test')
        pkgs = ['msg_gen_no_export', 'msg_gen_no_cpp', 'msg_gen_no_cflags']
        for p in pkgs:
            self.rospack_succeed(p, "cflags-only-I")
            self.assertEquals(os.path.join(test_path, p, "msg_gen/cpp/include"), self.strip_opt_ros(self.run_rospack(p, "cflags-only-I")))
        # Also test that we don't get auto-inclusion of msg_gen when we're
        # asking for a different lang / attrib, #3884
        pkg = 'msg_gen_no_cpp'
        cmd = 'export --lang=cpp --attrib=lflags'
        self.rospack_succeed(pkg, cmd)
        self.assertEquals('', self.strip_opt_ros(self.run_rospack(pkg, cmd)))
        cmd = 'export --lang=foo --attrib=bar'
        self.rospack_succeed(pkg, cmd)
        self.assertEquals('bat', self.run_rospack(pkg, cmd))
    # Test that -q option suppresses errors, #3177.
    def test_quiet_option(self):
        """'-q' must suppress stderr output on failure; without it the same
        failure must produce diagnostics on stderr."""
        rpp = os.path.abspath('test')
        # With -q: look for non-existent package, make sure that it fails, yet
        # produces nothing on stderr.
        status_code, stdout, stderr = self._run_rospack(rpp, 'nonexistentpackage', 'find -q')
        self.assertNotEquals(0, status_code)
        self.assertEquals(0, len(stderr))
        # Without -q: look for non-existent package, make sure that it fails,
        # and produces somthing on stderr.
        status_code, stdout, stderr = self._run_rospack(rpp, 'nonexistentpackage', 'find')
        self.assertNotEquals(0, status_code)
        self.assertNotEquals(0, len(stderr))
| [
"b1504000@planet.kanazawa-it.ac.jp"
] | b1504000@planet.kanazawa-it.ac.jp |
3a92a8f34525fff96eff72b5761fd50be60140c6 | 4b96315f283542152e54f7adae9cc5469c836277 | /news_blog/urls.py | 0b61538b098944e9f79cdb1d1a1529fe4d18f7a6 | [] | no_license | judekvn/new_blog | 0f64c14f9218eb0782c13338c43c84f2d06e4ad7 | e67f5cb02e93499e628cd28a3d1aa553bf8e9b1a | refs/heads/master | 2020-05-27T20:40:15.815062 | 2019-05-27T06:12:55 | 2019-05-27T06:12:55 | 188,781,965 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,011 | py | """news_blog URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
from django.urls import path, include
from django.conf import settings
from django.conf.urls.static import static
# URL routing table: admin site, blog API, and auth endpoints; media files
# are served from MEDIA_ROOT during development via static().
urlpatterns = [
    path('admin/', admin.site.urls),
    path('api/', include('blog.urls')),
    path ('auth/', include('login.urls'))
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"47389259+judekvn@users.noreply.github.com"
] | 47389259+judekvn@users.noreply.github.com |
bfef20ad2f7d3b8f758bc11aaea1b4ffe2e29ed0 | bceb2a3d1793ae7852375b1bb96ea70db7859952 | /hackerrank/String Split and Join.py | 43fc2dca7e4245660c738d688b06388960c25905 | [] | no_license | bappi2097/hackerrank-python | 047958955a1d06edb27f6fafccbc99d376e0fa15 | 8d1479a58517714f475b18485b1e5bb37c86c2b9 | refs/heads/master | 2023-04-02T13:47:17.180967 | 2021-04-08T15:49:55 | 2021-04-08T15:49:55 | 268,575,537 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 201 | py | def split_and_join(line):
line = line.split(" ")
line = '-'.join(line)
return line
if __name__ == '__main__':
    # Read one line from stdin, hyphenate it, and print the result.
    line = input()
    result = split_and_join(line)
    print(result)
"bappi35-2097@diu.edu.bd"
] | bappi35-2097@diu.edu.bd |
00183aec3a7ed337f9c3c77304b469e033ffd08a | b7a244e3f3bb8b0a65d5b66d2af1e1306444b672 | /setup.py | 3cbac0eeeb0a6b32351a6ae3d9bbf59d0641aef7 | [] | no_license | Moose-cmd/alek.library | 297a6420ea73b6a4eecfe05f3f0428c85d062c23 | 4d81e15b726f1b3177a520d3c3047efd5ac54d13 | refs/heads/master | 2023-04-07T23:55:23.936563 | 2021-04-12T23:25:25 | 2021-04-12T23:25:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 972 | py | from setuptools import setup
# Package metadata for the 'alek' distribution.
DISTNAME = 'alek'
VERSION = '0.1.0'
DESCRIPTION = "Alek Chase's personal python library"
# Long description rendered as Markdown on PyPI (see
# long_description_content_type below).
LONG_DESCRIPTION = """
**alek** is a python library that contains useful
functions for Alek Chase
Available functions:
* Hello
* Delay Print
* Clear
* Get int will be working soon... **
Report any bugs or email us with questions: no-reply@gmail.com
"""
LICENSE = 'MIT'
AUTHOR = "Alek Chase"
EMAIL = 'no-reply@gmail.com'
URL = 'https://github.com/AlekTheCoder/alek'
KEYWORDS = ['alek']
REQUIREMENTS = []
PYTHON = ">=3.5"
# The package sources live under module/alek (see package_dir mapping).
setup(name=DISTNAME,
      packages=['alek'],
      package_dir={'alek': 'module/alek'},
      version=VERSION,
      description=DESCRIPTION,
      long_description=LONG_DESCRIPTION,
      long_description_content_type='text/markdown',
      license=LICENSE,
      author=AUTHOR,
      author_email=EMAIL,
      url=URL,
      keywords=KEYWORDS,
      install_requires=REQUIREMENTS,
      python_requires=PYTHON,
      )
"alekmani1@gmail.com"
] | alekmani1@gmail.com |
39549461a1c26617b353b75514751656bd458c33 | 9e8a94404a90d8d11dcd8452a9c1da0a71b36970 | /portfolio/migrations/0002_auto_20210526_1040.py | 9570bfdd6ec50fc6b3571ab637485c40cbbeffac | [] | no_license | bridgecrew-perf7/deployEdit | 509d06a1ef96df88ebe2116d07a8f6da8e470d92 | c9794c92544c89239e3786a27e870fb86411910b | refs/heads/master | 2023-06-03T04:00:41.889812 | 2021-06-15T18:09:28 | 2021-06-15T18:09:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,281 | py | # Generated by Django 3.2.2 on 2021-05-26 10:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration: adds description/image fields to the
    portfolio model and relaxes several existing fields to nullable with
    empty-string defaults."""
    dependencies = [
        ('portfolio', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='portfolio',
            name='description',
            field=models.CharField(default='', max_length=100, null=True),
        ),
        migrations.AddField(
            model_name='portfolio',
            name='image',
            field=models.ImageField(default='', null=True, upload_to='images/'),
        ),
        migrations.AlterField(
            model_name='portfolio',
            name='body',
            field=models.TextField(default='', null=True),
        ),
        migrations.AlterField(
            model_name='portfolio',
            name='pub_date',
            field=models.DateTimeField(default='', null=True),
        ),
        migrations.AlterField(
            model_name='portfolio',
            name='title',
            field=models.CharField(default='', max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='portfolio',
            name='writer',
            field=models.CharField(default='', max_length=100, null=True),
        ),
    ]
| [
"naturalflavor1007@naver.com"
] | naturalflavor1007@naver.com |
b36343e337e7ede3b68a7221786c70619ee858b7 | 1ea7facf372df3d712b755a2d7bbb6ce5072f1ad | /MutiThread/TestThread.py | ba74d08277bd324363872f9c9aaaf269eea128c1 | [] | no_license | chunonesoft/pythonCode | 87bd49849979a6476d3fc72d5710dc66ea9dc08b | 7ec2ab76479fe84706bfba83ef66717ff6d05904 | refs/heads/master | 2021-05-08T05:39:38.077403 | 2017-10-11T04:56:37 | 2017-10-11T04:56:37 | 106,498,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 586 | py | #coding=utf-8
import threading
from time import ctime,sleep
def music(func):
    # Print a "listening" message twice, sleeping between iterations.
    # NOTE(review): sleep(1000) pauses 1000 seconds per iteration — confirm
    # this duration is intended (Python 2 code; print is a statement here).
    for i in range(2):
        print "I was listening to %s. %s" %(func,ctime())
        sleep(1000)
def move(func):
    # Print a "watching" message twice, sleeping between iterations.
    # NOTE(review): sleep(5000) pauses 5000 seconds per iteration — confirm
    # this duration is intended.
    for i in range(2):
        print "I was at the %s! %s" %(func,ctime())
        sleep(5000)
# Build two daemon threads (one per activity) and start them; daemon threads
# are killed when the main thread exits, so the sleeps above do not block
# program termination.
threads = []
t1 = threading.Thread(target=music,args=(u'爱情买卖',))
threads.append(t1)
t2 = threading.Thread(target=move,args=(u'阿凡达',))
threads.append(t2)
if __name__ == '__main__':
    for t in threads:
        t.setDaemon(True)
        t.start()
print "all over %s" %ctime() | [
"chun_soft@qq.com"
] | chun_soft@qq.com |
a570f27a7c1170f47520d0fd62cc5ef08e71442c | 255e19ddc1bcde0d3d4fe70e01cec9bb724979c9 | /all-gists/833679/snippet.py | 4ab198d900965b661dbb57b70023f8d4c2106db6 | [
"MIT"
] | permissive | gistable/gistable | 26c1e909928ec463026811f69b61619b62f14721 | 665d39a2bd82543d5196555f0801ef8fd4a3ee48 | refs/heads/master | 2023-02-17T21:33:55.558398 | 2023-02-11T18:20:10 | 2023-02-11T18:20:10 | 119,861,038 | 76 | 19 | null | 2020-07-26T03:14:55 | 2018-02-01T16:19:24 | Python | UTF-8 | Python | false | false | 2,067 | py | #
# HashUtils - Simple functions derivated from standard Python hashlib.
#
__author__ = 'Mauro Baraldi (mauro@visie.com.br)'
__version__ = '0.0.2: February 17, 2011'
import re
import hashlib
from datetime import datetime
class Hash:
"""Common facilities using hashlib standard lib. Algotrithm used in all
methods: MD5
Returns of method is hashlib.md5(object_to_hash).hexdigest()
Example of use:
from hashutils import Hash
h = Hash()
>>> h.now_hash()
'b3036f7831dc1394f1dcb6b989561d79'
>>> h.today_hash()
'b3036f7831dc1394f1dcb6b989561d79'
>>> h.string_hash("My name is Earl.")
'ad05d8348194adf6d6190a2ae550e099'
>>> h.file_hash('/home/mauro/passwords.txt')
'404627e52574140007692512e3ce2fa9'
>>> h.file_hash('/home/mauro/passwords.txt', 1024)
'997dd0044bc676fdf3f9db0560e642d0'
>>> h.from_date_hash((2001, 3, 1, 12, 45), '%Y/%m/%d %H:%M')
'fc573499016722e5ff0747f2dc7f4971'
"""
def __init__(self):
pass
def today_hash(self):
""" Return hash form datetime.today() function in format %Y%m%d """
self.today = datetime.today().strftime('%Y%m%d')
return hashlib.md5(self.today).hexdigest()
def now_hash(self):
""" Return hash form datetime.today() function in format %Y%m%d%H%M%S """
self.today = datetime.today().strftime('%Y%m%d')
return hashlib.md5(self.today).hexdigest()
def from_date_hash(self, date, strfmt):
""" Return hash form date in datetime.date format (%Y%m%d) """
self.format = re.compile('[a-zA-z]').sub('d', strfmt)
self.build_date = datetime.strptime(self.format % date, strfmt)
self.date = self.build_date.strftime(strfmt)
return hashlib.md5(self.date).hexdigest()
def string_hash(self, string):
""" Return hash form a given string. """
return hashlib.md5(string).hexdigest()
def file_hash(self, fp, size=128):
""" Return hash form a given file. Default first 128 bytes."""
with open(fp, 'r+') as temp:
return hashlib.md5(temp.read(size)).digest()
| [
"gistshub@gmail.com"
] | gistshub@gmail.com |
bf35aa5ab817f1d0dcf49a942c19f45e60b83065 | 30de8c6cc36fb2138020bc3f9f2d0fd69c95afa9 | /Bai 18 Bieu thuc pass.py | d43a2675d7b84ce485d1dc2932e7f05c9b8fce18 | [] | no_license | doimoi9191/python4tuan | 915f8f284c1e3706a1d25b73bee262da070cb5f9 | 27ed3ac5f90681846855037438ae8db6aab2374b | refs/heads/master | 2020-03-09T01:13:56.148527 | 2018-05-18T10:19:31 | 2018-05-18T10:19:31 | 128,508,594 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | """
Biểu thức pass dùng để dành chỗ lập trình.
Ví dụ bạn biết chỗ đó phải viết nhiều mã nhưng tại thời điểm này chưa làm kịp
ta sẽ dung pass để đánh dấu vị trí đó
"""
a = float(input("Nhập hệ số a: "))
b = float(input("Nhập hệ số b: "))
if a==0:
pass#ta sẽ làm sau, đang mót ị gần chết
else:
x = -b/a
print("Nghiệm x=",-b/a) | [
"doimoi9191@noreply.github.com"
] | doimoi9191@noreply.github.com |
45d53ab9a5a80560123d3ca622a745c0f5be0a35 | 5ce4c0df5a1ec1e948a0e870e17f3e035dcf9ece | /app.py | 9ebaddc5b2e5694d19ee57a0dacb27bc301da1fb | [] | no_license | Estudos1/pythonMingodb | f275f5de9a665e38539893b8c28fc0aa26aaa249 | bece09ae4ec7df60b868fee7f3f84c2b9f6415d0 | refs/heads/main | 2023-06-05T00:57:06.393502 | 2021-06-21T00:57:08 | 2021-06-21T00:57:08 | 378,767,263 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,618 | py | from flask import Flask, request, jsonify, Response
from flask_pymongo import PyMongo
from pymongo import message
from werkzeug.security import generate_password_hash, check_password_hash
from bson import json_util
from bson.objectid import ObjectId
from werkzeug.wrappers import response
app = Flask(__name__)
app.config['MONGO_URI']='mongodb://localhost/pythonmongodb'
mongo = PyMongo(app)
@app.route('/users',methods=['POST'])
def create_user():
# Receiving data
username = request.json['username']#limao
password = request.json['password']
email = request.json['email']
if username and email and password:
hashed_password = generate_password_hash(password)
id = mongo.db.users.insert(
{'username': username, 'email': email, 'password': hashed_password}
)
response = {
'íd': str(id),
'username': username,
'password': hashed_password,
'email' : email
}
return response
else:
return not_found()
return {'message': 'received'}
@app.route ('/users', methods=['GET'])
def get_users():
users = mongo.db.users.find()
response = json_util.dumps(users)
return Response (response, mimetype='application/json')
@app.route('/users/<id>', methods = ['GET'])
def get_user(id):
user = mongo.db.users.find_one({'_id': ObjectId(id)})
response = json_util.dumps(user)
return Response (response, mimetype="appplication/json")
@app.route('/users/<id>', methods=['DELETE'])
def delete_users(id):
mongo.db.users.delete_one({'_id':ObjectId(id)})
response = jsonify({'message': 'User' + id + 'was Deleted succesfully'})
return response
@app.route('/users/<id>', methods = ['PUT'])
def update_user(id):
username = request.json['username']
email = request.json['email']
password = request.json['password']
if username and email and password:
hashed_password = generate_password_hash(password)
mongo.db.users.update_one({'_id': ObjectId(id)}, {'$set':{
'username': username,
'password': hashed_password,
'email': email
}})
response = jsonify({'message': 'User' + id + 'was updated succefully'})
return response
@app.errorhandler(404)
def not_found(error=None):
response = jsonify({
'message': 'Resurce Not found:' + request.url,
'status': 404
})
response.status_code = 404
return response
if __name__== "__main__":
app.run(debug=True) | [
"noreply@github.com"
] | Estudos1.noreply@github.com |
610a51d5b47b4c57f28128ba5e35a1277cae3338 | d99e2102d4b79c31560b5a799eb8ea0b141a409e | /main.py | 80d992f98604c8ca28ac63cc14877097f9ac1321 | [] | no_license | imosafi/DNN_FC | e806fb53c9e6f46e363a3f18c1cfcb69955531b0 | a70b49106abec90e9064edeaf136245110a007bf | refs/heads/master | 2020-03-17T15:28:20.193449 | 2018-06-16T17:27:00 | 2018-06-16T17:27:00 | 133,712,055 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 555 | py | import os
import sys
from data_loader import DataLoader
from fc_model import FCClassifierModel
from utils import create_confusion_matrix
def main():
data_loader = DataLoader()
train_set, test_set = data_loader.load_dataset('mnist', input_normalization=False)
model = FCClassifierModel([784, 200, 100, 10], activation='relu')
model.train(train_set, epochs=30, val=test_set, LR=0.001, use_minibatch=False, use_decreasing_lr=False)
create_confusion_matrix(model, test_set, categories_num=10)
if __name__ == '__main__':
main()
| [
"itai.mosafi@live.biu.ac.il"
] | itai.mosafi@live.biu.ac.il |
a5ba5bef1c1edc9aa06f3fe87232501307f1a1b2 | c61c9bedba1968bfaf571ac3996b696fc35890a6 | /Chapter16/16-3.py | ce29436d126c588d3560f122a3141296ca60d21e | [] | no_license | ArunRamachandran/ThinkPython-Solutions | 497b3dbdeba1c64924fe1d9aa24204a9ca552c5b | 1a0872efd169e5d39b25134960168e3f09ffdc99 | refs/heads/master | 2020-04-01T10:23:20.255132 | 2014-11-07T17:04:52 | 2014-11-07T17:04:52 | 25,806,318 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 652 | py | class Time(object):
''' to represent the time of a day '''
t1 = Time()
t2 = Time()
t1.h = 4
t1.m = 185
t1.s = 0
t2.h = 1
t2.m = 56
t2.s = 0
def add_time(t1,t2):
sum_time = Time()
sum_time.h = t1.h + t2.h
sum_time.m = t1.m + t2.m
sum_time.s = t1.s + t2.s
if sum_time.s > 60:
val = sum_time.s / 60
sum_time.s -= (60 * val)
sum_time.m += val
if sum_time.m > 60:
val_1 = sum_time.m / 60
sum_time.m -= (60 * val_1)
sum_time.h += val_1
print '%.2d:%.2d:%.2d' % (sum_time.h,sum_time.m,sum_time.s)
print "t1 ",
print '%.2d:%.2d:%.2d' % (t1.h, t1.m, t1.s)
print "t2 ",
print '%.2d:%.2d:%.2d' % (t2.h, t2.m, t2.s)
add_time(t1,t2)
| [
"arunkramachandran92@gmail.com"
] | arunkramachandran92@gmail.com |
c68df435b51439815275e182cb0540bdcb066468 | cfae3e1fc4df3c32d95979525a8742ccf0222b14 | /src/main_split.py | 9d19997b3bd044444d2ee77d9234561bcf79ca26 | [] | no_license | 1hyx/addresssplit | 221a5c112c5d7b8ebfb99284d39c03b52ef2aa5e | d18069ff642509122c7a6f2b1cde3cb43ff394ac | refs/heads/master | 2020-07-12T15:44:16.061727 | 2019-09-04T08:43:23 | 2019-09-04T08:43:23 | 204,855,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,265 | py | """
author: huyuxin
date: 2019/8/28
用于分隔中国收费系统中常见的地址栏信息的规则分词
Use to Separate the words of detailed address information from China bank system, to get key words of different kinds of stores or companies
数据说明:
Data description:
一般存在的类别分为线上和线下消费记录
records divided by channels : online or offline
nowadays, in China, there are two main channel: Alipay and Tenpay which people always bind their credit cards on.
线上记录在进行记录时会出现分隔符,”, , - — :"等,这些分隔符成为了线上记录的标志,同时往往最后一个分割信息是商户的具体信息
significantly, the online records will usually contains seps, such as ", ,- —"etc. meanwhile, seps are specific for
online records. Constantly, the last separation is the detailed information for shops or companies.
排除线上和线下记录的不同点,一致要分割出的部分是商户信息中的类型信息、地理位置信息
except for differences between online and offline, must get the management type and location information from address
另外:由于报文对字符串的长度有限制,所以会出现不完整但有极强暗示完整信息的地址,设计进行补全
by the way, the bank system has the limitation on the length of address, so that there will be some incomplete but with
the remaining has strong implications, which we can design to complete.
使用资料:
reference materials:
中国所有省级行政区划地区名
all provinces in China
中国所有省会城市名
all provincial capital in China
对应的补完方式对应(自己设计)
the collection for incomplete —> complete couples(diy)
公司或店铺经营类型的后缀(自己收集)
the suffice of company and store to imply the management types(collected by myself)
"""
import pandas as pd
# 平台名 Common payment channels
payment_channels = pd.read_csv('../data/payment_channels.txt', header=None, encoding='gbk').values.tolist()
# 省级行政区划信息 province
province_list = pd.read_csv('../data/province.txt', header=None, encoding='gbk').values.tolist()
# 省会信息 center_city
center_city_list = pd.read_csv('../data/center_city.txt', header=None, encoding='gbk').values.tolist()
# 分割符号 seps
sep_list = pd.read_csv('../data/sep.txt', header=None, encoding='gbk', delimiter="\n").values.tolist()
# 后缀名 suffice
suffix_list = pd.read_csv('../data/company_type.txt', header=None, encoding='gbk').values.tolist()
# 补全对照表 part_full_table (type: DataFrame)
part_full_df = pd.read_csv('../data/part_company.txt', header=None, encoding='gbk')
# 经营类型表
company_list = pd.read_csv('../data/company_type.txt', header=None, encoding='gbk').values.tolist()
# 字符过多,财付通分割
def wechat_split(info, item):
wechat_list = ['财付通委托扣款方式', '财付通委托扣款方', '财付通委托扣款', '财付通委托扣', '财付通委托', '财付通委',
'财付通', '财付']
for i, we in enumerate(wechat_list):
flag = 0
index = item.find(we, 0)
if index != -1:
flag = 1
info['online_sign'] = '1'
info['sep_first'] = '财付通委托扣款方式'
if index == 0:
info['sep_last'] = item[index+(10-i):]
item = item[index+(10-i):]
return flag, info, item
else:
info['sep_last'] = item[:index]
item = item[:index]
return flag, info, item
else:
return flag, info, item
# 特殊字符分割
def sep_middle(info, item):
titles = ['sep_first', 'sep_last']
for sep in sep_list:
index_ = item.find(sep[0], 0)
if index_ != -1:
info['online_sign'] = '1'
items = item.split(sep[0])
info[titles[0]] = items[0]
info[titles[1]] = items[-1]
rest = items[-1]
return 1, info, rest
else:
return 0, info, item
# 中划线出现常伴随字符串过长被截断情形
# 针对可推测的情况进行补全
def part_company(info, item):
part_list = part_full_df[0].values.tolist()
full_list = part_full_df[1].values.tolist()
item_ends = [item[-5:], item[-4:], item[-3:], item[-2:]]
for i, item_end in enumerate(item_ends):
if item_end in part_list:
index = part_list.index(item_end)
info['management_type'] = full_list[index]
item = item[:-(5-i)]+full_list[index]
return info, item
else:
return info, item
# # 括号分割,将括号中的内容取出,并合并字符,考虑只有左括号的情形
def sep_brackets(info, item):
index_right = item.find(')', 0)
index_left = item.find('(', 0)
if index_right != -1:
info['bracket_content'] = item[index_left+1:index_right]
item = item[0:index_left]+item[index_right+1:]
return info, item
elif index_left != -1:
info['bracket_content'] = item[index_left+1:]
item = item[0:index_left]
return info, item
else:
return info, item
# # 将公司部分的信息切割
def company_split(info, item):
for com in company_list:
index = item.find(com[0], 0)
if index != -1:
info['management_type'] = com[0]
item = item[:index]
return info, item
else:
return info, item
# 地理位置的切割
def geography_split(info, item):
for pro in province_list:
index = item.find(pro[0], 0)
pro_len = len(pro[0])
if index != -1:
info['province'] = pro[0]
if index == 0:
item = item[index+pro_len:]
else:
item_temp = item[:index]+item[index+pro_len:]
if item_temp.find('公司'):
item = item_temp
else:
item = item[:index]
return info, item
else:
return info, item
# 省会城市标签
def center_city_split(info, item):
for pro in center_city_list:
index = item.find(pro[0], 0)
pro_len = len(pro[0])
if index != -1:
info['city'] = pro[0]
if index == 0:
item = item[index + pro_len:]
else:
item = item[:index]
return info, item
else:
return info, item
# 非省会非省地理信息切割
# 排除市本身的词汇情况:市区
def city_split(info, item):
special = ['市区', '市民', '超市', '城市']
f = 0
for spe in special:
pre_index = item.find(spe)
if pre_index != -1:
f = 1
break
if f == 0:
index = item.find('市')
if index != -1:
info['city'] = item[:index+1]
item = item[index+1:]
return info, item
else:
return info, item
else:
return info, item
def main_split(file, result_folder, result_name):
if type(file) == str:
names = pd.read_csv(file, dtype=str, header=None).values.tolist()
else:
names = file
final_list = []
n = len(names)
for i, name in enumerate(names):
name_in_use = name[0]
res0 = {'origin': name_in_use}
flag1, res1, item1 = sep_middle(res0, name[0])
flag2, res2, item2 = wechat_split(res0, name[0])
if flag1 == 1:
res_new, rest = part_company(res1, item1)
res_new, rest = sep_brackets(res_new, rest)
res_new1, rest1 = geography_split(res_new, rest)
res_new2, rest2 = center_city_split(res_new1, rest1)
res_final, rest_final = city_split(res_new2, rest2)
res_final['name'] = rest_final
final_list.append(res_final)
elif flag2 == 1:
res_new, rest = part_company(res2, item2)
res_new1, rest1 = geography_split(res_new, rest)
res_new2, rest2 = center_city_split(res_new1, rest1)
res_final, rest_final = city_split(res_new2, rest2)
res_final['name'] = rest_final
print(res_final)
final_list.append(res_final)
else:
res1, name1 = sep_brackets(res0, name_in_use)
res2, name2 = geography_split(res1, name1)
res21, name21 = center_city_split(res2, name2)
res3, name3 = company_split(res21, name21)
res4, name4 = city_split(res3, name3)
res4['name'] = name4
final_list.append(res4)
if i + 1 == n:
percent = 100.0
print('当前核算进度 : %s [%d/%d]' % (str(percent) + '%', i + 1, n), end='\n')
else:
percent = round(1.0 * i / n * 100, 2)
print('当前核算进度 : %s [%d/%d]' % (str(percent) + '%', i + 1, n), end='\r')
df = pd.DataFrame(final_list)
result_path = result_folder + result_name
df = df[['origin', 'online_sign', 'province', 'city', 'name', 'management_type', 'bracket_content',
'sep_first', 'sep_last']]
df.to_csv(result_path, index=None)
return result_path
| [
"hyx552211@163.com"
] | hyx552211@163.com |
5e875e702c4451a5fc79d1144425698fbc263c61 | 9743d5fd24822f79c156ad112229e25adb9ed6f6 | /xai/brain/wordbase/adjectives/_twining.py | 87ddda74e438d11af092105cfd9569d7a62ef7c6 | [
"MIT"
] | permissive | cash2one/xai | de7adad1758f50dd6786bf0111e71a903f039b64 | e76f12c9f4dcf3ac1c7c08b0cc8844c0b0a104b6 | refs/heads/master | 2021-01-19T12:33:54.964379 | 2017-01-28T02:00:50 | 2017-01-28T02:00:50 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 238 | py |
from xai.brain.wordbase.adjectives._twin import _TWIN
#calss header
class _TWINING(_TWIN, ):
def __init__(self,):
_TWIN.__init__(self)
self.name = "TWINING"
self.specie = 'adjectives'
self.basic = "twin"
self.jsondata = {}
| [
"xingwang1991@gmail.com"
] | xingwang1991@gmail.com |
e5a38c5a9fdd07a080234ac8bd5a5abbc1104502 | c1f64beb26fd6c895eaf80c02d23fc5dca932c9a | /main/kerbspaceHack/swagger_client/models/authority.py | d80a972b040279d02621418e007a4e403ed7c5a5 | [] | no_license | jmanchuck/citymaas | 43766bd3311ce90fda39fa66f30b1f8732c660c1 | c5d5c8916210355aabb8d6163a1e34c96e6a10c4 | refs/heads/master | 2020-09-22T03:14:05.827849 | 2019-12-01T23:55:23 | 2019-12-01T23:55:23 | 225,029,484 | 1 | 2 | null | 2019-12-01T23:55:24 | 2019-11-30T15:09:56 | Python | UTF-8 | Python | false | false | 4,408 | py | # coding: utf-8
"""
Kerbside Curblr Api
API for serving kerbside assets. Data is served in CurbLR format https://github.com/sharedstreets/curblr # noqa: E501
OpenAPI spec version: 1.0.0
Contact: tbd@ford.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Authority(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'name': 'str',
'phone': 'str',
'url': 'str'
}
attribute_map = {
'name': 'name',
'phone': 'phone',
'url': 'url'
}
def __init__(self, name=None, phone=None, url=None): # noqa: E501
"""Authority - a model defined in Swagger""" # noqa: E501
self._name = None
self._phone = None
self._url = None
self.discriminator = None
self.name = name
self.phone = phone
self.url = url
@property
def name(self):
"""Gets the name of this Authority. # noqa: E501
:return: The name of this Authority. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Authority.
:param name: The name of this Authority. # noqa: E501
:type: str
"""
if name is None:
raise ValueError("Invalid value for `name`, must not be `None`") # noqa: E501
self._name = name
@property
def phone(self):
"""Gets the phone of this Authority. # noqa: E501
:return: The phone of this Authority. # noqa: E501
:rtype: str
"""
return self._phone
@phone.setter
def phone(self, phone):
"""Sets the phone of this Authority.
:param phone: The phone of this Authority. # noqa: E501
:type: str
"""
if phone is None:
raise ValueError("Invalid value for `phone`, must not be `None`") # noqa: E501
self._phone = phone
@property
def url(self):
"""Gets the url of this Authority. # noqa: E501
:return: The url of this Authority. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this Authority.
:param url: The url of this Authority. # noqa: E501
:type: str
"""
if url is None:
raise ValueError("Invalid value for `url`, must not be `None`") # noqa: E501
self._url = url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Authority, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Authority):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| [
"jjcheung0000@gmail.com"
] | jjcheung0000@gmail.com |
26c120e5190fda44553d068bb9c66537bda7f27a | 1e0d2a2b296d70067fc7dd5d6e3280c53a7ca9b3 | /src/network.py | 0383e69400229cd07fe8859a8f46f3b005b42a4d | [] | no_license | gabr/NeuralNetworkDigits | 564ebdcc2bafba3e04e7a91bfc01f10631beb42d | e0af756f49c5960e29657187e0748f037463ad2a | refs/heads/master | 2021-01-10T11:10:33.298078 | 2015-10-26T09:50:44 | 2015-10-26T09:50:44 | 44,958,364 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,206 | py | """
network.py
~~~~~~~~~~
A module to implement the stochastic gradient descent learning
algorithm for a feedforward neural network. Gradients are calculated
using backpropagation. Note that I have focused on making the code
simple, easily readable, and easily modifiable. It is not optimized,
and omits many desirable features.
"""
#### Libraries
# Standard library
import random
# Third-party libraries
import numpy as np
class Network(object):
def __init__(self, sizes):
"""The list ``sizes`` contains the number of neurons in the
respective layers of the network. For example, if the list
was [2, 3, 1] then it would be a three-layer network, with the
first layer containing 2 neurons, the second layer 3 neurons,
and the third layer 1 neuron. The biases and weights for the
network are initialized randomly, using a Gaussian
distribution with mean 0, and variance 1. Note that the first
layer is assumed to be an input layer, and by convention we
won't set any biases for those neurons, since biases are only
ever used in computing the outputs from later layers."""
self.num_layers = len(sizes)
self.sizes = sizes
self.biases = [np.random.randn(y, 1) for y in sizes[1:]]
self.weights = [np.random.randn(y, x)
for x, y in zip(sizes[:-1], sizes[1:])]
def feedforward(self, a):
"""Return the output of the network if ``a`` is input."""
for b, w in zip(self.biases, self.weights):
a = sigmoid(np.dot(w, a)+b)
return a
def SGD(self, training_data, epochs, mini_batch_size, eta,
test_data=None):
"""Train the neural network using mini-batch stochastic
gradient descent. The ``training_data`` is a list of tuples
``(x, y)`` representing the training inputs and the desired
outputs. The other non-optional parameters are
self-explanatory. If ``test_data`` is provided then the
network will be evaluated against the test data after each
epoch, and partial progress printed out. This is useful for
tracking progress, but slows things down substantially."""
result = [0 for _ in xrange(epochs)]
if test_data: n_test = len(test_data)
n = len(training_data)
for j in xrange(epochs):
random.shuffle(training_data)
mini_batches = [
training_data[k:k+mini_batch_size]
for k in xrange(0, n, mini_batch_size)]
for mini_batch in mini_batches:
self.update_mini_batch(mini_batch, eta)
result[j] = self.evaluate(test_data)
return result
def update_mini_batch(self, mini_batch, eta):
"""Update the network's weights and biases by applying
gradient descent using backpropagation to a single mini batch.
The ``mini_batch`` is a list of tuples ``(x, y)``, and ``eta``
is the learning rate."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
for x, y in mini_batch:
delta_nabla_b, delta_nabla_w = self.backprop(x, y)
nabla_b = [nb+dnb for nb, dnb in zip(nabla_b, delta_nabla_b)]
nabla_w = [nw+dnw for nw, dnw in zip(nabla_w, delta_nabla_w)]
self.weights = [w-(eta/len(mini_batch))*nw
for w, nw in zip(self.weights, nabla_w)]
self.biases = [b-(eta/len(mini_batch))*nb
for b, nb in zip(self.biases, nabla_b)]
def backprop(self, x, y):
"""Return a tuple ``(nabla_b, nabla_w)`` representing the
gradient for the cost function C_x. ``nabla_b`` and
``nabla_w`` are layer-by-layer lists of numpy arrays, similar
to ``self.biases`` and ``self.weights``."""
nabla_b = [np.zeros(b.shape) for b in self.biases]
nabla_w = [np.zeros(w.shape) for w in self.weights]
# feedforward
activation = x
activations = [x] # list to store all the activations, layer by layer
zs = [] # list to store all the z vectors, layer by layer
for b, w in zip(self.biases, self.weights):
z = np.dot(w, activation)+b
zs.append(z)
activation = sigmoid(z)
activations.append(activation)
# backward pass
delta = self.cost_derivative(activations[-1], y) * \
sigmoid_prime(zs[-1])
nabla_b[-1] = delta
nabla_w[-1] = np.dot(delta, activations[-2].transpose())
# Note that the variable l in the loop below is used a little
# differently to the notation in Chapter 2 of the book. Here,
# l = 1 means the last layer of neurons, l = 2 is the
# second-last layer, and so on. It's a renumbering of the
# scheme in the book, used here to take advantage of the fact
# that Python can use negative indices in lists.
for l in xrange(2, self.num_layers):
z = zs[-l]
sp = sigmoid_prime(z)
delta = np.dot(self.weights[-l+1].transpose(), delta) * sp
nabla_b[-l] = delta
nabla_w[-l] = np.dot(delta, activations[-l-1].transpose())
return (nabla_b, nabla_w)
def evaluate(self, test_data):
"""Return the number of test inputs for which the neural
network outputs the correct result. Note that the neural
network's output is assumed to be the index of whichever
neuron in the final layer has the highest activation."""
test_results = [(np.argmax(self.feedforward(x)), y)
for (x, y) in test_data]
return sum(int(x == y) for (x, y) in test_results)
def cost_derivative(self, output_activations, y):
"""Return the vector of partial derivatives \partial C_x /
\partial a for the output activations."""
return (output_activations-y)
#### Miscellaneous functions
def sigmoid(z):
"""The sigmoid function."""
return 1.0/(1.0+np.exp(-z))
def sigmoid_prime(z):
"""Derivative of the sigmoid function."""
return sigmoid(z)*(1-sigmoid(z))
| [
"arkadiuszgabrys@gmail.com"
] | arkadiuszgabrys@gmail.com |
e354add3beb18f533d7157be7068cbf4b7dd45db | 0b5b699459252996f058c8303a1f7093e7951ba0 | /food_delivery_app/restaurants/filters.py | 1c91e8a329ec67287390dbb43fe190d8aa8fe536 | [
"MIT"
] | permissive | MahmoudFarid/Food-Delivery-App | f145293548949618ae47d81f4ee7c35629fdaf5c | 8411ca48497e7347fe0258b720c2d2a566bb6e88 | refs/heads/master | 2020-04-12T04:40:22.129486 | 2018-12-23T21:52:09 | 2018-12-23T21:52:09 | 162,302,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 198 | py | from django_filters import rest_framework as filters
from .models import Order
class OrderFilter(filters.FilterSet):
class Meta:
model = Order
fields = ['customer', 'status']
| [
"mahmoud.farid.94@gmail.com"
] | mahmoud.farid.94@gmail.com |
47052e514f5c555a62a656eb1c3f16105d60d88c | 6eaf8375ac7650d8e0ce12e728456ea16d676a01 | /hiprovider_main/wsgi.py | 41c4d33b1f9d03bbb677b23a25bf6219bdf96755 | [] | no_license | alpaolo/hiprovider | c244f4698b6d956ff65bcc1ef1c1f61893604cdf | 80789e466e95748aed97201831d00907b385260e | refs/heads/master | 2023-03-27T18:49:15.624619 | 2021-03-19T22:01:39 | 2021-03-19T22:01:39 | 336,100,488 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for hiprovider_main project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'hiprovider_main.settings')
application = get_wsgi_application()
| [
"paolo.alberti@clipart.it"
] | paolo.alberti@clipart.it |
0964ca87b1476b689cf1f886a4e21864d6b7bb07 | d488f052805a87b5c4b124ca93494bc9b78620f7 | /google-cloud-sdk/.install/.backup/lib/googlecloudsdk/command_lib/functions/deploy/labels_util.py | 5e9da496f8ef33a5e94a3f93ad396421b5bf7ef7 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | PacktPublishing/DevOps-Fundamentals | 5ce1fc938db66b420691aa8106ecfb3f9ceb1ace | 60597e831e08325c7e51e8557591917f7c417275 | refs/heads/master | 2023-02-02T04:48:15.346907 | 2023-01-30T08:33:35 | 2023-01-30T08:33:35 | 131,293,311 | 13 | 19 | null | null | null | null | UTF-8 | Python | false | false | 2,718 | py | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""'functions deploy' utilities for labels."""
from googlecloudsdk.api_lib.functions import util as api_util
from googlecloudsdk.calliope import exceptions as calliope_exceptions
from googlecloudsdk.command_lib.util.args import labels_util as args_labels_util
NO_LABELS_STARTING_WITH_DEPLOY_MESSAGE = (
'Label keys starting with `deployment` are reserved for use by deployment '
'tools and cannot be specified manually.')
def CheckNoDeploymentLabels(flag_name, label_names):
"""Check for labels that start with `deployment`, which is not allowed.
Args:
flag_name: The name of the flag to include in case of an exception
label_names: A list of label names to check
Raises:
calliope_exceptions.InvalidArgumentException
"""
if not label_names:
return
for label_name in label_names:
if label_name.startswith('deployment'):
raise calliope_exceptions.InvalidArgumentException(
flag_name, NO_LABELS_STARTING_WITH_DEPLOY_MESSAGE)
def SetFunctionLabels(function, update_labels, remove_labels, clear_labels):
"""Set the labels on a function based on args.
Args:
function: the function to set the labels on
update_labels: a dict of <label-name>-<label-value> pairs for the labels to
be updated, from --update-labels
remove_labels: a list of the labels to be removed, from --remove-labels
clear_labels: a bool representing whether or not to clear all labels,
from --clear-labels
Returns:
A bool indicating whether or not any labels were updated on the function.
"""
labels_to_update = update_labels or {}
labels_to_update['deployment-tool'] = 'cli-gcloud'
labels_diff = args_labels_util.Diff(additions=labels_to_update,
subtractions=remove_labels,
clear=clear_labels)
messages = api_util.GetApiMessagesModule()
labels_update = labels_diff.Apply(messages.CloudFunction.LabelsValue,
function.labels)
if labels_update.needs_update:
function.labels = labels_update.labels
return True
return False
| [
"saneetk@packtpub.com"
] | saneetk@packtpub.com |
641071ee885c66f1169b6eb4763120d62232a689 | 87ff3c015865940b53e2faac03b8f40c6ae153c0 | /cleancowdiyadic.py | f6ab043d6ebbe7e5eddb1eb15d3724dae85efd96 | [] | no_license | karimn/Colonialism-Database | d6460a4dcceee1fbce00995673036dcc90c6ad5a | 470a89e190d7aaadde49c9994a31267393a1ca62 | refs/heads/master | 2021-01-23T00:10:07.143778 | 2011-08-17T14:57:08 | 2011-08-17T14:57:08 | 660,145 | 1 | 4 | null | null | null | null | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/python
import migtools
from django.db.models import Q
from colonialismdb.economics.models import BilateralTradeDataEntry
if __name__ == "__main__":
for bidata in BilateralTradeDataEntry.objects.filter(Q(exports = -9) | Q(imports = -9), Q(source = 3393)):
if bidata.imports == -9:
if bidata.exports == -9:
bidata.delete()
else:
bidata.imports = None
bidata.save()
else:
bidata.exports = None
bidata.save()
| [
"karimn@bu.edu"
] | karimn@bu.edu |
4374e0f6d09d3fac569ee903abba1a0b69fc1c4a | da7740e0d20dc7dd9775d4a53da7c0f7779834e1 | /MultiPlanarUNet/logging/logger.py | 4147e6a51aeb3f3ead32053006a4c39614f4c56e | [
"MIT"
] | permissive | xiaochengcike/MultiPlanarUNet | ca8fa35a8372b8d107bb16b29018e2413c108075 | 99c73ba2936b63282338cf31fe27086d414d2e62 | refs/heads/master | 2020-04-18T19:25:51.699311 | 2019-01-10T09:39:59 | 2019-01-10T09:39:59 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,492 | py | import os
import inspect
from MultiPlanarUNet.utils.decorators import accepts
class Logger(object):
def __init__(self, base_path, print_to_screen=True, active_file=None,
overwrite_existing=False, print_calling_method=True):
self.base_path = os.path.abspath(base_path)
self.path = os.path.join(self.base_path, "logs")
self.overwrite_existing = overwrite_existing
# Get built in print function
# (if overwritten globally, Logger still maintains a reference to the
# true print function)
self.print_f = __builtins__["print"]
if not os.path.exists(self.path):
os.mkdir(self.path)
# Print options
self.separator = "-" * 80
self.print_to_screen = print_to_screen
self.print_calling_method = print_calling_method
# Set paths to log files
self.log_files = {}
self.currently_logging = {}
self.active_log_file = active_file or "log"
def __repr__(self):
return "<MultiPlanarUNet.logging.Logger object>"
def __str__(self):
return "Logger(base_path=%s, print_to_screen=%s, " \
"overwrite_existing=%s)" % (self.base_path,
self.print_to_screen,
self.overwrite_existing)
def new_log_file(self, filename):
file_path = os.path.join(self.path, "%s.txt" % filename)
if os.path.exists(file_path):
if self.overwrite_existing:
os.remove(file_path)
else:
raise OSError("Logging path: %s already exists. "
"Initialize Logger(overwrite_existing=True) "
"to overwrite." % file_path)
self.log_files[filename] = file_path
self.currently_logging[filename] = None
self.active_log_file = filename
# Add reference to model folder in log
ref = "Log for model in: %s" % self.base_path
self._add_to_log(ref, no_print=True)
@property
def print_to_screen(self):
return self._print_to_screen
@print_to_screen.setter
@accepts(bool)
def print_to_screen(self, value):
self._print_to_screen = value
@property
def print_calling_method(self):
return self._print_calling_method
@print_calling_method.setter
@accepts(bool)
def print_calling_method(self, value):
self._print_calling_method = value
@property
def log(self):
with open(self.log_files[self.active_log_file], "r") as log_f:
return log_f.read()
@property
def active_log_file(self):
return self._active_log_file
@active_log_file.setter
@accepts(str)
def active_log_file(self, file_name):
if file_name not in self.log_files:
self.new_log_file(file_name)
self._active_log_file = file_name
def _add_to_log(self, *args, no_print=False, **kwargs):
if self.print_to_screen and not no_print:
self.print_f(*args, **kwargs)
with open(self.log_files[self.active_log_file], "a") as log_file:
self.print_f(*args, file=log_file, **kwargs)
def _log(self, caller, print_calling_owerwrite=None, *args, **kwargs):
if caller != self.currently_logging[self.active_log_file]:
self.currently_logging[self.active_log_file] = caller
if print_calling_owerwrite is not None:
print_calling = print_calling_owerwrite
else:
print_calling = self.print_calling_method
if print_calling:
self._add_to_log("%s\n>>> Logged by: %s" % (self.separator,
self.currently_logging[self.active_log_file]))
self._add_to_log(*args, **kwargs)
def __call__(self, *args, print_calling_method=None, **kwargs):
caller = inspect.stack()[1]
caller = "'%s' in '%s'" % (caller[3], caller[1].rpartition("/")[2])
self._log(caller, print_calling_method, *args, **kwargs)
def __enter__(self):
"""
Context manager
Sets logger as global print function within context
"""
__builtins__["print"] = self
return self
def __exit__(self, *args):
"""
Revert to default print function in global scope
"""
__builtins__["print"] = self.print_f
return self
| [
"mathias@perslev.com"
] | mathias@perslev.com |
8c3066bff82b46c81a174a93eb7efe733a51dd55 | 87b1320bcd8f1c1efc68995a1e0f259df41e9c3f | /PythonDjango/Django/BeltReviewer/apps/LoginReg/models.py | 99da55601ed5c057aea2634b11026688d1e3b5ef | [] | no_license | colbehr/DojoAssignments | 52690968e3c7796ea5c81de4a62e8c829131aa3c | 8d4dcc9797e37d8563c1e6272ba8b2f7351c3b48 | refs/heads/master | 2021-01-22T23:26:15.564702 | 2017-07-15T05:28:22 | 2017-07-15T05:28:22 | 85,636,972 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,024 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import re
from django.db import models
import bcrypt
# Create your models here.
class UserManager(models.Manager):
def validateUser(self, postData):
print "__________ validateUser (models.py)___________"
response = {}
response["status"] = True
response["errors"] = []
response["data"] = postData
if not postData['fname'] or not postData['lname']:
response["status"] = False
response['errors'].append("Please enter a name")
if bool(re.search(r'\d', str(postData['fname']))) or bool(re.search(r'\d', str(postData['lname']))):
response["status"] = False
response['errors'].append("A name cannot contain a digit")
if len(postData['fname']) < 2 or len(postData['lname']) < 2:
response["status"] = False
response['errors'].append("Name must be longer than 2 characters")
if re.match(r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", postData['email']) is None:
response["status"] = False
response['errors'].append("Please enter a valid email address")
if User.objects.filter(email=postData['email']).exists():
response["status"] = False
response['errors'].append("Email already exists, Please log in!")
if len(postData["pass"]) <= 7:
response["status"] = False
response['errors'].append("Password must be at least 8 characters")
if postData['pass'] != postData["passconf"]:
response["status"] = False
response['errors'].append("Passwords must Match")
if response['status'] is True:
response['user'] = User.objects.create(first_name=response['data']['fname'], last_name=response['data']['fname'],
email=response['data']['email'], password=bcrypt.hashpw(response['data']['pass'].encode(), bcrypt.gensalt()))
return response
def validateLogin(self, postData):
print "__________ validateLogin (models.py)___________"
response = {}
response["status"] = True
response["data"] = postData
if not User.objects.filter(email=postData['email']).exists():
response["status"] = False
return response
response["user"] = User.objects.get(email=postData['email'])
if bcrypt.hashpw(postData['pass'].encode(), response['user'].password.encode()) != response['user'].password:
response["status"] = False
return response
class User(models.Model):
first_name = models.CharField(max_length=50)
last_name = models.CharField(max_length=50)
email = models.EmailField()
password = models.CharField(max_length=50)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
objects = UserManager()
| [
"colbehr@gmail.com"
] | colbehr@gmail.com |
45c917e699a0c7c93ca8e95bc3cab10d8f4ce7dc | 475dfa3b84617602a34b7d49ebafae47b792d08e | /SQL/job09.py | 1a31a219229986aa0d63bbee303319ed0f44f75a | [] | no_license | BrunoSimonDGTL/DGLPython | 168cfbe28389d670d2d298dd171f413d8971da75 | d47ca2b6ae0d098922406a6658ea5f87b3632be2 | refs/heads/main | 2023-04-21T13:35:07.853093 | 2021-04-20T20:36:42 | 2021-04-20T20:36:42 | 358,612,184 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 760 | py | import mysql.connector
print("connexion")
dbname = "laplateforme"
mydb = mysql.connector.connect(
host="localhost",
user="Bruno",
password="",
database=dbname,
)
mycursor = mydb.cursor()
query = """
SELECT name
FROM job;
"""
mycursor.execute(query)
myresult = mycursor.fetchall()
#job = input("Donne un nom de job:")
job = "IDE"
val = (job,)
print("all the job")
print(mycursor.column_names)
for x in myresult:
print(x[0])
query = """
SELECT job.name AS job_name, unit.name AS unit_Name, unit.id
FROM job, unit
WHERE job.name = %s AND unit.id = job.unit_fk;
"""
mycursor.execute(query, val)
myresult = mycursor.fetchall()
print("all the job")
print(mycursor.column_names)
for x in myresult:
print(x)
mydb.close()
mycursor.close()
| [
"bru.sim.bs@gmail.com"
] | bru.sim.bs@gmail.com |
0637d34c345649b17b190752d77694ce2c4b4bb1 | 57c697ffebe2e9b3f5bd5da8122638152e4d0e9f | /contrib/seeds/makeseeds.py | 4b8d889c83c3bb295de84aab0658b5cb0d5ef45c | [
"MIT"
] | permissive | Globycoin/glbcore | 4039ddb98dec19dadebf8b2d583f27e6c083d9cd | d5dd9b5475915956849658373d8658286a08781b | refs/heads/master | 2020-03-22T20:02:44.733133 | 2018-11-15T00:42:39 | 2018-11-15T00:42:39 | 140,569,343 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 5,519 | py | #!/usr/bin/env python3
# Copyright (c) 2013-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 615801
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
""
}
import re
import sys
import dns.resolver
import collections
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/GlobycoinCore:2.2.(0|1|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
if len(sline) > 11:
agent = sline[11][1:] + sline[12][:-1]
else:
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more nodes per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv4 = [ip for ip in ips if ip['net'] == 'ipv4']
ips_ipv6 = [ip for ip in ips if ip['net'] == 'ipv6']
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv4 by ASN
result = []
asn_count = {}
for ip in ips_ipv4:
if len(result) == max_total:
break
try:
asn = int([x.to_text() for x in dns.resolver.query('.'.join(reversed(ip['ip'].split('.'))) + '.origin.asn.cymru.com', 'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# TODO: filter IPv6 by ASN
# Add back non-IPv4
result.extend(ips_ipv6)
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries with valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(re.sub(' ', '-', ip['agent']))]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple bitcoin ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
| [
"you@example.com"
] | you@example.com |
b02a1a6ec159f73cb3daa2185c99104551f47d45 | e12812df28879cc051c8ca6abe1c1aa07b8bf4dc | /deps/wslay.gyp | 82ee743afa610ed6433829c09d9b3b2526899055 | [] | no_license | lhecker/libnodecc | 714438b63ab4ed090ce60b151da73b9e5c830129 | 655953fad2cc9c20aa9daa0b22af0504bea6ff89 | refs/heads/master | 2021-01-02T08:20:30.073453 | 2015-11-27T14:51:23 | 2015-11-27T14:51:23 | 20,214,879 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 494 | gyp | {
'targets': [
{
'target_name': 'wslay',
'type': 'static_library',
'include_dirs': [ 'wslay/lib/includes' ],
'defines': [
'WSLAY_VERSION=0.1.1',
],
'sources': [
'wslay/lib/wslay_event.c',
'wslay/lib/wslay_frame.c',
'wslay/lib/wslay_net.c',
'wslay/lib/wslay_queue.c',
'wslay/lib/wslay_stack.c',
],
'direct_dependent_settings': {
'include_dirs': [ 'wslay/lib/includes' ],
'defines': [
'WSLAY_VERSION=0.1.1',
],
},
},
],
}
| [
"leonard@hecker.io"
] | leonard@hecker.io |
f886c22e0fbc3e0a268193239c53656c2954fcc7 | c54f5a7cf6de3ed02d2e02cf867470ea48bd9258 | /pyobjc/pyobjc-core/Lib/objc/_category.py | acfb9a48e26e38dd13712137c8517c88b442e532 | [
"MIT"
] | permissive | orestis/pyobjc | 01ad0e731fbbe0413c2f5ac2f3e91016749146c6 | c30bf50ba29cb562d530e71a9d6c3d8ad75aa230 | refs/heads/master | 2021-01-22T06:54:35.401551 | 2009-09-01T09:24:47 | 2009-09-01T09:24:47 | 16,895 | 8 | 5 | null | null | null | null | UTF-8 | Python | false | false | 2,372 | py | __all__ = ['classAddMethod', 'Category']
from _objc import selector, classAddMethods, objc_class, ivar
from types import FunctionType, MethodType
def classAddMethod(cls, name, method):
"""
Add a single method to a class. 'name' is the ObjC selector
"""
if isinstance(method, selector):
sel = selector(method.callable,
selector=name,
signature=method.signature,
isClassMethod=method.isClassMethod)
else:
sel = selector(method, selector=name)
return classAddMethods(cls, [sel])
#
# Syntactic support for categories
#
class _CategoryMeta(type):
"""
Meta class for categories.
"""
__slots__ = ()
_IGNORENAMES = ('__module__', '__name__', '__doc__')
def _newSubclass(cls, name, bases, methods):
return type.__new__(cls, name, bases, methods)
_newSubclass = classmethod(_newSubclass)
def __new__(cls, name, bases, methods):
if len(bases) != 1:
raise TypeError("Cannot have multiple inheritance with Categories")
c = bases[0].real_class
if c.__name__ != name:
raise TypeError("Category name must be same as class name")
m = [ x[1] for x in methods.iteritems() if x[0] not in cls._IGNORENAMES and isinstance(x[1], (FunctionType, MethodType, selector, classmethod))]
vars = [ x for x in methods.iteritems() if x[0] not in cls._IGNORENAMES and not isinstance(x[1], (FunctionType, MethodType, selector, classmethod))]
for k, v in vars:
if isinstance(v, ivar):
raise TypeError("Cannot add instance variables in a Category")
classAddMethods(c, m)
for k, v in vars:
setattr(c, k, v)
return c
def Category(cls):
"""
Create a category on ``cls``.
Usage:
class SomeClass (Category(SomeClass)):
def method(self):
pass
``SomeClass`` is an existing class that will be rebound to the same
value. The side-effect of this class definition is that the methods
in the class definition will be added to the existing class.
"""
if not isinstance(cls, objc_class):
raise TypeError, "Category can only be used on Objective-C classes"
retval = _CategoryMeta._newSubclass('Category', (), dict(real_class=cls))
return retval
| [
"ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25"
] | ronaldoussoren@f55f28a5-9edb-0310-a011-a803cfcd5d25 |
506f301480830f167a526326889a863364457fd4 | 181ddb2aedae6f4b63d16e22e36b679c200471a5 | /model/model.py | 93a2160f35c678f22ce85328efc749100a9a1bb0 | [] | no_license | eraander/Analyzing-Trump-Tweets | 6beef8de9ed78e1a2801d226e1d0f3d1d2e394a8 | f9b2420194fa8f812ad0c969fed0bb1adf4c5b62 | refs/heads/master | 2021-12-30T05:35:33.739018 | 2021-12-13T06:26:38 | 2021-12-13T06:26:38 | 217,989,059 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,785 | py | '''
Name: model.py
COSI 140B
30 Apr 2019
'''
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import Perceptron
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.metrics import classification_report
from sklearn.feature_extraction import DictVectorizer
from sklearn.model_selection import train_test_split
from operator import itemgetter
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
import pandas as pd
import numpy as np
import os, re
#
negative_stems = set(['no', 'not', 't', 'never', 'don\'t', 'can\'t', 'aren', 'didnt', 'wouldn', 'ever', 'shouldn',
'without', 'lack', 'miss', 'couldn', 'isn', 'cannot', 'nt', 'didn', 'don'])
auxiliaries = set(['will', 'shall', 'should', 'can', 'must', 'may', 'did', 'do', 'could', 'would', 'might', 'would',
'want', 'if'])
def keep_only_alpha(text, stemmer):
#text = re.sub(r'\d+', '', text)
text = re.sub(r'https?:\/\/.+', ' ', text)
text = re.sub(r'[^A-Za-z]+', ' ', text)
text = text.lower()
# bigrams = nltk.bigrams(word_t)
# print(bigrams)
return [token for token in word_tokenize(text)]
def change_data(data_frame):
data_frame.is_copy = False
data_frame.event_confidence[data_frame.event_confidence == '+2'] = 'pos'
data_frame.event_confidence[data_frame.event_confidence == '+1'] = 'pos'
data_frame.event_confidence[data_frame.event_confidence == '-1'] = 'neg'
data_frame.event_confidence[data_frame.event_confidence == '-2'] = 'neg'
data_frame = data_frame[data_frame['event_confidence'] != '0']
return data_frame
def make_data_frame(dir_path):
df1 = pd.DataFrame()
for file in os.listdir(dir_path):
if file.endswith('txt'):
f = open(file)
file_content = f.read().strip('\n')
file_annotations = file_content.split('\n\n')
all_fields = [annotation.split('|') for annotation in file_annotations]
data = all_fields[1:]
cols = all_fields[0]
df2 = pd.DataFrame(data=data, columns=cols, )
df1 = pd.concat([df1, df2])
df1.reset_index(inplace=True)
df1['event_text'].str.lower()
# df1.set_value('0', 'event_text',
df1['type'] = 'train'
return df1
def train_to_test(df1):
negative_entries = df1[df1['event_confidence'] == 'neg']
len_neg = len(negative_entries)
train_test_spl = int(0.23*len_neg)
negative_entries['type'][:train_test_spl] = 'test'
pos_sample = df1[df1['event_confidence'] == 'pos']
len_pos = len(pos_sample)
train_test_pos = int(0.27*len_pos)
pos_sample['type'][:train_test_pos] = 'test'
df2 = negative_entries.append(pos_sample)
negative_entries = df1[df1['event_confidence'] == 'neg']
neg_train = negative_entries[negative_entries['type'] == 'train']
return df2
def extract_feat_vocab(data_frame, stemmer):
feat_vocab = dict()
for index, row in data_frame[data_frame['type'] == 'train'].iterrows():
negative = 0
event = keep_only_alpha(row['event_text'], stemmer)
tokens = keep_only_alpha(row['tweet_content'], stemmer)
# print(tokens)
# print(event)
event_index = tokens.index(event[0])
event = tokens[event_index]
# print(event_index)
# print(event[0])
event_start = event_index - 6
if event_start < 0:
event_start = 0
tokens = tokens[event_start:event_index+2]
bigrams = nltk.bigrams(tokens)
key_tokens = [t for t in tokens if t in negative_stems or t in auxiliaries]
if not key_tokens:
tokens.append('likely_posit')
for token in tokens:
if token == event:
feat_vocab['ends_with_' + event[-2:]] = feat_vocab.get('ends_with_' + event[-2:], 0) + 1
unstemmed = token
token = stemmer.stem(token)
if token in negative_stems:
feat_vocab['neg_' + token] = feat_vocab.get('neg_' + token, 0) + 10
elif token in auxiliaries:
feat_vocab['aux_' + token] = feat_vocab.get('aux_' + token, 0) + 10
elif token == 'likely_posit':
feat_vocab['aux_null'] = feat_vocab.get('aux_null', 0) + 10
else:
feat_vocab[token] = feat_vocab.get(token, 0) + 1
if token in row['emotion'].lower().split() or unstemmed in row['emotion'].lower().split():
feat_vocab['emotion_' + token] = feat_vocab.get('emotion_' + token, 0) + 1
emotion_value = row['emotion_value']
feat_vocab[emotion_value] = feat_vocab.get(emotion_value, 0) + 1
# if not row['emotion']:
# feat_vocab['no_emotion'] = feat_vocab.get('no_emotion', 0) + 1
for (token_a, token_b) in bigrams:
if token_a in negative_stems or token_b in negative_stems:
if token_b in negative_stems:
feat_vocab[token_a + '_' + token_b] = feat_vocab.get(token_a + '_' + token_b, 0) + 1
elif token_a in negative_stems:
feat_vocab[token_a + '_' + token_b] = feat_vocab.get(token_a + '_' + token_b, 0) + 1
else:
feat_vocab[token_a + '_' + token_b] = feat_vocab.get(token_a + '_' + token_b, 0) + 1
elif token_b in negative_stems:
feat_vocab[token_a + '_' + token_b] = feat_vocab.get(token_a + '_' + token_b, 0) + 1
else:
feat_vocab[token_a + '_' + token_b] = feat_vocab.get(token_a + '_' + token_b, 0) + 1
negative = 0
return feat_vocab
def select_features(feat_vocab, most_freq=1, least_freq=100):
sorted_feat_vocab = sorted(feat_vocab.items(), key=itemgetter(1), reverse=True)
feat_dict = dict(sorted_feat_vocab[most_freq:len(sorted_feat_vocab)-least_freq])
return set(feat_dict.keys())
def featurize(data_frame, feat_vocab, stemmer):
cols = ['_type_', '_confidence_']
cols.extend(list(feat_vocab))
row_count = data_frame.shape[0]
feat_data_frame = pd.DataFrame(index=np.arange(row_count), columns=cols)
feat_data_frame.fillna(0, inplace=True) #inplace: mutable
for index, row in data_frame.iterrows():
feat_data_frame.loc[index, '_type_'] = row['type']
feat_data_frame.loc[index, '_confidence_'] = row['event_confidence']
for token in keep_only_alpha(row['tweet_content'], stemmer):
if token in feat_vocab:
feat_data_frame.loc[index, token] += 1
return feat_data_frame
def vectorize(feature_csv, split='train'):
df = pd.read_csv(feature_csv, encoding='latin1')
df = df[df['_type_'] == split]
df.fillna(0, inplace=True)
data = list()
for index, row in df.iterrows():
datum = dict()
datum['bias'] = 1
for col in df.columns:
if not (col == "_type_" or col == "_confidence_" or col == 'index'):
datum[col] = row[col]
data.append(datum)
vec = DictVectorizer()
data = vec.fit_transform(data).toarray()
print(data.shape)
labels = df._confidence_.as_matrix()
print(labels.shape)
return data, labels
def train_model(X_train, y_train, model):
model.fit(X_train, y_train)
print ('Shape of model coefficients and intercepts: {} {}'.format(model.coef_.shape, model.intercept_.shape))
return model
def test_model(X_test, y_test, model):
predictions = model.predict(X_test)
report = classification_report(predictions, y_test)
accuracy = accuracy_score(predictions, y_test)
return accuracy, report
def classify(feat_csv):
X_train, y_train = vectorize(feat_csv)
X_test, y_test = vectorize(feat_csv, split='test')
model = LogisticRegression(multi_class='multinomial', penalty='l2', solver='lbfgs', max_iter=500, verbose=1,
class_weight='balanced')
# model = SVC(C=1.0, gamma='auto', class_weight='balanced')
# model = LogisticRegressionCV(cv=5, multi_class='multinomial', max_iter=800)
model = train_model(X_train, y_train, model)
accuracy, report = test_model(X_test, y_test, model)
print (report)
if __name__ == '__main__':
model_path = os.path.curdir
df = make_data_frame(model_path)
df = change_data(df)
df = train_to_test(df)
ps = PorterStemmer()
feat_vocab = extract_feat_vocab(df, ps)
# print(feat_vocab)
selected_feat_vocab = select_features(feat_vocab)
feat_data_frame = featurize(df, selected_feat_vocab, ps)
featfile = os.path.join(os.path.curdir, 'features.csv')
feat_data_frame.to_csv(featfile, encoding='latin1', index=False)
classify('features.csv')
| [
"shaymann@brandeis.edu"
] | shaymann@brandeis.edu |
df8997965cfb07252c3ae7b36952fb4c2e5ce507 | 3e6b9ebde135f7f9fbc98816807d16819ac5d51b | /Homework/hw3/top10.py | 5d9d461217e0da8e9c61da5e2dddc51e2b475165 | [] | no_license | ZepeiZhao/Data-Management | e9d821204a88067e56c5d0be7f295d3ae2f82b9e | 5081d5e7e611bf3cb35f6f0d811c8d48b8644ac8 | refs/heads/master | 2022-07-17T22:28:36.335630 | 2020-05-16T23:01:38 | 2020-05-16T23:01:38 | 264,057,271 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,437 | py | import csv
import sys
import datetime
# running time:0.010792 Seconds
# duration time of mysql execution: 0.0062 sec
# using mysql is faster than using python
# The time complexity of my code is o(nm),
# n and m are the length of table(dict) c and cl
# since there are at most two 'for' loop.
# In cltable function, I construct a dictionary to
# store language and CountryCode pair.
# So, it traverses langauge list and countrylanguage table in two loops.
# About improvement, using different data stucture may work better,
# list traversal is slower than other structure.
# Besides, reducing the number of loop also can help.
# The time complexity is o(n).
# I use conditions as filters for every steps.
# For example, first, I choose official language as a constraint to reduce list length.
# So the space complexity is reduced.
def readCSV(csvFile):
reader = csv.DictReader(csvFile)
rows = [row for row in reader]
csvFile.close()
return rows
def newlist(dict_):
res = []
for i in dict_:
temp = {}
if dict_ == dict1:
if i.keys() == '#Code' or 'Population':
temp['#Code'] = i['#Code']
temp['Population'] = i['Population']
res.append(temp)
else:
if i.keys() == '#CountryCode' or '#Language' or 'IsOfficial':
if i['IsOfficial'] == 'T':
temp['#CountryCode'] = i['#CountryCode']
temp['#Language'] = i['#Language']
res.append(temp)
return res
#country table with population>1000000
def ctable(country):
c = []
for i in country:
tmp = {}
if 'Population' in i.keys():
if int(i['Population']) > 1000000:
tmp['#Code'] = i['#Code']
tmp['Population'] = int(i['Population'])
c.append(tmp)
return c
#cl table {language:[countrycode]}
def cltable(countrylanguage):
cl = {}
tmplist = []
for i in countrylanguage:
tmplist.append(i['#Language'])
for j in tmplist:
sublist = []
for i in countrylanguage:
if j == i['#Language']:
sublist.append(i['#CountryCode'])
cl[j] = sublist
return cl
#get dict{language:[population]}
def getlan_po(cl,c):
newdict = {}
for i in cl:
tmp_ = []
for j in c:
if j['#Code'] in cl[i]:
tmp_.append(j['Population'])
newdict[i] = tmp_
return newdict
def getSum(newdict):
dic_ = {}
for i in newdict:
dic_[i] = sum(newdict[i])
return dic_
def getTop(dic_):
topList = sorted(dic_.items(),key = lambda item:item[1])[::-1]
return topList
def getRes(topList):
top10 = topList[:10]
result = []
for i in top10:
result.append(i[0])
return result
if __name__ == "__main__":
start = datetime.datetime.now()
file1 = sys.argv[1]
file2 = sys.argv[2]
csvFile1 = open(file1, "r")
csvFile2 = open(file2, "r")
dict1 = readCSV(csvFile1)
dict2 = readCSV(csvFile2)
country = newlist(dict1)
countrylanguage = newlist(dict2)
c = ctable(country)
cl = cltable(countrylanguage)
newdict = getlan_po(cl, c)
dic_ = getSum(newdict)
topList = getTop(dic_)
result = getRes(topList)
print(result)
end = datetime.datetime.now()
print('running time:%s Seconds' %(end-start))
| [
"noreply@github.com"
] | ZepeiZhao.noreply@github.com |
3d03c774171e77126bf51389e5039db694b2f43b | 08ff8a541fb2c71e30924e9de33413f3f1406ea6 | /Clases/CsvConverter.py | a261127315ea2d9b58e5d27a02d94ce43aa0a615 | [] | no_license | mkiro/PythonScrapping | e3fd9291aa6bc2567fd1f859acf44ae9ce3649d7 | 91772e466833dad721e5ab5a746373343232e834 | refs/heads/master | 2023-04-07T18:57:57.555101 | 2021-04-10T18:57:33 | 2021-04-10T18:57:33 | 356,667,989 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 61 | py | class CsvConverter(object):
"""description of class"""
| [
"maribel.quiros@redarbor.csccsl.com"
] | maribel.quiros@redarbor.csccsl.com |
a46baff93c270cf4f95319ea20ee6ee192bffbaf | 941bdecc9a2889dded7c3aad579d8f0882423335 | /ideal.py | c87889cc06be4261979e1bb49b7d4f78713899eb | [] | no_license | MaratZakirov/playground | 3dd17ec121972d715de0964ec2d28b34600cb42d | b58dfe046f102b866f1d0606df72ffa53e34dfd7 | refs/heads/master | 2023-02-10T12:00:53.636256 | 2021-01-12T15:26:28 | 2021-01-12T15:26:28 | 269,359,783 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,550 | py | import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import norm
np.random.seed(0)
"""
a = np.array([3, 3, 3, 0, 0, 0, 1, 2, 12])
x,y,z = np.unique(a, return_counts=True, return_index=True)
print(x, y, z)
print(np.repeat(x, z))
"""
from ctypes import cdll
import ctypes
import numpy as np
from numpy.ctypeslib import ndpointer
cpputils = cdll.LoadLibrary("./cpputils.so")
# Call this function in C++
# int * GetCuts(int node_num, int edge_num, int * nodes_from, int * nodes_to, float * weigh)
def getColPairs(Xint):
N = len(Xint)
x = np.copy(Xint[:, 0])
y = np.copy(Xint[:, 1])
Pa = np.zeros(N, dtype=np.int32) - 1
Pb = np.zeros(N, dtype=np.int32) - 1
nums = np.arange(N, dtype=np.int32)
np.random.shuffle(nums)
cpputils.getColPairs.argtypes = [ctypes.c_int,
ndpointer(dtype=ctypes.c_int, shape=(N,)),
ndpointer(dtype=ctypes.c_int, shape=(N,)),
ndpointer(dtype=ctypes.c_int, shape=(N,)),
ndpointer(dtype=ctypes.c_int, shape=(N,)),
ndpointer(dtype=ctypes.c_int, shape=(N,))]
cpputils.getColPairs(N, nums, x, y, Pa, Pb)
Pa = Pa[Pa >= 0]
Pb = Pb[Pb >= 0]
if len(Pa) > 0:
assert np.abs(Xint[Pa] - Xint[Pb]).max() == 0
return Pa, Pb
# Macro parameters
x0 = 0; x1 = 2      # box bounds along x
y0 = 0; y1 = 24     # box bounds along y
# TODO set to 8000 for evaluation
N = 9000            # number of particles
EPO = 10000         # number of integration steps
L = 10              # NOTE(review): appears unused below — confirm before removing
Levs = 3            # number of horizontal altitude bands for statistics
period = 10         # record/print statistics every `period` steps
# State
#m = np.random.choice([0.7, 1.3, 1.0, 0.8, 1.2, 0.6, 1.4], size=(N, 1))
m = np.random.uniform(low=0.8, high=1.2, size=(N, 1))     # per-particle masses, shape (N, 1)
r = 0.01                                                  # collision cell size used by the collision step
X = np.random.uniform((x0, y0), (x0 + 1, y0 + 1), (N, 2)) # positions start in the unit corner cell of the box
V = 13 * np.random.randn(N, 2)                            # Gaussian initial velocities
#V = 40 * (np.random.rand(N, 2) - 0.5)
g = np.array([0, -9.8])                                   # gravitational acceleration
dt = 0.01                                                 # time step
# Box wall segments as (x_start, y_start, x_end, y_end) rows.
lines = np.array([[x0, y0, x1, y0],
                  [x0, y1, x1, y1],
                  [x0, y0, x0, y1],
                  [x1, y0, x1, y1]])
# For statistics storing
x_data = []     # position snapshots
v_data = []     # velocity snapshots
Ek_data = []    # per-particle kinetic energy snapshots
Ep_data = []    # per-particle potential energy snapshots
Ek_levs = []    # mean kinetic energy per altitude band
N_levs = []     # particle count per altitude band
CH = []         # extra histogram data; never filled in this version
# Every particle in ideal gas model has same amount of energy
Ef_const = m[:, 0] * X[:, 1] * (-g[1]) + (m * V * V / 2).sum(1)
I_prev = np.zeros(N).astype(bool)   # NOTE(review): appears unused below — confirm before removing
def CollisionSimplifiedFast(X, V, m, r=0.002, alpha=0.0):
    """Resolve pairwise elastic collisions between particles.

    Particles are binned into square cells of side `r`; the native helper
    (via getColPairs) pairs up particles that share a cell.  Each pair then
    exchanges velocity using the standard 1-D elastic-collision formulas
    applied component-wise, which conserves momentum and kinetic energy.

    X, V, m : positions (N, 2), velocities (N, 2), masses (N, 1).
              V is modified in place; X and m are read-only.
    r       : collision cell size.
    alpha   : kept for interface compatibility; unused (the experimental
              energy-mixing step that consumed it was disabled dead code
              and has been removed).

    Returns the number of collision pairs processed (0 when none occur;
    previously the function implicitly returned None in that case, which
    leaked into the caller's progress printout).
    """
    Xint = (X / r).astype(np.int32)
    P_a, P_b = getColPairs(Xint)
    if len(P_a) > 0:
        assert len(P_a) == len(P_b)
        m_a = m[P_a]
        m_b = m[P_b]
        v_a = V[P_a]
        v_b = V[P_b]
        # Elastic collision: v1' = ((m1 - m2) v1 + 2 m2 v2) / (m1 + m2), and symmetrically for v2'.
        V[P_a] = ((m_a - m_b) * v_a + 2 * m_b * v_b) / (m_a + m_b)
        V[P_b] = ((m_b - m_a) * v_b + 2 * m_a * v_a) / (m_a + m_b)
    return len(P_a)
"""
def CollisionFastAndRough(X, V, m, h=0.4, alpha=0.1):
# First select randomly region
xy_b = np.random.uniform(low=(x0, y0), high=(x1-h, y1-h))
Isel = (X[:, 0] > xy_b[0]) & (X[:, 1] > xy_b[1]) & (X[:, 0] < (xy_b[0] + h)) & (X[:, 1] < (xy_b[1] + h))
if Isel.sum() >= 2:
P_ab = np.where(Isel)[0][:(Isel.sum()//2)*2]
np.random.shuffle(P_ab)
P_a = P_ab[:len(P_ab)//2]
P_b = P_ab[-len(P_ab)//2:]
assert len(P_a) == len(P_b)
# TODO This scheme doesnt help intersting... total randomnes is bad...
if 0:
E_beg = m[P_a] * norm(V[P_a], axis=1, keepdims=True) ** 2 + m[P_b] * norm(V[P_b], axis=1, keepdims=True) ** 2
V[P_a] = np.random.randn(len(P_a), 2)
V[P_b] = np.random.randn(len(P_b), 2)
E_end = m[P_a] * norm(V[P_a], axis=1, keepdims=True) ** 2 + m[P_b] * norm(V[P_b], axis=1, keepdims=True) ** 2
coef = np.sqrt(E_beg.sum()/E_end.sum())
V[P_a] = V[P_a] * coef
V[P_b] = V[P_b] * coef
return Isel.sum() // 2
m_a = m[P_a]
m_b = m[P_b]
v_a = V[P_a]
v_b = V[P_b]
V[P_a] = ((m_a - m_b) * v_a + 2 * m_b * v_b) / (m_a + m_b)
V[P_b] = ((m_b - m_a) * v_b + 2 * m_a * v_a) / (m_a + m_b)
if 1:
E_beg = m[P_a] * norm(V[P_a], axis=1, keepdims=True) ** 2 + m[P_b] * norm(V[P_b], axis=1, keepdims=True) ** 2
V[P_a] *= (1 + 0.2 * np.random.randn(len(P_a), 2))
V[P_b] *= (1 + 0.2 * np.random.randn(len(P_b), 2))
E_end = m[P_a] * norm(V[P_a], axis=1, keepdims=True) ** 2 + m[P_b] * norm(V[P_b], axis=1, keepdims=True) ** 2
coef = np.sqrt(E_beg/E_end)
V[P_a] = V[P_a] * coef
V[P_b] = V[P_b] * coef
return Isel.sum() // 2
# Mixing energy on alpha amount
if 0:#alpha > 0.0:
E = m[P_ab] * np.linalg.norm(V[P_ab], axis=1, keepdims=True) ** 2
E_n = np.clip(E.std() * np.random.randn(len(E), 1) + E.mean(), a_min=0.1, a_max=1000000)
E_n = E_n * (E.sum() / E_n.sum())
E_n = E * (1 - alpha) + E_n * alpha
E_n = E_n * (E.sum() / E_n.sum())
V[P_ab] = (V[P_ab] / np.sqrt(E)) * np.sqrt(E_n)
return Isel.sum() // 2
def energyMixer(m, V, Iy0):
alpha = 0.4
n = Iy0.sum()
# Storing kinetic energy of affected particles
E = m[Iy0] * np.linalg.norm(V[Iy0], axis=1, keepdims=True) ** 2
# E_n = np.clip(E.std() * np.random.randn(n, 1) + E.mean(), a_min=0, a_max=1000000)
E_n = 0 * E + E.mean(keepdims=True)
E_n = E_n * (E.sum() / E_n.sum())
E_n = E * (1 - alpha) + E_n * alpha
E_n = E_n * (E.sum() / E_n.sum())
V[Iy0] = (V[Iy0] / np.sqrt(E)) * np.sqrt(E_n)
if (V != V).any():
assert 0
"""
# Main integration loop: explicit Euler step under gravity, wall bounces,
# then pairwise collision handling; statistics are sampled every `period` epochs.
for epoch in range(EPO):
    # Kinematics: advance positions with constant acceleration, then velocities.
    X_n = X + V * dt + 0.5 * g * dt ** 2
    V = V + g * dt
    # Fix velocities to return particles into the volume.  Masks are boolean,
    # so combine them with `|` (the old `+` relied on np.add of bools).
    Ix = (X_n[:, 0] < x0) | (X_n[:, 0] > x1)   # escaped through a side wall
    Iy0 = X_n[:, 1] < y0                       # fell below the floor
    Iy1 = X_n[:, 1] > y1                       # rose above the ceiling
    V[Ix, 0] *= -1
    V[Iy0, 1] = np.abs(V[Iy0, 1])              # push back upward
    V[Iy1, 1] = -np.abs(V[Iy1, 1])             # push back downward
    # Resolve particle-particle collisions; the call returns the collision count.
    col_r = CollisionSimplifiedFast(X_n, V, m)
    # Alternative schemes kept for experimentation:
    #col_r = CollisionFastAndRough(X_n, V, m)
    #if Iy0.sum() >= 2:
    #    energyMixer(m, V, Iy0)
    X = X_n
    if epoch % period == 0:
        # Sanity check: velocities must contain no NaNs (x != x detects NaN).
        assert not (V != V).any()
        print(epoch)
        # Store statistics
        x_data.append(X)
        v_data.append(V)
        Ek_data.append((m * V * V / 2).sum(1))
        Ep_data.append(m[:, 0] * X[:, 1] * (-g[1]))
        # Bucket particles into Levs horizontal height levels.
        h_lev = np.clip((Levs * X[:, 1] / (y1 - y0)).astype(int), a_min=0, a_max=Levs - 1)
        Ek_lev = [0] * Levs
        N_lev = [0] * Levs
        for l in range(Levs):
            Ek_lev[l] = Ek_data[-1][h_lev == l].mean().round(4)
            N_lev[l] = (h_lev == l).sum()
        print('Number levels: ', N_lev)
        print('E kinetic levels:', Ek_lev, Ek_data[-1].mean())
        print('Collision rate:', col_r)   # fixed typo: was 'Collison'
        Ek_levs.append(Ek_lev)
        N_levs.append(N_lev)
# After loop: convert the collected per-sample statistics to arrays.
x_data = np.stack(x_data)
v_data = np.stack(v_data)
Ek_data = np.array(Ek_data)
Ep_data = np.array(Ep_data)
Ek_levs = np.array(Ek_levs)
N_levs = np.array(N_levs)
# Histogram of recorded collision characteristics, if any were collected.
if len(CH) > 0:
    CH = np.concatenate(CH)
    print(np.histogram(CH, bins=20)[0])
    plt.hist(CH, bins=100)
    plt.show()
# "Stalled" = max height over the last 234 samples is below 0.
# NOTE(review): assumes the floor sits at y = 0 — confirm against y0;
# 234 is a magic sample count.
print('Stalled particles:', (x_data[-234:, :, 1].max(axis=0) < 0).sum())
# Average over the last quarter of the samples (assumed equilibrated).
NN = len(Ek_data) // 4
print('T_whole', Ek_levs[-NN:].mean(axis=0))
print('N_whole', N_levs[-NN:].mean(axis=0), N_levs[-NN:].mean(axis=0).sum())
# Particle count vs. mean kinetic energy (temperature) per height level.
fig, ax1 = plt.subplots()
ax1.plot(N_levs[-NN:].mean(axis=0), color='tab:blue')
ax1.set_xlabel('h level')
ax1.set_ylabel('Num of particles')
ax2 = ax1.twinx()
ax2.plot(Ek_levs[-NN:].mean(axis=0), color='tab:red')
ax2.set_ylabel('T or average kinetic energy of particles')
fig.tight_layout()
plt.show()
# Total (kinetic + potential) energy over time.
Ef = Ep_data.sum(1) + Ek_data.sum(1)
plt.ylim(Ef.min() - 0.02 * Ef.mean(), Ef.max() + 0.02 * Ef.mean())
plt.plot(Ep_data.sum(1) + Ek_data.sum(1))
plt.legend(['Full energy'])
plt.show()
# Kinetic energy per height level versus the overall mean.
plt.plot(Ek_levs[:, 2])
plt.plot(Ek_levs[:, 1])
plt.plot(Ek_levs[:, 0])
plt.plot(Ek_data.mean(1))
plt.legend(['Lev 2', 'Lev 1', 'Lev 0', 'Lev com'])
plt.show()
# Final-sample energy distributions.
Efull = Ek_data + Ep_data
fig, (ax1, ax2, ax3) = plt.subplots(nrows=3, sharex=True)
ax1.hist(Ep_data[-1], bins=32, alpha=0.3, color='red', label="E potential")
ax2.hist(Ek_data[-1], bins=32, alpha=0.3, color='blue', label="E kinetic")
ax3.hist(Efull[-1], bins=32, alpha=0.3, color='green', label="E full")
ax1.legend(['E potential'])
ax2.legend(['E kinetic'])   # fixed typo: was 'E kintectic'
ax3.legend(['E full'])
plt.show() | [
"marat61@gmail.com"
] | marat61@gmail.com |
922e9f1c4c745cffd569797c8f29fb34cb230e41 | 6b720d7bd5237ae2d087ed7c2cd360931ecb9c6b | /incident_response/shell_finder-2.0/shellfinder.py | db6219c1b209944962133e9b92dd408ef0946c9f | [] | no_license | mukareste/utils | 4609409d241c2d82a9c29afdec542d3c66e8bd3e | 39ac095ac747453018590f002134c8430e272a80 | refs/heads/master | 2020-05-17T04:58:43.454412 | 2013-12-03T08:03:20 | 2013-12-03T08:03:20 | 6,629,176 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 876 | py | #!/usr/bin/env python
from sfutils.helpers.GenOps import *
from sfutils.scanner.Scanner import *
import time
from datetime import datetime
# Path of the INI file with scanner settings (passed to GenOps.GetOptions in main()).
config_file = 'shellfinder.ini'
def main():
    """Scan the configured hosting accounts for web shells and mail the report."""
    ops = GenOps()
    shell_scanner = SearchShells()
    # All runtime settings come from the shellfinder.ini config file.
    strings_file, domains_file, report_file, legits, from_address, to_address, cc_address, header = ops.GetOptions(config_file)
    accounts = ops.GetAccounts(domains_file)
    # Time the grep pass so the report can include the scan duration.
    scan_started = time.time()
    compromised = shell_scanner.GrepForShells(accounts, strings_file, legits)
    scan_ended = time.time()
    scan_elapsed = scan_ended - scan_started
    # Persist the findings to disk, then mail the report out.
    report = ops.ProduceReport(scan_started, scan_ended, scan_elapsed, compromised, report_file)
    mail_report = ops.MailReport(report_file, from_address, to_address, cc_address, header, compromised)
# Run the scan only when executed as a script (not on import).
if __name__ == "__main__":
    main()
| [
"mitchell@tufala.net"
] | mitchell@tufala.net |
cffd4e5f3fa1d886af2822d7b6c077cd4c119148 | 9ecb0245c7b2a35a58e6a16aebcb9b1bf049f78d | /E_Dict/dictionary/urls.py | 2b8c5882d531c2bfc9a654003a496cc3955aec23 | [] | no_license | roshan-ican/english-dictionary | b82e34d4e28075408606acb24d01be0dce5cc1c1 | 59f684612c886e92dd77140a27de54b003c5a3c9 | refs/heads/master | 2023-04-20T07:22:43.004307 | 2021-05-20T15:36:35 | 2021-05-20T15:36:35 | 369,256,104 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 151 | py | from django.urls import path
from . import views
# URL routes for the dictionary app: '' -> views.index, 'word' -> views.word.
urlpatterns = [
    path('', views.index, name='index'),
    path('word', views.word, name='word')
]
"roshan-ican"
] | roshan-ican |
0d71d3a4c0162b63e70418d28879460cc7283f4f | 591fb0d8cd5abc0d0407cb60959e12d95f3fd33d | /3.Kod_Graya.py | 6134f670f63a87959824e4b330d606586d10d526 | [] | no_license | macmro7/Algorytmy_kombinatoryczne | 8bace1f877c6d8559328f39ca4664e0b2c1daac1 | 6e9ffc55293894bf31c672b4397467d4df1140e6 | refs/heads/main | 2023-05-06T04:48:35.206834 | 2021-05-18T20:37:55 | 2021-05-18T20:37:55 | 368,659,439 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,387 | py | #algorytm generujący wszystkie podzbiory zbioru {1, . . . , n} w porządku minimalnych zmian (Graya), wykorzystujący wagi Hamminga
def gray(n):
    """Print and return all subsets of {1, ..., n} in minimal-change (Gray) order.

    Each subset is printed as it is produced (preserving the original
    behaviour) and the whole sequence is also returned as a list of lists so
    the result can be used programmatically.  Handles n == 0 (returns [[]])
    instead of raising IndexError, and no longer shadows the `sum` builtin.
    """
    bits = [0] * n          # characteristic vector of the current subset
    subsets = [[]]
    print([])               # the ordering starts from the empty subset
    if n == 0:              # nothing to flip: only the empty subset exists
        return subsets
    while True:
        # The parity (Hamming weight mod 2) decides which bit to flip next.
        if sum(bits) % 2 == 0:
            bits[-1] = 1 - bits[-1]            # even parity: flip the last bit
        else:
            # Odd parity: flip the bit to the left of the rightmost 1.
            i = n
            while i > 0:
                i -= 1
                if bits[i] == 1:
                    bits[i - 1] = 1 - bits[i - 1]
                    break
            if i == 0:      # the rightmost 1 is the leftmost bit: sequence done
                break
        subset = [j + 1 for j in range(n) if bits[j] == 1]
        subsets.append(subset)
        print(subset)
    return subsets
# Demo: enumerate all subsets of {1, 2, 3} in Gray order.
gray(3)
#algorithm computing the rank of a subset T in the minimal-change (Gray) ordering of the subsets of {1, . . . , n}
def grey_rank(n, T):
    """Return the 0-based position of subset T in the Gray ordering of subsets of {1..n}.

    The Gray bit for position i is the running parity of membership of the
    elements 1..(n-i); summing the binary weights of the set bits gives the rank.
    """
    members = set(T)   # O(1) membership tests instead of scanning the list each step
    rank = 0
    bit = 0
    for i in range(n - 1, -1, -1):
        if n - i in members:
            bit = 1 - bit          # toggle parity when element n-i is present
        if bit == 1:
            rank += 1 << i         # 1 << i == 2**i, without the pow() call
    return rank
# Demo: rank of the subset [1, 3] among the subsets of {1, 2, 3, 4}.
print(grey_rank(4, [1, 3]))
#algorithm determining the subset T with a given position r in the minimal-change (Gray) ordering of the subsets of {1, . . . , n}
def gray_unrank(n, r):
    """Return the subset of {1..n} sitting at position r of the Gray ordering."""
    subset = []
    prev_bit = 0
    remaining = r
    for i in range(n - 1, -1, -1):
        weight = pow(2, i)
        bit = remaining // weight
        # An element belongs to the subset exactly where consecutive binary
        # digits of r differ (binary-to-Gray conversion).
        if bit != prev_bit:
            subset.append(n - i)
            prev_bit = bit
        remaining -= bit * weight
    return subset
# Demo: subset at Gray rank 12 for n = 4.
print(gray_unrank(4, 12))
| [
"noreply@github.com"
] | macmro7.noreply@github.com |
0d410418264ad0e178f4ddc38a54ed8162f11dfa | 00d2862c4913bf2a323d43e95f19c1beac67e062 | /Dictionaries/loop_4.py | 7a3a88705ebcef46b79e31fd9621f317320b4afb | [] | no_license | kamonchat26/workshop2 | 671fbf074b7e85dcae9783adfc410bbf1b8f30de | 2e34c0b402797bc2970f89e7d9eaff731af5f845 | refs/heads/master | 2023-03-09T22:20:31.409693 | 2021-02-20T17:05:28 | 2021-02-20T17:05:28 | 328,288,492 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 118 | py | thisdict = {"brand": "Ford", "model": "Mutang", "year": 1964}
# ex4
for value in thisdict.values():
print(value) | [
"kamonchat2607@gmail.com"
] | kamonchat2607@gmail.com |
188d763707a1a28e9d548cc56917af50d3116102 | 1134442ea1c776b45ab623991e4bd85b2a2d51a2 | /animals/__init__.py | 16f5b79d0bbe74927f3b92f09e4229da27804926 | [] | no_license | SLLittrell/Python-server | e88292f40f50a03fe77c5c772a650e7b76814b08 | a4884c04431d3c9090561682b60335633077b2ca | refs/heads/main | 2023-04-09T05:27:29.013986 | 2021-04-15T18:54:04 | 2021-04-15T18:54:04 | 355,966,237 | 0 | 0 | null | 2021-04-15T18:54:04 | 2021-04-08T15:47:09 | Python | UTF-8 | Python | false | false | 270 | py | from .request import get_all_animals
from .request import get_single_animal
from .request import get_animals_by_location
from .request import get_animals_by_status
from .request import create_animal
from .request import delete_animal
from .request import update_animal
| [
"stacey.littrell@gmail.com"
] | stacey.littrell@gmail.com |
9deeaaf0a97dccc45a1a17bcb855fc25264c7087 | ad7c1a49202a12cc1651b0d005247c38f9643652 | /Problem043.py | 6417fbf5e57d26934a06f9a8a5d40b097c833d8d | [] | no_license | Akhunrah/Project-Euler | 9816aaaf614b1368e6e5fd28f2a8e7da30f1ee56 | 979515aa743d4b033c89a20e2d6d2f197955ab65 | refs/heads/master | 2021-01-13T00:46:04.225488 | 2016-02-26T19:01:47 | 2016-02-26T19:01:47 | 52,624,649 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | import itertools
def fitsPattern(perm):
    """Project Euler 43 check: for each offset i = 1..7, the 3-digit substring
    of *perm* starting at i must be divisible by the i-th prime (2,3,...,17)."""
    primes = (2, 3, 5, 7, 11, 13, 17)
    for offset in range(1, 8):
        chunk = int(''.join(perm[offset:offset + 3]))
        if chunk % primes[offset - 1] != 0:
            return False
    return True
digits = ['0','1','2','3','4','5','6','7','8','9']
#print list(itertools.permutations(digits))
numSum = 0
for perm in list(itertools.permutations(digits)):
if fitsPattern(perm):
print int(''.join(perm))
numSum+= int(''.join(perm))
print numSum
| [
"Brendan.Bulthuis@gmail.com"
] | Brendan.Bulthuis@gmail.com |
68ab3b2e8cdb47c37425eeb7c53f64f7351ba1df | c09cdcc83c6d58df3a3619a9a50432d2120d477f | /print_me.py | 11519d4157b30ce88a3fb8010d2c4278db30b582 | [] | no_license | ZSchweyk/Ex-5-Kivy-UI | fa658a844cd8ebecc432c6470fe809565fd65a80 | 9c6284de1809498c6e7f8e2112999c88d1edc2c9 | refs/heads/main | 2023-08-21T05:37:44.337554 | 2021-10-07T20:59:55 | 2021-10-07T20:59:55 | 402,115,983 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 73 | py | import os
# Run `cat` on this script through the shell; os.system returns the exit status.
result = os.system("cat print_me.py")
print(str(result)[:-1]) | [
"zeynschweyk@dpengineering.org"
] | zeynschweyk@dpengineering.org |
c5f0f43335bb7eee33c85bf0f6b602d9f2ade59a | 69ef463375205f06261460a8f3c73895402856d2 | /command_line_demo.py | 4741c5beebca1fe493bd96bf493ed1681b1e081d | [] | no_license | ScazLab/AT-POMDP | fe5b416772fe020fd1cf970e89f46d6c4a807e99 | f97ffaa99c7b09ee52e6f5b0482e55ce64df07e4 | refs/heads/master | 2020-04-05T05:33:39.912945 | 2018-12-17T17:53:57 | 2018-12-17T17:53:57 | 156,600,793 | 5 | 2 | null | null | null | null | UTF-8 | Python | false | false | 6,975 | py | import sys
import json
import numpy as np
from task_models.lib.pomdp import POMDP, GraphPolicyBeliefRunner
from pomdp_setup_reward_transition_matrices import *
from pomdp_setup_observation_matrices import *
def test_command_line_sequence(param_file):
    """Interactive demo of the tutoring POMDP.

    Loads model parameters from the JSON file *param_file*, builds the
    reward/transition/observation matrices, solves the POMDP, then loops
    reading observation names from stdin (type "done" to quit), printing the
    chosen action and the updated belief after each observation.
    """
    #read in params
    with open(param_file) as data_file:
        params = json.load(data_file)
    # discount factor
    discount = params["discount"]
    # state variables
    knowledge_states = params["knowledge_states"]
    engagement_states = params["engagement_states"]
    attempt_states = params["attempt_states"]
    num_knowledge_levels = len(knowledge_states)
    num_engagement_levels = len(engagement_states)
    num_attempts = len(attempt_states)
    all_states = combine_states_to_one_list(knowledge_states, engagement_states, attempt_states)
    num_states = len(all_states)
    # starting distribution: uniform over the first-attempt state of every
    # (knowledge, engagement) pair; all other states start at zero.
    start = np.zeros(num_states)
    num_start_states = num_knowledge_levels * num_engagement_levels
    for i in range(num_states):
        if i%num_attempts == 0:
            start[i] = 1.0 / float(num_start_states)
        else:
            start[i] = 0.0
    # probabilities associated with the transition matrix
    prob_knowledge_gain = params["prob_knowledge_gain"]
    prob_engagement_gain = params["prob_engagement_gain"]
    prob_engagement_loss = params["prob_engagement_loss"]
    prob_correct_answer = params["prob_correct_answer"]
    prob_correct_answer_after_1_attempt = params["prob_correct_answer_after_1_attempt"]
    prob_drop_for_low_engagement = params["prob_drop_for_low_engagement"]
    # actions
    actions = params["actions"]
    num_actions = len(actions)
    # action-related reward variables
    action_rewards = params["action_rewards"]
    engagement_reward = params["engagement_reward"]
    knowledge_reward = params["knowledge_reward"]
    end_state_remain_reward = params["end_state_remain_reward"]
    reward_for_first_attempt_actions = params["reward_for_first_attempt_actions"]
    action_prob_knowledge_gain_mult = params["action_prob_knowledge_gain_mult"]
    action_prob_engagement_gain_mult = params["action_prob_engagement_gain_mult"]
    # observations
    correctness_obs = params["correctness_obs"]
    speed_obs = params["speed_obs"]
    all_obs = combine_obs_types_to_one_list(correctness_obs, speed_obs)
    num_observations = len(all_obs)
    # observation related variables
    prob_speeds_for_low_engagement = params["prob_speeds_for_low_engagement"]
    prob_speeds_for_high_engagement = params["prob_speeds_for_high_engagement"]
    action_speed_multipliers = np.array(params["action_speed_multipliers"])
    # Build the three POMDP matrices from the loaded parameters.
    R = generate_reward_matrix(actions=actions,
                               action_rewards=action_rewards,
                               engagement_reward=engagement_reward,
                               knowledge_reward=knowledge_reward,
                               end_state_remain_reward=end_state_remain_reward,
                               num_knowledge_levels=num_knowledge_levels,
                               num_engagement_levels=num_engagement_levels,
                               num_attempts=num_attempts,
                               num_observations=num_observations,
                               reward_for_first_attempt_actions=reward_for_first_attempt_actions)
    T = generate_transition_matrix(num_knowledge_levels=num_knowledge_levels,
                                   num_engagement_levels=num_engagement_levels,
                                   num_attempts=num_attempts,
                                   prob_knowledge_gain=prob_knowledge_gain,
                                   prob_engagement_gain=prob_engagement_gain,
                                   prob_engagement_loss=prob_engagement_loss,
                                   action_prob_knowledge_gain_mult=action_prob_knowledge_gain_mult,
                                   action_prob_engagement_gain_mult=action_prob_engagement_gain_mult,
                                   prob_correct_answer=prob_correct_answer,
                                   prob_correct_answer_after_1_attempt=prob_correct_answer_after_1_attempt,
                                   prob_drop_for_low_engagement=prob_drop_for_low_engagement)
    O = generate_observation_matrix(knowledge_states=knowledge_states,
                                    engagement_states=engagement_states,
                                    attempt_states=attempt_states,
                                    correctness_obs=correctness_obs,
                                    speed_obs=speed_obs,
                                    num_actions=num_actions,
                                    prob_speeds_for_low_engagement=prob_speeds_for_low_engagement,
                                    prob_speeds_for_high_engagement=prob_speeds_for_high_engagement,
                                    action_speed_multipliers=action_speed_multipliers)
    #create POMDP model
    simple_pomdp = POMDP(T, O, R, np.array(start), discount, states=all_states, actions=actions,
                         observations=all_obs, values='reward')
    # Solve for a policy graph and wrap it in a belief-tracking runner.
    simple_pomdp_graph_policy = simple_pomdp.solve(method='grid', verbose=False, n_iterations=500)
    simple_pomdp_graph_policy_belief_runner = GraphPolicyBeliefRunner(simple_pomdp_graph_policy,
                                                                      simple_pomdp)
    num_states_per_knowledge_level = num_engagement_levels * num_attempts
    problem_num = 1
    attempt_num = 1
    receiving_obs = True
    # Interactive loop: read an observation name per line until "done".
    while receiving_obs is True:
        obs = raw_input("Enter observation: ")
        if obs == "done":
            receiving_obs = False
            break
        if obs not in all_obs:
            print "Invalid observation provided\n"
            continue
        knowledge_level_index = 0
        # Query the policy for the next action, then fold the observation
        # into the belief state.
        action = simple_pomdp_graph_policy_belief_runner.get_action()
        current_belief = simple_pomdp_graph_policy_belief_runner.step(obs, action)
        print "\nProblem %i, Attempt %i: (%s, %s)" % (problem_num, attempt_num, action, obs)
        belief_str = ""
        sum_across_states = 0.0
        # Sum belief across attempt states: one printed entry per
        # (knowledge, engagement) pair, one row per knowledge level.
        for k in range(num_states):
            sum_across_states += current_belief[k]
            if k % num_attempts == num_attempts - 1:
                belief_str += "%s: %.3f\t\t" % (all_states[k][:-3], sum_across_states)
                knowledge_level_index += 1
                sum_across_states = 0.0
            if k % num_states_per_knowledge_level == num_states_per_knowledge_level-1:
                belief_str += "\n"
        print belief_str
        # An obs containing "R" or a third attempt advances to the next problem
        # ("R" presumably marks a right answer — confirm against correctness_obs naming).
        if "R" in obs or attempt_num == 3:
            problem_num += 1
            attempt_num = 1
        else:
            attempt_num += 1
# Script entry point: the parameter JSON path must be the first CLI argument.
if __name__ == "__main__":
    if len(sys.argv) > 1:
        test_command_line_sequence(sys.argv[1])
    else:
        print "please provide the name of the input parameter file as a command line argument"
| [
"aditiramachandran@gmail.com"
] | aditiramachandran@gmail.com |
471a3f4be2a981c0438a08995be929dfe49177bf | 75882e6c45faa15672fa2a7dc2fe0cf5d0ca1296 | /Project_edge-v01.py | a65c7425d8246081cd50cb676d264f10ba6d3ad7 | [] | no_license | fosdick/map-gen | d2010449a412dbb62fbc9f183dee0e866d05294e | f81b42c3219e31c4a5c38b8b5898d52ec2bb97c8 | refs/heads/master | 2020-06-20T07:54:05.548593 | 2019-07-15T19:02:59 | 2019-07-15T19:02:59 | 197,050,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,006 | py | ##master plan generator
##jarvis fosdick 10-28-05
import math, Image, ImageDraw, random, cmath, os
# Output canvas size (pixels).
wi = 320
hi = 108
# Scale factors: each source-grid cell maps to an xsc x ysc block via scale().
xsc = 3
ysc = 3
# File name for the generated master plan.
s_name = 'fin_plan_003.tif'
# Destination canvas (black RGB image).
scr = Image.new('RGB',(wi,hi),(0,0,0))
# Source rasters: site area, existing buildings, road designs, land use,
# proposed centres, and the site edge (absolute paths on drive h:).
base = Image.open('h:/c_bak/la446/project/landuse/__site-area.tif')
ebldgs = Image.open('h:/c_bak/la446/project/landuse/__site-e-bldg.tif')
##eroads = Image.open('h:/c_bak/la446/project/landuse/__site-e-roads.tif')
eroads = Image.open('h:/c_bak/la446/project/landuse/__site-design1.tif')
rden = Image.open('h:/c_bak/la446/project/landuse/__site-design1-cen.tif')
##rden = Image.open('h:/c_bak/la446/project/landuse/__site-roads-enhance1.tif')
use = Image.open('h:/c_bak/la446/project/landuse/__site-use-pro.tif')
c001 = Image.open('h:/c_bak/la446/project/landuse/__site-pro-center001.tif')
c002 = Image.open('h:/c_bak/la446/project/landuse/__site-pro-center002.tif')
sedge = Image.open('h:/c_bak/la446/project/landuse/__site-edge.tif')
# Sampling bounding box used by main() when probing random pixels of `use`.
bb = (0,0,99,32)
def area_cnt(im, pat):
    """Count how many pixels of image *im* equal the value *pat*."""
    return sum(1 for px in im.getdata() if px == pat)
def image2matrix(im):
    """Return a set of (x, y, *pixel) tuples for every pixel inside im's bbox.

    Fixes two latent bugs: the original called set.add() with a list
    (unhashable, so the first iteration raised TypeError), and it read
    pixels from the module-level `base` image instead of the `im` argument.
    """
    bbox = im.getbbox()
    pixels = set()
    for x in range(bbox[0], bbox[2]):
        for y in range(bbox[1], bbox[3]):
            # Tuple-concatenate the coordinate with the pixel value so the
            # entry is hashable.
            pixels.add((x, y) + im.getpixel((x, y)))
    return pixels
def fill_order(bb):
    """Build a list of points stepping outward from the centre of box *bb*.

    cos/sin of multiples of pi/2 (truncated to int) select one of the four
    axis directions; `pcnt` advances the direction and `cnt` the distance.
    NOTE(review): the bounds test uses `cnt + cos(...)` while the appended
    point uses `cnt * cos(...)` — presumably one of the two is a typo;
    confirm the intended ordering before relying on this function.
    """
    #bb = im.getbbox()
    p1 = getmpt(bb)
    pat_list = [p1]
    loop_cnt = 1
    cnt = 0
    pcnt = 0
    # Visit at most a quarter of the box's area.
    while cnt < (bb[2]*bb[3])/4:
        if bb[2] > (p1[0]+(cnt+int((math.cos(pcnt*math.pi))))) and bb[3] > (p1[1]+(cnt+(int(math.sin(pcnt*math.pi))))):
            pat_list.append([(p1[0]+(cnt*int((math.cos(pcnt*math.pi))))),(p1[1]+(cnt*int((math.sin(pcnt*math.pi)))))])
            loop_cnt=loop_cnt+1
            pcnt = pcnt + 0.5
        if loop_cnt==4:
            loop_cnt=0
        cnt = cnt+1
    return pat_list
def scale(pt):
    """Map a coarse-grid cell *pt* to the set of xsc x ysc output pixels it covers."""
    left = pt[0] * xsc
    top = pt[1] * ysc
    return set((a, b)
               for a in range(left, left + xsc)
               for b in range(top, top + ysc))
def prange(tl, br):
    """Return [x, y] pairs covering the half-open grid from *tl* (inclusive)
    to *br* (exclusive), iterating x in the outer loop."""
    return [[x, y]
            for x in range(tl[0], br[0])
            for y in range(tl[1], br[1])]
def fill_color(ss, color, scr):
    """Paint every coordinate in *ss* on image *scr* with *color*."""
    for xy in ss:
        scr.putpixel(xy, color)
def fill_pattern(im, ss, pattern):
    """Tile the pixels of a randomly chosen *pattern* image over the
    coordinates in *ss* on image *im*, returning *im*.

    Fixes an off-by-one: the original advanced its cursor past the last
    pattern pixel before wrapping, raising IndexError as soon as *ss* held
    more coordinates than the pattern had pixels.
    """
    imp = Image.open(getrpic(pattern))
    idata = imp.getdata()
    n = len(idata)
    cnt = 0
    for xy in ss:
        im.putpixel(xy, idata[cnt])
        cnt = (cnt + 1) % n   # wrap around the pattern instead of overrunning it
    return im
def cknbr(base, color, x, y):
    """Return the in-bounds cells of the 3x3 block around (x, y) whose pixel
    equals *color*.

    Note: the bounds use strict comparisons against the bbox, so pixels on
    the bbox edge are never matched.
    """
    bb = base.getbbox()
    hits = set()
    for a in range(x - 1, x + 2):
        for b in range(y - 1, y + 2):
            if bb[0] < a < bb[2] and bb[1] < b < bb[3] and base.getpixel((a, b)) == color:
                hits.add((a, b))
    return hits
def look4contig(base, x, y):
    """Return the contiguous region of pixels sharing the colour of (x, y).

    Rewritten to iterate until the region stops growing.  The original
    returned after a single expansion pass (a stray unconditional `return`
    defeated the convergence check) and raised UnboundLocalError on `cnt1`
    whenever the seed pixel had at most one matching neighbour.
    """
    color = base.getpixel((x, y))
    region = cknbr(base, color, x, y)
    while True:
        grown = set(region)
        for px in region:
            grown |= cknbr(base, color, px[0], px[1])
        if grown == region:   # fixpoint reached: no new pixels were added
            return region
        region = grown
def getmpt(box):
    """Return the integer centre (x, y) of a (left, top, right, bottom) box."""
    cx = int((box[0] + box[2]) / 2.0)
    cy = int((box[1] + box[3]) / 2.0)
    return cx, cy
def getrpic(type):
    """Return the full path of a randomly chosen file in the landuse/<type> folder."""
    folder = 'h:/c_bak/la446/project/landuse/' + type
    pics = os.listdir(folder)
    pick = pics[random.randrange(0, len(pics))]
    return folder + '/' + pick
def putrpx(im, ss, color, pc):
    """Paint roughly a *pc* fraction of the coordinates in *ss* with *color*,
    choosing positions at random (repeats possible); returns *im*.

    Note: the countdown stops at 1, so int(len(ss) * pc) - 1 pixels are set.
    """
    remaining = int(len(ss) * pc)
    while remaining > 1:
        idx = random.randrange(0, len(ss))
        im.putpixel(ss[idx], color)
        remaining -= 1
    return im
def not_white(im):
    """Return the (x, y) coordinates whose pixel is not pure white.

    Note: iteration starts at (0, 0) regardless of the bbox's top-left corner,
    matching the original behaviour.
    """
    bb = im.getbbox()
    coloured = set()
    for x in range(0, bb[2]):
        for y in range(0, bb[3]):
            if im.getpixel((x, y)) != (255, 255, 255):
                coloured.add((x, y))
    return coloured
def c_fuzz(ck_color, color, fuzz):
    """Return True if *ck_color* lies inside the fuzz cube centred on *color*.

    Bounds mirror the original range() semantics: each channel must satisfy
    color[i] - fuzz <= ck_color[i] < color[i] + fuzz (upper bound exclusive).
    Rewritten from an O(fuzz**3) set build per call to an O(1) comparison.
    """
    if not (isinstance(ck_color, tuple) and len(ck_color) == 3):
        # A non-RGB value can never be a member of the 3-tuple cube.
        return False
    for i in range(3):
        if not (color[i] - fuzz <= ck_color[i] < color[i] + fuzz):
            return False
    return True
def find_near(img, ck_pt, color, rng, fz):
    """Return in-bounds coordinates within *rng* of *ck_pt* whose pixel is a
    fuzzy match (via c_fuzz) for *color*."""
    bb = img.getbbox()
    matches = set()
    for a in range(ck_pt[0] - rng, ck_pt[0] + rng):
        for b in range(ck_pt[1] - rng, ck_pt[1] + rng):
            if bb[0] < a < bb[2] and bb[1] < b < bb[3]:
                if c_fuzz(img.getpixel((a, b)), color, fz):
                    matches.add((a, b))
    return matches
def main():
    """Compose the master plan onto the module-level `scr` canvas.

    Paints site edges, road-design pixels, building patterns, industrial and
    alley areas (found by random sampling of the `use` raster), proposed
    centres and main roads, saving the result after each stage.
    """
    print "\n...Generating, please wait:"
    cnt1 = 0
    # Site border edges in blue.
    for i in not_white(sedge):
        fill_color(scale(i),(0,0,255),scr)
    print "\n...Site Border Edges"
    # Road-design pixels in yellow.
    for i in not_white(eroads):
        fill_color(scale(i),(255,255,0),scr)
    scr.save('h:/c_bak/la446/project/landuse/'+s_name)
    print "\n...pedestrian spaces"
    # Existing buildings receive a random 'mixed' use pattern.
    # NOTE(review): `im` is only bound inside the loop; an empty
    # not_white(ebldgs) would make the save below raise NameError.
    for i in not_white(ebldgs):
        im = fill_pattern(scr,scale(i),'mixed')
    im.save('h:/c_bak/la446/project/landuse/'+s_name)
    print "\n...mixed use areas"
    # Randomly probe the use raster: colour (168,99,168) marks industrial
    # areas, colours near (0,114,54) mark alleys; fill their contiguous regions.
    while cnt1 < 500 :
        x = random.randrange(bb[0],bb[-2])
        y = random.randrange(bb[1],bb[-1])
        ckpx = use.getpixel((x,y))
        if ckpx == (168,99,168):
            ss = look4contig(use, x, y)
            for i in ss:
                fill_pattern(scr, scale(i), 'idust')
        if c_fuzz(ckpx,(0,114,54),(12)):
            ss = look4contig(use, x, y)
            for i in ss:
                fill_pattern(scr, scale(i), 'alley')
        cnt1 = cnt1+1
    scr.save('h:/c_bak/la446/project/landuse/'+s_name)
    print "\n...industrial spaces"
    print "\n...alley edges"
    # Proposed centres in red.
    for i in not_white(c001):
        fill_color(scale(i),(237,27,35),scr)
    scr.save('h:/c_bak/la446/project/landuse/'+s_name)
    print "\n...proposed centers 001"
    for i in not_white(c002):
        fill_color(scale(i),(237,27,35),scr)
    scr.save('h:/c_bak/la446/project/landuse/'+s_name)
    print "\n...proposed centers 002"
    cnt1 = 0
    # Main roads keep their source colours.
    for i in not_white(rden):
        fill_color(scale(i),rden.getpixel(i),scr)
    print "\n...main roads"
    scr.save('h:/c_bak/la446/project/landuse/'+s_name)
    print "\ndefining the Edges..."
    scr.save('h:/c_bak/la446/project/landuse/'+s_name)
    print "\n...Interpretating Edge Relationshiops"
def edge():
    """Upscale the generated plan and apply colour-adjacency edge rules.

    Loads the plan saved by main(), copies it scaled onto a 1000x350 canvas,
    then randomly samples 1000 pixels; for each matching colour rule the
    neighbourhood found by find_near() is recoloured or patterned on the
    output canvas scr2.  The disabled prints name each rule's intent.
    """
    wi = 1000
    hi = 350
    scr2 = Image.new('RGB',(wi,hi),(0,0,0))
    scr1=Image.open('h:/c_bak/la446/project/landuse/'+s_name)
    cnt1 = 0
    cnt_100_ = 0   # NOTE(review): appears unused
    bb = (0,0,318,106)
    # Copy every source pixel into its scaled block on the big canvas.
    for i in prange((scr1.getbbox()[0],scr1.getbbox()[1]),(scr1.getbbox()[2],scr1.getbbox()[3])):
        fill_color(scale(i),scr1.getpixel((i[0],i[1])),scr2)
    # Randomly sample points and apply the edge rules below.
    while cnt1 < 1000 :
        ss = set()
        x = random.randrange(bb[0],bb[-2])
        y = random.randrange(bb[1],bb[-1])
        #ckpx = scr1.getpixel((x,y))
        ckpx = (x,y)
        # Orange (main road) pixel near red (centre): mark yellow.
        if c_fuzz(scr1.getpixel(ckpx),(255,125,0),5):
            ss = find_near(scr1,ckpx,(237,27,36),5,10)
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_color(scale(i),(255,255,0),scr2)
            #print "\n...Main Road to Proposed Center 001"
        # Yellow (walking) pixel near red (centre): mark yellow.
        if c_fuzz(scr1.getpixel(ckpx),(255,255,0),5):
            ss = find_near(scr1,ckpx,(237,27,36),5,10)
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_color(scale(i),(255,255,0),scr2)
##            #print "\n...Walking to Proposed Center 001"
        # Yellow pixel near residential colours: apply 'cover' pattern.
        if c_fuzz(scr1.getpixel(ckpx),(255,255,0),5):
            ss=(find_near(scr1,ckpx,(109,207,246),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_pattern(scr2, scale(i), 'cover')
            ss=(find_near(scr1,ckpx,(0,166,80),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_pattern(scr2, scale(i), 'cover')
##            #print "\n...Walking to Residential"
        # Alley/green-edge colours: recolour the neighbourhood green.
        if c_fuzz(scr1.getpixel(ckpx),(0,144,54),10) or c_fuzz(scr1.getpixel(ckpx),(222,237,203),10):
            ss=(find_near(scr1,ckpx,(0,144,54),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_color(scale(i),(0,166,80),scr2)
            ss=(find_near(scr1,ckpx,(222,237,203),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_color(scale(i),(0,166,80),scr2)
##            #print "\n...Alley and Residential"
        # Large yellow clusters (> 85 pixels nearby) become plazas.
        if c_fuzz(scr1.getpixel(ckpx),(255,255,0),5):
            ss=find_near(scr1,ckpx,(255,255,0),10,10)
            if len(ss) > 85:
                for i in ss:
                    if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                        fill_pattern(scr2, scale(i), 'plaza')
##            #print "\n...Large Walking Areas to Plaza"
        # Residential colours next to industrial colours: mark yellow.
        if c_fuzz(scr1.getpixel(ckpx),(0,166,80),10) or c_fuzz(scr1.getpixel(ckpx),(109,207,246),10):
            ss = (find_near(scr1,ckpx,(168,99,168),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_color(scale(i),(255,255,0),scr2)
            ss = (find_near(scr1,ckpx,(145,61,105),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_color(scale(i),(255,255,0),scr2)
##            #print "\n...Residential Next to Idustirial"
        # Orange (main road) near residential colours: 'commer' pattern.
        if c_fuzz(scr1.getpixel(ckpx),(255,125,0),10):
            ss = (find_near(scr1,ckpx,(109,207,246),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_pattern(scr2, scale(i), 'commer')
            ss = (find_near(scr1,ckpx,(0,166,80),5,10))
            for i in ss:
                if bb[0] < i[0] < bb[2] and bb[1] < i[1] < bb[3]:
                    fill_pattern(scr2, scale(i), 'commer')
##            #print "\n...Retail and Commerce by Main Roads"
        cnt1 = cnt1+1
    scr2.save('h:/c_bak/la446/project/landuse/'+'fin_02_'+s_name)
| [
"noreply@github.com"
] | fosdick.noreply@github.com |
6d0ee70b3304e74e9b3ce46f73f5797a5ccc3185 | 3c10a95924f3c9a07a28ee9919c4c1424e4a27da | /lab-1-2-answer/PlusFirstListener.py | 6443fd381326f67f247d17504b23545118efd6b6 | [] | no_license | cyh-ustc/PB15111656 | 5bcec5e1a53b8bb78c49817c75c814ee22594f3e | 37d1649f82c49fe1d959ce28bf5301e71827571a | refs/heads/master | 2021-08-30T10:02:49.975608 | 2017-12-17T11:22:30 | 2017-12-17T11:22:30 | 104,077,718 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,377 | py | # Generated from PlusFirst.g4 by ANTLR 4.7
from antlr4 import *
if __name__ is not None and "." in __name__:
from .PlusFirstParser import PlusFirstParser
else:
from PlusFirstParser import PlusFirstParser
# This class defines a complete listener for a parse tree produced by PlusFirstParser.
# NOTE: auto-generated by ANTLR from PlusFirst.g4 — do not edit by hand;
# regenerate from the grammar instead.
class PlusFirstListener(ParseTreeListener):
    """Empty base listener: one enter/exit hook per PlusFirst parser rule."""
    # Enter a parse tree produced by PlusFirstParser#Add.
    def enterAdd(self, ctx:PlusFirstParser.AddContext):
        pass
    # Exit a parse tree produced by PlusFirstParser#Add.
    def exitAdd(self, ctx:PlusFirstParser.AddContext):
        pass
    # Enter a parse tree produced by PlusFirstParser#Mult.
    def enterMult(self, ctx:PlusFirstParser.MultContext):
        pass
    # Exit a parse tree produced by PlusFirstParser#Mult.
    def exitMult(self, ctx:PlusFirstParser.MultContext):
        pass
    # Enter a parse tree produced by PlusFirstParser#Int.
    def enterInt(self, ctx:PlusFirstParser.IntContext):
        pass
    # Exit a parse tree produced by PlusFirstParser#Int.
    def exitInt(self, ctx:PlusFirstParser.IntContext):
        pass
    # Enter a parse tree produced by PlusFirstParser#Barc.
    def enterBarc(self, ctx:PlusFirstParser.BarcContext):
        pass
    # Exit a parse tree produced by PlusFirstParser#Barc.
    def exitBarc(self, ctx:PlusFirstParser.BarcContext):
        pass
| [
"cyh88888@mail.ustc.edu.cn"
] | cyh88888@mail.ustc.edu.cn |
46a07a08a329faf72677b4ffcea537a2592aa299 | 963d3d12f1f4bef84c1723f6ff84bcd8df1c5c28 | /institute_master/urls.py | c7c51edfe8af1adf7ae202b2b2dca6f4604771fd | [] | no_license | raghu4033/kalaveethi | 1aa287b2b124fc3904a31068abb2ca6e2c3ea3d5 | 698a798461798eca5851737478ea3957b31ed3d4 | refs/heads/master | 2023-01-19T23:29:18.731126 | 2020-11-27T08:27:48 | 2020-11-27T08:27:48 | 310,601,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 978 | py | """institute_master URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path,include
from django.conf.urls.static import static
from django.conf import settings
# URL routes: Django admin plus the main app's URLconf.
urlpatterns = [
    path('admin/',admin.site.urls),
    path('',include('myapp.urls')),
]
# Serve user-uploaded media files directly when running with DEBUG=True.
if settings.DEBUG:
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"harshadsatasiya100@gmail.com"
] | harshadsatasiya100@gmail.com |
ab81aec92b4137221d359ec9b7ddacf88e47a00b | 81e008b746f89d144066ee5589fafa370f37e5a5 | /1005.py | 8bf880a472d0444a295f1fb678435685f4c44eb9 | [] | no_license | osmarsalesjr/SolucoesUriOnlineJudgeEmPython3 | 5c43fb37608ff3d8ff042d94e6b897f4b1d6afb9 | 5de3fa39483fd4ff409efa5981e65daba7744809 | refs/heads/master | 2021-01-01T06:40:51.938732 | 2017-08-30T21:46:39 | 2017-08-30T21:46:39 | 97,482,585 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 175 | py |
def main():
    """Read two exam grades from stdin and print their weighted average (URI 1005)."""
    first = float(input())
    second = float(input())
    # Weights: 3.5 for the first exam, 7.5 for the second (total weight 11).
    average = ((first * 3.5) + (second * 7.5)) / 11
    print("MEDIA = %.5f" % average)
if __name__ == '__main__':
main() | [
"osmarsalesjr@gmail.com"
] | osmarsalesjr@gmail.com |
ac492aaff24db73e4bcdf97093239025e9388ee9 | 0e063d2614f1c156fd4d180520487905c2e387b5 | /app/core/management/commands/wait_for_db.py | 74ddae437a87be914405f4484bdba60360006aeb | [
"MIT"
] | permissive | ranman034/recipe-app-api | 75a70971ac1defcd81ecb14c60b4ed82efe401a2 | 5c4cda9c50012b66cb74b353cca36e09a8971c80 | refs/heads/master | 2021-04-07T12:27:57.423470 | 2020-04-04T23:44:14 | 2020-04-04T23:44:14 | 248,675,428 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 703 | py | """Wait for db command file"""
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    """Django management command that blocks until the database is available."""

    def handle(self, *args, **options):
        """Retry once per second until fetching the default connection stops raising."""
        self.stdout.write('Waiting for database...')
        conn = None
        while not conn:
            try:
                conn = connections['default']
            except OperationalError:
                self.stdout.write('Database unavailable, waiting 1 second...')
                time.sleep(1)
        self.stdout.write(self.style.SUCCESS('Database available!'))
| [
"ranman034@gmail.com"
] | ranman034@gmail.com |
23ab00fb5704520dc6bd99fabd951f3fdff77f8c | b9a9ebcafb0dd0921ef6409618b15d7d95cce998 | /python3_learning/operators.py | 799832c4306968e3d700d64b3c2e03283a6007cf | [] | no_license | wickyou23/python_learning | 16f52ba3e0124f9699d20c3f949dfa00fe667194 | 7678d47b9910cb68c9e80d1c4b7daf65d5d4f77e | refs/heads/main | 2023-02-18T20:07:03.715764 | 2021-01-22T08:17:02 | 2021-01-22T08:17:02 | 331,880,750 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | py | #####Python operators
# x1 = 3/2 #float div
# x2 = 3//2 #interger div
# x3 = 3**2
# x4 = 6%2
# print(x1, x2, x3, x4)
#####Operator Precedence
# x5 = 1+3*2
# print(x5)
#####Augmented Assignment Operator
# x6 = 1
# x6 += 1
# print(x6) | [
"thangphung@homatechs.com"
] | thangphung@homatechs.com |
594f7ee6ba623887c47dbde85e653e5183136971 | f9886d2b57d92186773d73f59dc0a0e9759b8944 | /04_bigdata/02_Standardization_Analysis/2.Excel/10_excel_column_by_name_all_worksheets.py | c9fc988e32e973138146a52d7b0e7546d7aca05f | [] | no_license | Meengkko/bigdata_python2019 | 14bab0da490bd36c693f50b5d924e27f4a8e02ba | a28e964ab7cefe612041830c7b1c960f92c42ad5 | refs/heads/master | 2022-12-12T15:51:21.448923 | 2019-11-08T03:50:15 | 2019-11-08T03:50:15 | 195,142,241 | 0 | 0 | null | 2022-04-22T22:37:59 | 2019-07-04T00:17:18 | HTML | UTF-8 | Python | false | false | 1,894 | py | # 목적: 열의 인덱스 값을 사용하여 특정 열 선택하기
# 라이브러리 호출
import sys
from datetime import date
from xlrd import open_workbook, xldate_as_tuple
from xlwt import Workbook
# Input/output paths come from the command-line arguments.
input_file = sys.argv[1] # sales_2013.xlsx
output_file = sys.argv[2] # output_files/10_output_basic.xls
# Create the output workbook and its single worksheet.
output_workbook = Workbook()
output_worksheet = output_workbook.add_sheet('selected_columns_all_worksheets')
# Columns to copy; their indices are determined from the first sheet's header.
my_columns = ['Customer Name', 'Sale Amount']
first_worksheet = True
# Open the input workbook and gather the selected columns from every worksheet.
with open_workbook(input_file) as workbook:
    data = [my_columns]
    index_of_cols_to_keep = []
    for worksheet in workbook.sheets():
        # Resolve the column indices once, from the first sheet's header row.
        if first_worksheet:
            header = worksheet.row_values(0)
            for column_index in range(len(header)):
                if header[column_index] in my_columns:
                    index_of_cols_to_keep.append(column_index)
            first_worksheet = False
        # Copy each data row (row 0 is the header and is skipped).
        for row_index in range(1, worksheet.nrows):
            row_list = []
            for column_index in index_of_cols_to_keep:
                cell_value = worksheet.cell_value(row_index, column_index)
                cell_type = worksheet.cell_type(row_index, column_index)
                # Cell type 3 (XL_CELL_DATE): convert to MM/DD/YYYY text.
                if cell_type == 3:
                    date_cell = xldate_as_tuple(cell_value, workbook.datemode)
                    date_cell = date(*date_cell[0:3]).strftime('%m/%d/%Y')
                    row_list.append(date_cell)
                else:
                    row_list.append(cell_value)
            data.append(row_list)
# Write the collected rows to the output worksheet and save the file.
for list_index, output_list in enumerate(data):
    for element_index, element in enumerate(output_list):
        output_worksheet.write(list_index, element_index, element)
output_workbook.save(output_file)
| [
"you@ddd.com"
] | you@ddd.com |
2bf6c9b4565b482f6f3bf591dff52b720de3c933 | 3471dc6d48ba54c7f215148d9a37289d5ace426f | /settings.py | 3ed6dbc9b03a05b0564b5143cc69949af2504203 | [] | no_license | bigguozi/AlienInvasion | a173130d141afd39e63a23b7bc3bff644e79e74c | 350d3e20854d60e81a15306b6fbe070f7c6d3071 | refs/heads/master | 2020-03-15T10:36:42.791491 | 2018-05-04T07:53:28 | 2018-05-04T07:53:28 | 132,102,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 898 | py | import pygame
import math
class Settings():
    """Class that stores all of the game's settings (screen, ship, enemies, items)."""
    def __init__(self):
        # Screen dimensions and background colour.
        self.screen_width = 1280
        self.screen_height = 960
        self.bg_color = (230,230,230)
        # Gameplay counters and limits.
        self.enemy_num = 8
        self.score = 0
        self.bullet_num = 10
        self.shot_max_num = 3
        self.hit_reward = 4
        # Movement speeds (units presumably pixels per update — TODO confirm).
        self.ship_speed = 50
        self.enemy_speed = 60
        # Hit points.
        self.enemy_hp = 4
        self.ship_hp = 8
        # Sprite sizes in pixels.
        self.enemy_size = (90,90)
        self.ship_size = (60,60)
        # Ship rotation increment in radians.
        self.ship_angle_inc = math.pi/5
        self.bullet_speed = 40
        # Background image, scaled to fill the whole screen.
        self.bg = pygame.image.load('background.jpg')
        self.bg = pygame.transform.scale(self.bg,(self.screen_width,self.screen_height))
        # Item sprites keyed by item name.
        self.hp_item_str = 'hp_item.png'
        self.item_dict={'hp_item':self.hp_item_str}
        self.item_size= (96,50)
"sunzhengyuan@qq.com"
] | sunzhengyuan@qq.com |
3d21736f1f555dbda1de1de0359172517d5941ef | abca5285baf353f2296aae2ed5e040c89b9019e3 | /auth/tests/__init__.py | decf1a2e36ea8ed3e7b453181c05bef27750ddf1 | [] | no_license | alekLukanen/Cybersecurity_authenticationViaDB | b73f14a12a76f935b5812094d79989d58e511898 | eda76ecfee22faa3f2cb63d61e1c5e90d430319f | refs/heads/master | 2022-12-10T10:33:22.404564 | 2019-04-12T21:47:19 | 2019-04-12T21:47:19 | 179,351,987 | 0 | 0 | null | 2022-04-22T21:05:42 | 2019-04-03T18:59:24 | Python | UTF-8 | Python | false | false | 12 | py | import comms | [
"alukanen@icloud.com"
] | alukanen@icloud.com |
254cbcb490b5f78744401d276bf2b8db5c0150b2 | 123869c3d22fa3512653c9ff4bbe1692e21d11f4 | /CS320/hw2/interpret.py | 4d21df039deb2840431a780c3b00f668a159f6f3 | [] | no_license | Pgomes94/School | 3b915bfeec397688f8dd8ccaf3dee2b405d1e865 | da5344be0efe3e26d7f6d2dca386fc834c3a9357 | refs/heads/master | 2021-03-24T12:16:52.719193 | 2016-07-30T03:57:47 | 2016-07-30T03:57:47 | 46,447,077 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 8,015 | py | from math import log, floor
import re
import parse
def tokenize(terminals, characters):
    """Split *characters* into tokens, keeping every terminal symbol.

    Builds one alternation pattern out of the (regex-escaped) terminal
    strings and splits with a capturing group so that the terminals
    themselves are kept; whitespace-only and empty fragments are dropped.
    """
    pattern = '({0})'.format('|'.join(re.escape(term) for term in terminals))
    pieces = re.split(pattern, characters)
    return [piece for piece in pieces if piece != "" and not piece.isspace()]
def evalTerm(env, t):
    """Evaluate an arithmetic term node against the variable bindings *env*.

    A term is a dict with a single tag key ('Number', 'Variable', 'Log',
    'Plus', 'Mult' or 'Parens') mapping to a list of child nodes.  Returns
    the numeric value, or None when a variable is unbound, a child fails to
    evaluate, or the node is not a recognised term.
    """
    if type(t) != dict:
        return None
    for tag, kids in t.items():
        if tag == 'Number':
            return kids[0]
        if tag == 'Variable':
            name = kids[0]
            if name in env:
                return env[name]
        elif tag == 'Log':
            # Base-2 logarithm, truncated to an integer.
            operand = evalTerm(env, kids[0])
            if operand is not None:
                return floor(log(operand, 2))
        elif tag == 'Plus':
            left = evalTerm(env, kids[0])
            right = evalTerm(env, kids[1])
            if left is not None and right is not None:
                return left + right
        elif tag == 'Mult':
            left = evalTerm(env, kids[0])
            right = evalTerm(env, kids[1])
            if left is not None and right is not None:
                return left * right
        elif tag == 'Parens':
            return evalTerm(env, kids[0])
def evalFormula(env, f):
    """Evaluate a boolean formula node against the variable bindings *env*.

    A formula is either one of the literal strings 'True'/'False' or a dict
    tagged 'Variable', 'Not', 'Xor' or 'Parens'.  Returns a bool, or None
    for an unbound variable / unrecognised node.
    """
    if type(f) == str:
        if f == 'True':
            return True
        if f == 'False':
            return False
        return None
    if type(f) != dict:
        return None
    for tag, kids in f.items():
        if tag == 'Variable':
            name = kids[0]
            if name in env:
                return env[name]
        if tag == 'Not':
            return not evalFormula(env, kids[0])
        elif tag == 'Xor':
            # Exclusive-or: true exactly when the operands differ.
            return evalFormula(env, kids[0]) != evalFormula(env, kids[1])
        elif tag == 'Parens':
            return evalFormula(env, kids[0])
def execProgram(env, s):
    """Execute a parsed statement/program node *s* under environment *env*.

    *s* may be a list of statements (the tail is remembered in ``s2`` and,
    for some node kinds, executed after the head), a dict whose single key
    tags the statement kind, or one of the literal strings
    'End'/'True'/'False'.  Statement nodes return an ``(env, output_list)``
    pair; expression-like nodes return their value directly.
    """
    # When given a whole statement list, keep the list in s2 and work on its head.
    if type(s) == list:
        s2 = s
        s = s[0]
    if type(s) == dict:
        for label in s:
            children = s[label]
            if label == 'Print':
                # Evaluate every child; drop the last entry, then run the
                # remembered program tail (if any) and append its first output.
                f = [execProgram(env, x) for x in children]
                f = f[0:len(f) -1]
                # ``s2`` only exists when *s* arrived as a list; probe via NameError.
                try:
                    s2
                except NameError:
                    s2 = None
                if not s2 is None:
                    f.append(execProgram(env, s2[1])[1][0])
                return (env, f)
            elif label == 'Assign':
                # Bind the variable, then continue with the rest of the program.
                f = children[0]
                env[(f['Variable'])[0]] = execProgram(env,children[1])
                return execProgram(env, (children[2:])[0])
            elif label == 'Not':
                f = children[0]
                v = evalFormula(env, f)
                return not v
            elif label== 'Variable':
                if children[0] in env:
                    return env[children[0]]
            elif label == 'Number':
                f = children[0]
                return f
            elif label == 'If':
                # Condition must be a variable already bound to a boolean.
                f = children[0]
                f = env[ (f['Variable'])[0]]
                if f == True:
                    return execProgram(env, (children[1:]))
                elif f == False:
                    return execProgram(env, (children[2:]))
                return
            elif label =='While':
                # Re-evaluate the condition each pass; collect the body's output.
                f = children[0]
                f2 = execProgram(env,f)
                e1=list()
                while f2 == True:
                    f2 = children[1:]
                    e1.append(execProgram(env, f2[0])[1][0])
                    execProgram(env, f)
                    f2 = execProgram(env, f)
                try:
                    s2
                except NameError:
                    s2 = None
                # After the loop, run the continuation and append the values of
                # every variable in the resulting environment.
                f2 = children[2:]
                e = execProgram(env,f2[0])
                for x in e[0]:
                    e1.append(env[x])
                return (env, e1)
                # NOTE(review): everything below this return is unreachable
                # dead code (left over from earlier attempts, incl. debug prints).
                if not s2 is None:
                    e1.append(execProgram(env, s2[1])[1][0])
                    return (env,e1)
                temp=children[1:][0].values()[0]
                while( type(temp == dict)):
                    if 'End' in temp:
                        return (env,e1)
                    temp = temp[1:][0].values()[0]
                try:
                    s2
                except NameError:
                    s2 = None
                f2 = children[2:]
                print(f2)
                print(execProgram(env, f2[0]))
                e1.append(execProgram(env, f2[0])[1][0])
                return (env, e1)
                if not s2 is None:
                    e1.append(execProgram(env, s2[1])[1][0])
                    return (env,e1)
            elif label =='Plus':
                f = evalTerm(env, s)
                return f
            elif label =='Mult':
                f = evalTerm(env, s)
                return f
            elif label == 'Xor':
                t2 = children[1]
                v2 = evalFormula(env, t2)
                t1 = children[0]
                v1 = evalFormula(env, t1)
                return v1 != v2
    elif type(s) == str:
        # Terminal markers and boolean literals.
        if s =='End':
            return (env, [])
        if s == 'False':
            return False
        if s == 'True':
            return True
def interpret(tokens):
    """Tokenize, parse and execute the given source text.

    Returns the list of values produced by the program's print statements.
    """
    terminals = ['print', 'assign', ':=', 'if', 'true', 'false', 'while',
                 'not', 'xor', '+', '*', ';', '{', '}', '(', ')', ' ']
    token_stream = parse.tokenize(terminals, tokens)
    syntax_tree = parse.program(token_stream)
    return execProgram({}, syntax_tree[0])[1]
| [
"pgomes94@137-21-41-155-wireles1x.bu.edu"
] | pgomes94@137-21-41-155-wireles1x.bu.edu |
cf78d60fd3af2bcc76145207d4dd89cda7938994 | 13710f5fff99f4225956b705bb81b7055f119b7c | /homework1-1.py | 235731744a6e018d3c389d8aa58dfb045e0f5a31 | [] | no_license | QzP-QD/UAI_homework | a94f6a6176a29bdd6b4b56ce8fca52f2fd0728ca | 860b0b74f0e928405a98095c067d86cb6dfc6d2d | refs/heads/master | 2023-01-14T08:34:38.908896 | 2020-11-16T13:30:19 | 2020-11-16T13:30:19 | 311,895,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,388 | py | #画出云模型点阵图和收敛曲线(动态)————He在小于En/3附近有效果
import numpy as np
import matplotlib.pyplot as plt
from time import time
def plot_cloud_model(Ex, En, He, n, num, color=''):
    """Scatter-plot one normal cloud model into subplot *num* of a 1x4 figure.

    Ex = expectation, En = entropy, He = hyper-entropy, n = droplet count.
    Random seeds come from the wall clock, so output is non-deterministic.
    """
    # Ex = 0 # expectation
    # En = 1 # entropy
    # He = 0.1 # hyper-entropy
    # n = 500 # number of cloud droplets
    Y = np.zeros((1, n))
    np.random.seed(int(time()))
    # Per-droplet entropy samples Enn ~ N(En, He); overwritten with droplet x below.
    X = np.random.normal(loc=En, scale=He, size=n)
    Y = Y[0] # flatten the (1, n) array to shape (n,)
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = True
    fig = plt.figure(len(plt.get_fignums()))
    ax = fig.add_subplot(1, 4, num + 1)
    for i in range(n):
        np.random.seed(int(time()) + i + 1)
        Enn = X[i]
        # Droplet position ~ N(Ex, |Enn|); certainty from the Gaussian kernel.
        X[i] = np.random.normal(loc=Ex, scale=np.abs(Enn), size=1)
        Y[i] = np.exp(-(X[i] - Ex) * (X[i] - Ex) / (2 * Enn * Enn))
    ax.scatter(X, Y, s=5, alpha=0.5, c=color, marker='o', label='云图1')
    title = '期望:%.2f,熵:%.2f,超熵:%.2f,云滴数:%d' % (Ex, En, He, n)
    ax.set_title(title)
    ax.legend(loc='best')
    ax.set_xlabel('指标值')
    ax.set_ylabel('确定度')
    ax.grid(True)
    plt.show()
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = True
def updateXY(He):
    """Generate one batch of normal-cloud droplets for hyper-entropy *He*.

    Fixed parameters: Ex=0, En=1, n=5000.  For each droplet a per-droplet
    entropy Enn ~ N(En, He) is drawn, then x ~ N(Ex, |Enn|) and the
    certainty y = exp(-(x-Ex)^2 / (2*Enn^2)).  Seeds come from the wall
    clock, so results differ between runs.  Returns the (X, Y) arrays.
    """
    Ex = 0
    En = 1
    n = 5000
    Y = np.zeros((1, n))
    Y1 = np.zeros((1, n))  # NOTE(review): Y1/Y2 are never used in this function
    Y2 = np.zeros((1, n))
    np.random.seed(int(time()))
    X = np.random.normal(loc=En, scale=He, size=n)
    Y = Y[0]  # flatten (1, n) -> (n,)
    for i in range(n):
        np.random.seed(int(time()) + i + 1)
        Enn = X[i]
        X[i] = np.random.normal(loc=Ex, scale=np.abs(Enn), size=1)
        Y[i] = np.exp(-(X[i] - Ex) * (X[i] - Ex) / (2 * Enn * Enn))
    return X,Y
# Animated demo: grow the hyper-entropy He over time and redraw the droplet
# cloud together with its two envelope curves on every frame.
fig = plt.figure(len(plt.get_fignums()))
ax = fig.add_subplot(1, 1, 1)
plt.ion()
for i in range(200):
    plt.cla()
    He = i/10
    X,Y= updateXY(He)
    # Envelope curves exp(-x^2 / (2*(1 +/- 3*He)^2)) bracketing the droplets.
    X1 = np.linspace(-10,10,100)
    Y1 = np.exp(-(X1 - 0)**2 / (2 * (1 + 3 * He)**2))
    Y2 = np.exp(-(X1 - 0)**2 / (2 * (1 - 3 * He)**2))
    title = '期望:%.2f,熵:%.2f,超熵:%.2f,云滴数:%d' % (0, 1, He, 500)
    plt.title(title)
    plt.xlim((-10, 10))
    plt.ylim((0, 1))
    plt.grid(True)
    plt.plot(X1, Y1,label='linear')
    plt.plot(X1, Y2,label='quadratic')
    plt.scatter(X, Y, s=5, alpha=0.5, c='r', marker='o', label='云图1')
    plt.pause(0.02)  # brief pause so the frame is rendered
plt.ioff()
plt.show()
| [
"qzp332@bupt.edu.cn"
] | qzp332@bupt.edu.cn |
ebc12958117ea6b33a4abf6238daca974964b779 | 470f785edde1513341434a8e0bd641bf84341925 | /src/MetadataDbLinker/monkey_patcher.py | 9ee0ec384b0784b5f4ce01931d7f2ec44295aa27 | [] | no_license | Septima/qgis-metadatafunctionality | 6d5a5213e146c5cbeb4cf070b26bcfa66b907357 | c7ec805a54b00eceb610e61d624c66e4877dee6a | refs/heads/master | 2021-05-01T00:26:06.527219 | 2020-05-15T11:21:47 | 2020-05-15T11:21:47 | 55,409,107 | 3 | 2 | null | 2020-05-15T11:21:49 | 2016-04-04T12:23:23 | Python | UTF-8 | Python | false | false | 2,602 | py | # ----------------------------------------------------------------
# Monkey patch the HTML generator for the db manager
# ----------------------------------------------------------------
# http://blog.dscpl.com.au/2015/03/safely-applying-monkey-patches-in-python.html
# https://pypi.python.org/pypi/wrapt
# from db_manager.db_plugins.html_elems import HtmlContent, HtmlSection, HtmlParagraph, HtmlList, HtmlTable, HtmlTableHeader, HtmlTableCol
#
# PGTableInfo.getTableInfo_original = PGTableInfo.getTableInfo
# PGVectorTableInfo.getTableInfo_original = PGTableInfo.getTableInfo
#
#
# def newGetTableInfo(self):
# QgsMessageLog.logMessage("newGetTableInfo()")
#
# ret = []
#
# general_info = self.generalInfo()
# if general_info is None:
# pass
# else:
# ret.append(HtmlSection(QApplication.translate("DBManagerPlugin", 'General info'), general_info))
#
# return ret
#
#
# # toHtml_backup = DatabaseInfo.toHtml
#
# PGVectorTableInfo.getTableInfo = newGetTableInfo
# PGTableInfo.getTableInfo = newGetTableInfo
# QgsMessageLog.logMessage("PGTableInfo.getTableInfo() patched.")
import wrapt
import inspect
from qgis.core import QgsMessageLog
from db_manager.db_plugins.postgis.info_model import PGTableInfo
def wrapper(wrapped, instance, args, kwargs):
    # wrapt-style wrapper: ignores the wrapped callable entirely and
    # returns a fixed placeholder string (stubs out getTableInfo).
    return "xxxx"
# Monkey-patch PGTableInfo.getTableInfo with the stub above.
wrapt.wrap_function_wrapper(PGTableInfo, 'getTableInfo', wrapper)
# @wrapt.decorator
# def universal(wrapped, instance, args, kwargs):
#
# from qgis.core import QgsMessageLog
# QgsMessageLog.logMessage(str(inspect.isclass), "General")
#
# if instance is None:
# if inspect.isclass(wrapped):
# # Decorator was applied to a class.
# return wrapped(*args, **kwargs)
# else:
# # Decorator was applied to a function or staticmethod.
# return wrapped(*args, **kwargs)
# else:
# if inspect.isclass(instance):
# # Decorator was applied to a classmethod.
# return wrapped(*args, **kwargs)
# else:
# # Decorator was applied to an instancemethod.
# return wrapped(*args, **kwargs)
#
# PGTableInfo.getTableInfo = universal(PGTableInfo.getTableInfo)
# import inspect
# QgsMessageLog.logMessage(str(inspect), "Septima")
#
#
# def wrapper(wrapped, instance, args, kwargs):
# QgsMessageLog.logMessage("@wrapper", "Septima")
# return wrapped(*args, **kwargs)
#
# wrapt.wrap_function_wrapper('db_manager.db_plugins.postgis.info_model', 'PGTableInfo.getTableInfo', wrapper)
#
# QgsMessageLog.logMessage("PGTableInfo.getTableInfo() patched.", "Septima") | [
"bs@metascapes.org"
] | bs@metascapes.org |
e831918416256c25927fb1be5c435b8555f05dc6 | 577a40ff1c84d28b88a9ade84d265587d28ed2a3 | /0707/02.py | 1c05cbec1dfbd4eeaecf48ec375bcfb73a53d48c | [] | no_license | bosl95/MachineLearning_Note | b167c182fcf5186f6466b8b062cde83b076b0b04 | 934714c5a62e4864f2b5338153c3aaeb3363abe9 | refs/heads/master | 2022-12-06T20:58:20.457567 | 2020-09-05T16:18:11 | 2020-09-05T16:18:11 | 279,835,223 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,580 | py | import struct
name = 'train'
maxdata = 1000
path = 'mnist/'
lbl_f = open(path + name + '-labels-idx1-ubyte', 'rb') # label file (binary, IDX format)
img_f = open(path + name + '-images-idx3-ubyte', 'rb') # image file (binary, IDX format)
csv_f = open(path + name + '.csv', 'w', encoding='utf-8')
mag, lbl_count = struct.unpack('>II', lbl_f.read(8)) # read magic number and label count
print(lbl_count)
mag, img_count = struct.unpack('>II', img_f.read(8)) # read magic number and image count
print(mag)
print(img_count)
row, col = struct.unpack('>II', img_f.read(8)) # read image width and height
print(row)
print(col)
px = row * col # size in bytes of a single digit image
res = []
for idx in range(lbl_count):
    if idx > maxdata: # only convert the first `maxdata` records
        break
    label = struct.unpack("B", lbl_f.read(1))[0] # read one label byte
    bdata = img_f.read(px) # read one image's worth of pixel bytes
    sdata = list(map(lambda n: str(n), bdata))
    # print(sdata)
    # Write one CSV row: label first, then the pixel values.
    csv_f.write(str(label) + ',')
    csv_f.write(','.join(sdata) + '\r\n')
    if idx < 10: # optional: dump the first 10 digits as PGM images for eyeballing
        s = 'P2 28 28 255\n'
        s += ' '.join(sdata)
        iname = path + '{0}-{1}-{2}.pgm'.format(name, idx, label)
        with open(iname, 'w', encoding='utf-8') as f:
            f.write(s)
csv_f.close()
lbl_f.close()
img_f.close()
"bosl95@naver.com"
] | bosl95@naver.com |
369183498068e8e4659aa370fd0efa60b8a6ebd1 | 72316a1d1a2e0358486d50aeecbac8219ccdf092 | /ietf/bin/send-milestone-reminders | 9ed5d254f74bbac1e2488cb1549dcb81cb5f4510 | [
"BSD-3-Clause",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | algby/ietfdb | 363541941bd6e806bed70891bed4c7f47c9f0539 | 9ff37e43abbecac873c0362b088a6d9c16f6eed2 | refs/heads/master | 2021-01-16T18:57:50.100055 | 2014-09-29T21:16:55 | 2014-09-29T21:16:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,566 | #!/usr/bin/env python
#
# This script will send various milestone reminders. It's supposed to
# be run daily, and will then send reminders weekly/monthly as
# appropriate.
import datetime, os
import syslog
# Django settings must be configured before importing any ietf module.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ietf.settings")
syslog.openlog(os.path.basename(__file__), syslog.LOG_PID, syslog.LOG_LOCAL0)
from ietf.group.mails import *
today = datetime.date.today()
# isoweekday(): Monday == 1; day of month 1 marks the monthly run.
MONDAY = 1
FIRST_DAY_OF_MONTH = 1
if today.isoweekday() == MONDAY:
    # send milestone review reminders - ideally we'd keep track of
    # exactly when we sent one last time for a group, but it's a bit
    # complicated because people can change the milestones in the mean
    # time, so dodge all of this by simply sending once a week only
    for g in groups_with_milestones_needing_review():
        mail_sent = email_milestone_review_reminder(g, grace_period=7)
        if mail_sent:
            syslog.syslog("Sent milestone review reminder for %s %s" % (g.acronym, g.type.name))
early_warning_days = 30
# send any milestones due reminders (this part runs on every daily invocation)
for g in groups_needing_milestones_due_reminder(early_warning_days):
    email_milestones_due(g, early_warning_days)
    syslog.syslog("Sent milestones due reminder for %s %s" % (g.acronym, g.type.name))
if today.day == FIRST_DAY_OF_MONTH:
    # send milestone overdue reminders - once a month
    for g in groups_needing_milestones_overdue_reminder(grace_period=30):
        email_milestones_overdue(g)
        syslog.syslog("Sent milestones overdue reminder for %s %s" % (g.acronym, g.type.name))
| [
"henrik@levkowetz.com@7b24d068-2d4e-4fce-9bd7-cbd2762980b0"
] | henrik@levkowetz.com@7b24d068-2d4e-4fce-9bd7-cbd2762980b0 | |
8d3790bbab25bdfb53a435740b4fd7b48ae7f666 | b0e438a81a81593cf73d36d36eb618dc165ff611 | /IISY/iisy_landing/admin.py | af5a259a9a4a2d2d12086b542cb3a1b1198cdc5a | [] | no_license | maxwesterback/public-iisy | 51115934343b4a0b20c201b83129d73fc0a9b052 | b4848dad54963e7fbc8ccef73733bc07c1880312 | refs/heads/master | 2023-08-11T09:08:08.053625 | 2021-09-22T11:50:14 | 2021-09-22T11:50:14 | 401,070,557 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,809 | py | from django.contrib import admin
from .models import Department
from .models import Entity
from .models import Ticket
from .models import EntityType
from .models import Room
import qrcode
from os import path
from PIL import Image, ImageDraw, ImageFont
import os
import tempfile
import PIL
from django.http import HttpResponse
import zipfile
from io import BytesIO
# Inline form for entities displaying tickets
class TicketInLine(admin.TabularInline):
    """Tabular inline showing a ticket's rows on its parent change page."""
    model = Ticket
    extra = 0
    readonly_fields = ['location', ]
# Inline form for Department displaying entities
class EntityInLine(admin.TabularInline):
    """Tabular inline showing an entity's rows on its parent change page."""
    model = Entity
    extra = 0
    exclude = ('quantity',)
    readonly_fields = ['id', 'slug', 'uuid']
class DepartmentAdmin(admin.ModelAdmin):
    """Admin for departments; entity count is maintained elsewhere and shown read-only."""
    list_display = ['name', 'id', 'numberOfEntities']
    list_filter = ('name',)
    #inlines = [EntityInLine, ]
    readonly_fields = ['id']
class EntityTypeAdmin(admin.ModelAdmin):
    """Admin for entity types."""
    list_display = ['name', 'email']
# Action for qr codes
def make_qr_codes(modeladmin, request, queryset):
    """Admin action: return a zip of QR-code PNGs for the selected objects.

    Each selected object's ``slug`` is encoded as a QR code, stamped with a
    "<name><id>" label, and all images are streamed back to the browser as
    an attachment named ``qr_codes.zip``.
    """
    buffer = BytesIO()
    zf = zipfile.ZipFile(buffer, 'w')
    for obj in queryset:
        qr = qrcode.QRCode(
            version=1,
            box_size=15,
            border=5
        )
        qr.add_data(obj.slug)
        qr.make(fit=True)
        img = qr.make_image(fill='black', back_color='white')
        width, height = img.size
        # Stamp a human-readable label near the bottom-left corner.
        draw = ImageDraw.Draw(img)
        text_name = obj.name + str(obj.id)
        draw.text((40, height - 40), text_name)
        # PIL wants a seekable file to write PNG data; round-trip through a
        # temp file and make sure it is closed (it previously leaked until GC).
        temp = tempfile.TemporaryFile()
        try:
            img.save(temp)
            temp.seek(0)
            zf.writestr(obj.name + str(obj.id) + '.png', temp.read())
        finally:
            temp.close()
    zf.close()
    response = HttpResponse(buffer.getvalue())
    response['Content-Type'] = 'application/x-zip-compressed'
    response['Content-Disposition'] = 'attachment; filename=qr_codes.zip'
    return response
make_qr_codes.short_description = "Make qr codes for selected devices"
class RoomAdmin(admin.ModelAdmin):
    """Admin for rooms; supports bulk QR-code generation via the shared action."""
    list_display = ['name', 'location']
    actions = [make_qr_codes]
    readonly_fields = ['uuid', 'slug']
class EntityAdmin(admin.ModelAdmin):
    """Admin for entities.

    Deleting entities also decrements the parent department's entity
    counter; the add/change views hide different sets of fields.
    """
    list_display = ('entityType', 'name', 'id', 'department', 'numberOfOngoingTickets',
                    'location', 'email')
    list_filter = ('department', 'name')
    readonly_fields = ['id', 'uuid', 'slug', 'numberOfOngoingTickets', ]
    actions = [make_qr_codes]
    #inlines = [TicketInLine, ]
    # Changing the add and change views so they display different fields
    def delete_queryset(self, request, queryset):
        # Keep the department's cached entity count in sync with deletions.
        for obj in queryset:
            parent = obj.department
            parent.numberOfEntities -= 1
            parent.save()
            obj.delete()
    def add_view(self, request, extra_content=None):
        # NOTE(review): mutating self.exclude affects the shared ModelAdmin
        # instance, so add/change views race on it — TODO confirm intended.
        self.exclude = ('numberOfOngoingTickets',
                        'scanned', 'id', 'uuid', 'slug', 'qrCode',)
        return super(EntityAdmin, self).add_view(request)
    def change_view(self, request, object_id, extra_content=None):
        self.exclude = ('qrCode', 'quantity')
        return super(EntityAdmin, self).change_view(request, object_id)
# Methods for changing status of many tickets
def make_received(modeladmin, request, queryset):
    # Status code '1' = received.
    queryset.update(status='1')
def make_ongoing(modeladmin, request, queryset):
    # Status code '2' = ongoing.
    queryset.update(status='2')
def make_dismissed(modeladmin, request, queryset):
    # Status code '3' = dismissed.
    queryset.update(status='3')
def make_done(modeladmin, request, queryset):
    # Status code '4' = done.
    queryset.update(status='4')
def delete_selected_tickets(modeladmin, request, queryset):
    # Delete tickets and keep each parent entity's ongoing-ticket counter in
    # sync.  NOTE(review): the counter is decremented for every selected
    # ticket regardless of its status — confirm that is intended; the
    # print() calls look like leftover debug output.
    for obj in queryset:
        parent = obj.entity
        print('before' + str(parent.numberOfOngoingTickets))
        parent.numberOfOngoingTickets -= 1
        parent.save()
        obj.delete()
        print('after' + str(parent.numberOfOngoingTickets))
make_done.short_description = "Mark selected tickets as done"
make_dismissed.short_description = "Mark selected tickets as dismissed"
make_ongoing.short_description = "Mark selected tickets as ongoing"
make_received.short_description = "Mark selected tickets as received"
class TicketAdmin(admin.ModelAdmin):
    """Admin for tickets with inline status editing and bulk status actions."""
    list_display = ['entity', 'room', 'department','type',
                    'message','location', 'created', 'status']
    list_filter = ('department', 'created', 'entity',)
    list_editable = ('status',)
    readonly_fields = ['location']
    actions = [make_done, make_received, make_dismissed, make_ongoing, delete_selected_tickets]
# Register every model with its admin class.
admin.site.register(Department, DepartmentAdmin)
admin.site.register(Entity, EntityAdmin)
admin.site.register(Ticket, TicketAdmin)
admin.site.register(EntityType, EntityTypeAdmin)
admin.site.register(Room, RoomAdmin)
| [
"max.westerback@abo.fi"
] | max.westerback@abo.fi |
2e88ae332259463a4ea2490adb3812b96ea469c8 | 96c40683d019477450394a791054657860051e44 | /projects/陈健/python/homework.py | 5261e7251907af7b128aa3a203ecf97725d195b3 | [] | no_license | HduiOSClub/HduiOSClub | 5bf826ddd05ce092b7c8f15a448b9396b7621772 | 41e59454317242a6c010cca0719c358f36f3e4c0 | refs/heads/master | 2021-08-24T11:08:00.333251 | 2017-11-29T06:19:57 | 2017-11-29T06:19:57 | 109,074,791 | 0 | 11 | null | 2017-12-09T12:27:30 | 2017-11-01T02:07:19 | Python | UTF-8 | Python | false | false | 130 | py | import requests
# Post hard-coded demo credentials to the login endpoint and print the
# server's response body.
params={'name':'Sliver','pwd':'123456'}
r = requests.post("http://holen1210.xyz/login.php",params)
print(r.text)
| [
"sliver@MacBook.local"
] | sliver@MacBook.local |
8d4e01d63f029ae4f6264c3ec8a2b1b51bacfbc6 | 0fa51edef92cd07033e7d03aa441ae54d8edad2e | /news_scrapers/epu_scrapy/spiders/deredactie_spider.py | f7aab7c2ba79b7d300fec7f911dcf631998cb515 | [] | no_license | Datafable/epu-index | d86fc108f7e8591cb949fde78f490fd970654bde | 3f9d24448ff85a8ea6736dbf9da0ec954a3b224b | refs/heads/master | 2020-12-25T18:13:53.397154 | 2018-03-28T09:37:53 | 2018-03-28T09:37:53 | 35,040,805 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,835 | py | import scrapy
from scrapy.contrib.spiders import CrawlSpider
from scrapy.exceptions import CloseSpider
from epu_scrapy.items import Article
from datetime import datetime, timedelta
from time import strptime, strftime, mktime
import re
import json
import os
def set_start_urls(settings):
    """
    Based on the dates given in the settings file, construct the start urls for the spider.

    ``settings['period']`` is either the string 'yesterday' or a dict with
    'start' and 'end' dates in '%Y-%m-%d' form; dates are rendered in the
    site's d/m/yy query format.  Raises CloseSpider for an unknown period.
    """
    term = settings['term']
    if type(settings['period']) is not dict:
        today = datetime.today()
        if settings['period'] != 'yesterday':
            # Bug fix: the exception was previously only instantiated, never
            # raised, so unknown periods silently behaved like 'yesterday'.
            raise CloseSpider("unknown period setting. See the scrapers README for more information.")
        search_day = today - timedelta(days=1) # search for articles of yesterday
        search_day_str = '{0}/{1}/{2}'.format(search_day.day, search_day.month, search_day.year % 100)
        start_urls = ['http://deredactie.be/cm/vrtnieuws/1.516538?text={0}&type=text&range=atdate&isdate={1}&sort=date&action=submit&advancedsearch=on'.format(term, search_day_str)]
    else:
        # awkward syntax to convert struct time to datetime (see: http://stackoverflow.com/questions/1697815/how-do-you-convert-a-python-time-struct-time-object-into-a-datetime-object)
        start = datetime(*strptime(settings['period']['start'], '%Y-%m-%d')[:6])
        start_str = '{0}/{1}/{2}'.format(start.day, start.month, start.year % 100)
        end = datetime(*strptime(settings['period']['end'], '%Y-%m-%d')[:6])
        end_str = '{0}/{1}/{2}'.format(end.day, end.month, end.year % 100)
        start_urls = ['http://deredactie.be/cm/vrtnieuws/1.516538?text={0}&type=text&range=betweendate&startdate={1}&enddate={2}&sort=date&action=submit&advancedsearch=on'.format(term, start_str, end_str)]
    return start_urls
class DeredactieSpider(CrawlSpider):
name = 'deredactie' # name of the spider, to be used when running from command line
allowed_domains = ['deredactie.be']
settings = json.load(open(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'crawling_settings.json')))
start_urls = set_start_urls(settings)
def parse(self, response):
"""
Parse the first search page to determine the number of articles returned. Use the urls offset parameter
to iterate over all response pages and yield scrapy.Request objects that will be parsed with the
parse_list_page function
"""
nr_of_articles_element = response.xpath('//li[contains(concat(" ", normalize-space(@class), " "), " searchcounter ")]')
if len(nr_of_articles_element) is 2:
# nr of articles is mentioned above list of articles and below. So the number of elements that match the xpath selector is 2
nr_of_articles_text = ''.join(nr_of_articles_element[0].xpath('descendant-or-self::*/text()').extract())
# Explaining the regular expression at line 53:
# (?P<offset>\d+) => matches a number (\d+) and assigns it to group "offset"
# (?P<pagesize>\d+) => matches a number (\d+) and assigns it to group "pagesize"
# \s+van\s+ => matches the word "van" surrounded by whitespace (spaces, tabs etc)
# (?P<nr_of_articles>\d+) => matches a number (\d+) and assigns it to group "nr_of_articles"
m = re.search('(?P<offset>\d+)-(?P<pagesize>\d+)\s+van\s+(?P<nr_of_articles>\d+)', nr_of_articles_text)
if m:
pagesize = int(m.group('pagesize')) - int(m.group('offset')) + 1
nr_of_articles = int(m.group('nr_of_articles'))
for i in range(0, nr_of_articles, pagesize):
# Note that the offset parameter starts at 0
yield scrapy.Request(self.start_urls[0] + '&offset={0}'.format(i), callback=self.parse_list_page)
else:
raise scrapy.exceptions.CloseSpider('Could not parse number of articles from {0}'.format(response.url))
else:
raise scrapy.exceptions.CloseSpider('Element containing the number of articles was not found at {0}'.format(response.url))
def parse_published_datetime(self, datetime_element_parts):
"""
Helper method to parse a datetime from a html element
"""
datetime_str_parts = [x.encode('utf-8') for x in datetime_element_parts]
datetime_str = ' '.join(datetime_str_parts).strip()
datetime_str_stripped = re.findall('[0-9]+/[0-9]+/[0-9]+[^0-9]+[0-9]+:[0-9]+', datetime_str)[0]
dt = datetime(*strptime(datetime_str_stripped, '%d/%m/%Y - %H:%M')[0:6])
return dt.isoformat()
def parse_list_page(self, response):
"""
Parse a single page returned by the search query. Find all links referring to articles and yield
scrapy.Request objects for every link found. The parsing of these links is done by the parse_article
function.
"""
print response.url
links = response.xpath('//div[contains(concat(" ", normalize-space(@class), " "), " searchresults ")]/descendant::a/@href').extract()
link_set = set([x.encode('utf-8') for x in links])
for l in link_set:
if l != '#':
# an article link can point to a single article page, or a storyline page, which includes several articles.
# in both cases, the id of the actual article that is pointed to can be found in the url. In the case
# of a storyline, the url is like /cm/vrtnieuws/buitenland/<storylineid>?eid=<articleid> while for a
# single article page, the url is /cm/vrtnieuws/binnenland/<articleid>. Both a storylineid and a articleid
# look something like 1.193019, which will be matched by the regular expression pattern [0-9.]+
article_id = re.findall('[0-9.]+', l)[-1] # the last string that matches this pattern in the url is the article id
l = 'http://deredactie.be/cm/' + article_id
yield scrapy.Request(l, callback=self.parse_article)
def parse_article(self, response):
"""
Parse the article content page
"""
# search for article title
title_parts = response.xpath('//div[@id="articlehead"]/h1/text()').extract()
if len(title_parts) > 0:
title = ' '.join(set(title_parts)).encode('utf-8').strip()
else:
title = ''
# search for article published date
datetime_element_parts = response.xpath('//small[@id="pubdate"]/strong/text()').extract()
if len(datetime_element_parts) > 0:
datetime_iso_str = self.parse_published_datetime(datetime_element_parts)
else:
datetime_iso_str = ''
# search for article intro text
article_intro_parts = response.xpath('//div[@id="intro"]/strong/text()').extract()
article_intro = ' '.join([x.strip() for x in article_intro_parts]).strip()
# search for article full text
article_full_text_fragments = response.xpath('//div[@id="articlebody"]/descendant::p/descendant-or-self::*/text()').extract()
article_full_text = ' '.join([x.strip() for x in article_full_text_fragments]).strip()
# reconstruct the url to the nicely rendered page
url_parts = response.url.split('/')
article_id = url_parts.pop()
url_parts.append('vrtnieuws')
url_parts.append(article_id)
url = '/'.join(url_parts)
# now create an Article item, and return it. All Articles created during scraping can be written to an output file when the -o option is given.
article = Article()
article['url'] = url
article['intro'] = article_intro
article['title'] = title
article['published_at'] = datetime_iso_str
article['text'] = article_full_text
return article
| [
"bart.aelterman@gmail.com"
] | bart.aelterman@gmail.com |
29db40847985f5a86f1d5fe53a856d1a1b06325a | 7b34296755a3f908f649a2d4da63e3b165ed79c6 | /1.parkir_sederhana.py | f73e040252e2b3568ac5ed9795216f6907287d78 | [] | no_license | elgaridhomaulana/PythonFundamental-DS | 776edd478e92ba7c2e332fec14955e8679df03cc | afce50ae57464047512b7637737bf9c8784d8db9 | refs/heads/master | 2020-07-08T09:10:19.894479 | 2019-10-07T14:53:35 | 2019-10-07T14:53:35 | 203,628,583 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,104 | py | #Konfirmasi Nilai Jam Masuk agar diantara 1-12
# Validate the entry hour so it is a digit between 1 and 12.
check1 = False
while check1 == False:
    jam_masuk = input('Jam Masuk Anda (1-12) ')
    if jam_masuk.isdigit():
        jam_masuk = int(jam_masuk)
        if (jam_masuk > 0) and (jam_masuk < 13):
            check1 = True
        else:
            print('Inputan Anda hanya bisa 1-12 saja')
    else:
        print('Inputan Anda Salah tidak boleh mengandung huruf')
# Validate the entry meridiem so it can only be am/pm.
check2 = False
while check2 == False:
    masuk_waktu = input('Anda Masuk (am/pm) ')
    if (masuk_waktu == 'am') or (masuk_waktu == 'pm'):
        check2 = True
    else:
        print('Anda hanya bisa memasukan waktu masuk antara am atau pm saja')
# Validate the exit hour so it is a digit between 1 and 12.
check3 = False
while check3 == False:
    jam_keluar = input('Jam Keluar Anda (1-12) ')
    if jam_keluar.isdigit():
        jam_keluar = int(jam_keluar)
        if (jam_keluar > 0) and (jam_keluar < 13):
            check3 = True
        else:
            print('Inputan hanya bisa angka 1-12 saja')
    else:
        print('Inputan Tidak boleh mengandung huruf')
# Validate the exit meridiem so it can only be am/pm.
check4 = False
while check4 == False:
    keluar_waktu = input('Anda Keluar (am/pm) ')
    if (keluar_waktu == 'am') or (keluar_waktu == 'pm'):
        check4 = True
    else:
        print('Anda hanya bisa memasukan waktu keluar antara am atau pm saja')
# Convert PM times to 24-hour form.
if masuk_waktu == 'pm':
    jam_masuk += 12
if keluar_waktu == 'pm':
    jam_keluar += 12
lama_parkir = jam_keluar - jam_masuk
# Compute the parking duration, wrapping around midnight.
if (lama_parkir < 0):
    lama_parkir += 24
# Compute the fee: flat 3000 minimum, 3000/hour for the first 3 hours,
# then 1000/hour, capped at 25000.
if lama_parkir == 0:
    biaya = 3000
elif 0 < lama_parkir <= 3:
    biaya = 3000 * lama_parkir
elif lama_parkir > 3:
    biaya_3jam_pertama = 3000 * 3
    biaya = biaya_3jam_pertama + (lama_parkir-3)*1000
    if biaya > 25000 :
        biaya = 25000
print('')
# Display the result.
print(f'Anda parkir selama {lama_parkir} jam, biaya parkir yang Anda bayar Rp {biaya},-')
"elgaridhomaulana@gmail.com"
] | elgaridhomaulana@gmail.com |
296b032cb7a9c291dade3733fa18bf133ce9ba65 | 33f9558cb3badaebf0a82994645bdf8a1b67a58e | /members/migrations/0004_auto_20210626_1501.py | 6e826d09f0f28b53471df3aee27413f237b07475 | [] | no_license | taziamoma/TazBB | 96096bf507faf092dcac4284057e337b01caf15f | b6bfa09a205f540af9b38b0e926bedb0835e64d6 | refs/heads/master | 2023-06-17T21:01:56.302457 | 2021-07-03T21:05:15 | 2021-07-03T21:05:15 | 345,442,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 430 | py | # Generated by Django 3.1.7 on 2021-06-26 20:01
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes Profile.image's default and upload path.
    dependencies = [
        ('members', '0003_auto_20210626_1501'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='image',
            field=models.ImageField(default='static/default.png', upload_to='static/avatars'),
        ),
    ]
| [
"taziamoma@gmail.com"
] | taziamoma@gmail.com |
3957354d65cb0977ccd774d015a5dc5f18eabc6e | ef00062db6ae7a9d6fde0c7b247f3bacd42c78c8 | /models/saloon.py | c07d666afab6741cc96fcec563b7791f2089ffc9 | [] | no_license | feliciah6/hair-care-app-project-db | 697ecce30cd166416042950d775bf5cb02491077 | ecc915c5e0d682fdc7118a6211e5fd1be9d36a7a | refs/heads/master | 2020-04-08T15:47:51.572363 | 2018-11-28T11:35:57 | 2018-11-28T11:35:57 | 159,492,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 534 | py | from peewee import (Model, CharField,TextField, SqliteDatabase, IntegrityError)
import config
DATABASE= config.DATABASE
class Saloon(Model):
    """Peewee model describing one hair saloon listing."""
    name = CharField(max_length=100, unique=True)
    business_number = CharField(max_length=100)
    # Opening hours stored as free-form strings (format not enforced here).
    opening_time = CharField(max_length=100)
    closing_time = CharField(max_length=200)
    description = TextField()
    services = CharField(max_length=200)
    # Owning user's id, stored as text — presumably a FK candidate; TODO confirm.
    user_id = CharField(max_length=200)
    location= CharField(max_length=200)
    class Meta:
        # Bind the model to the application's configured database.
        database = DATABASE
| [
"feliciahmtoka@gmail.com"
] | feliciahmtoka@gmail.com |
fda9cc328fb86e21aad11a50c0a3bcc5a6313ce2 | bbed71bba2aae9a07b6fa7d3f120c61ce45f0cec | /envFinal/bin/pyrsa-encrypt | 72ed62772d20cc75a1927dcfce8b4c1209d4e549 | [] | no_license | la-ursic/happy-new-reads | 786386e483ed29e77b8b56416a54420ba2798c3b | 8ab95a0d6428b7c93f49921d6229dbbc049c903b | refs/heads/master | 2022-12-13T02:08:19.549173 | 2018-07-11T19:55:11 | 2018-07-11T19:55:11 | 113,936,290 | 0 | 1 | null | 2022-12-07T23:48:41 | 2017-12-12T03:02:27 | Python | UTF-8 | Python | false | false | 300 | #!/Users/marioalbertogalindovalero/Documents/Batch17RojaHNRyMas/happy-new-reads/envFinal/bin/python3.6
# -*- coding: utf-8 -*-
import re
import sys
from rsa.cli import encrypt
if __name__ == '__main__':
    # Strip setuptools' "-script.pyw"/".exe" suffix from argv[0] so usage
    # text shows the plain command name, then run the rsa CLI entry point
    # and propagate its exit status.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(encrypt())
| [
"mariogalindo89@gmail.com"
] | mariogalindo89@gmail.com | |
7c5cb06f6368b90e660e8949c002bb5ac0fba60f | d23e182c1b790cbddbb1b5107f65a505b77f864c | /files.py | 0daf981dc42793f8bcbb414bff2354ff1a20d8ff | [] | no_license | orkarma/Python-crash-coarse-refresher | 1fed90343bd88215a1cd39c1f9a43ba8d1187819 | 2447ae8d4f65373e991cc457e8bbb1d5acee61bd | refs/heads/master | 2021-06-21T08:48:18.668524 | 2017-08-17T17:51:22 | 2017-08-17T17:51:22 | 100,592,094 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 474 | py | #open a file
# Open a file for writing (truncates any existing content).
fo = open('test.txt', 'w')

# Get some info about the file object.
print('Name: ', fo.name)
print('is closed: ', fo.closed)
print('opening mode: ', fo.mode)

# Write to the file.
fo.write('i love python')
fo.write(' and javascript.')

# Close the file.
# BUG FIX: the original said `fo.close` (no parentheses), which merely
# references the bound method and never actually flushes/closes the file.
fo.close()

# Open the file again to append to it.
fo = open('test.txt', 'a')
fo.write(' i also like PHP.')
fo.close()

# Read the whole file back.
fo = open('test.txt', 'r+')
text = fo.read()
print(text)
fo.close()  # also close the read handle (was leaked before)

# Create a new file and write to it.
fo = open('test2.txt', 'w+')
fo.write('this is my new file')
fo.close()
"orkarma@me.com"
] | orkarma@me.com |
2ee08687f24add09aece08977904e71776c2ec35 | aa0122b410936cd77dd79e3e9ff0d007ad4807e1 | /musicbot/exceptions.py | f15008d361e3c4f3eacd728edcf9af2c031c99ff | [] | no_license | peanutcorn/Kyunghee-portfolio | 405bf766727b6ddefe06cf4fd639ba0dd71defcb | 8ad9c6d3d5e773df5118fab9704cfb722e547a5b | refs/heads/master | 2022-12-30T21:11:25.772148 | 2020-10-12T04:40:43 | 2020-10-12T04:40:43 | 299,931,770 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,690 | py | import shutil
import textwrap
class MusicbotException(Exception):
    """Base class for every error the bot raises deliberately.

    Carries a human-readable message plus an optional lifetime
    (``expire_in``, seconds) for the notification that reports it.
    """

    def __init__(self, message, *, expire_in=0):
        super().__init__(message)
        self.expire_in = expire_in
        self._message = message

    def _get_message(self):
        return self._message

    # Both accessors return the raw message on the base class; subclasses
    # override them to add formatting.
    message = property(_get_message)
    message_no_format = property(_get_message)
class CommandError(MusicbotException):
    """Raised when a user-issued command fails or is used incorrectly."""
    pass
class ExtractionError(MusicbotException):
    """Raised when extracting media information/streams fails."""
    pass
# TODO: Add typing options instead of is_playlist
class WrongEntryTypeError(ExtractionError):
    """Raised when an extracted entry is not of the expected type.

    Attributes:
        is_playlist: presumably True when the entry is actually a playlist
            rather than a single track -- confirm against callers.
        use_url: alternative URL the caller should retry with.
    """
    def __init__(self, message, is_playlist, use_url):
        super().__init__(message)
        self.is_playlist = is_playlist
        self.use_url = use_url
class FFmpegError(MusicbotException):
    """Raised for fatal ffmpeg processing failures."""
    pass
class FFmpegWarning(MusicbotException):
    """Raised for non-fatal ffmpeg issues that should be reported."""
    pass
class SpotifyError(MusicbotException):
    """Raised when a Spotify-related operation fails."""
    pass
class PermissionsError(CommandError):
    """Raised when the invoking user lacks permission for a command."""
    @property
    def message(self):
        # Prefix a fixed explanation; the constructor argument becomes the
        # "Reason" shown to the user (stored in self._message by the base).
        return "You don't have permission to use that command.\nReason: " + self._message
class HelpfulError(MusicbotException):
    """Error rendered as a "Problem / Solution" block for the user."""

    def __init__(self, issue, solution, *, preface="An error has occured:", footnote='', expire_in=0):
        # NOTE(review): deliberately does NOT call super().__init__(), so
        # Exception.args stays empty and self._message is never set; both
        # message properties are overridden below so the base accessors
        # are not used.
        self.issue = issue
        self.solution = solution
        self.preface = preface
        self.footnote = footnote
        self.expire_in = expire_in
        # Template shared by both renderings below.
        self._message_fmt = "\n{preface}\n{problem}\n\n{solution}\n\n{footnote}"

    @property
    def message(self):
        """Rendering wrapped to the current terminal width."""
        return self._message_fmt.format(
            preface  = self.preface,
            problem  = self._pretty_wrap(self.issue, "  Problem:"),
            solution = self._pretty_wrap(self.solution, "  Solution:"),
            footnote = self.footnote
        )

    @property
    def message_no_format(self):
        """Rendering with no wrapping (width=None), e.g. for logs."""
        return self._message_fmt.format(
            preface  = self.preface,
            problem  = self._pretty_wrap(self.issue, "  Problem:", width=None),
            solution = self._pretty_wrap(self.solution, "  Solution:", width=None),
            footnote = self.footnote
        )

    @staticmethod
    def _pretty_wrap(text, pretext, *, width=-1):
        # width=None  -> no wrapping: "<pretext>\n<text>".
        # width=-1    -> wrap to the terminal width (default).
        if width is None:
            return '\n'.join((pretext.strip(), text))
        elif width == -1:
            pretext = pretext.rstrip() + '\n'
            width = shutil.get_terminal_size().columns

        lines = textwrap.wrap(text, width=width - 5)
        # Indent each wrapped line, pad, then strip trailing padding; the
        # ljust/rstrip dance preserves historical spacing behaviour.
        lines = (('    ' + line).rstrip().ljust(width-1).rstrip() + '\n' for line in lines)

        return pretext + ''.join(lines).rstrip()
class HelpfulWarning(HelpfulError):
    """Non-fatal variant of HelpfulError; same rendering."""
    pass
class Signal(Exception):
    """Base for control-flow signals (not real errors)."""
    pass
class RestartSignal(Signal):
    """Signal requesting that the bot restart itself."""
    pass
class TerminateSignal(Signal):
    """Signal requesting that the bot shut down."""
    pass
"bamtoto12@daum.net"
] | bamtoto12@daum.net |
fd5aff6a2751058553ded48699d43609293441fd | 9c73dd3043f7db7c9ec76d560484e99ad134fdb6 | /students/Wooseok J/lesson01/assignment/inventory_management/market_prices.py | 14bc8cc95c2ef01edf3b007859d5f427ba771cc8 | [] | no_license | UWPCE-PythonCert-ClassRepos/py220-online-201904-V2 | 546b316025b680ca28d24b523663095398616b13 | ac12beeae8aa57135bbcd03ac7a4f977fa3bdb56 | refs/heads/master | 2022-12-10T03:14:25.514630 | 2019-06-11T02:14:17 | 2019-06-11T02:14:17 | 179,139,181 | 1 | 19 | null | 2022-12-08T01:43:38 | 2019-04-02T18:49:10 | Python | UTF-8 | Python | false | false | 185 | py | #!/usr/bin/env python3
# pylint: disable=C0111
# pylint: disable=W0613
def get_latest_price(item_code):
    """Return the latest market price for *item_code*.

    Stub implementation: always returns the fixed price 24. The
    ``item_code`` argument is accepted for interface compatibility but is
    currently unused (hence the W0613 pylint suppression above).
    """
    # BUG FIX: removed the unreachable `return item_code` that followed
    # this statement in the original.
    return 24
| [
"noreply@github.com"
] | UWPCE-PythonCert-ClassRepos.noreply@github.com |
45585b168e7f39155dc61008a1d2d7b0e980cf38 | 5f38989326b66309a3a93d8300137b76eb273ddf | /src/train.py | 20c659a47540f1edd949fd30cda65ebfc266551e | [] | no_license | MaoXianXin/convolution-network-natural-scenes | c9ec06fe4c093460c17ff24dada7b0de0009666b | c2d6ad2db09e9325ba2418d16ff40e43a94c5516 | refs/heads/main | 2023-02-16T23:02:36.428524 | 2021-01-14T05:50:40 | 2021-01-14T05:50:40 | 327,226,219 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,836 | py | import os
import time
import tensorflow as tf
import pandas as pd
import matplotlib.pyplot as plt
from tensorflow.keras.models import save_model
from tensorflow.keras.layers import BatchNormalization
from nets.conv_net import ConvModel
from utils.data_generator import train_val_generator
from utils.image_plot import plot_images
import tempfile
# Pin GPU ordering to the PCI bus and train only on GPU 0.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"

train_gen = train_val_generator(
    data_dir='../dataset/natural-scenes/seg_train',
    target_size=(224, 224),  # resize every image to 224x224 (NOTE(review): original comment said 64->150; stale)
    batch_size=32,
    class_mode='categorical',
    subset='training')
val_gen = train_val_generator(
    data_dir='../dataset/natural-scenes/seg_train',
    target_size=(224, 224),
    batch_size=32,
    class_mode='categorical',
    subset='validation')
# ImageDataGenerator returns an iterator: each call yields one batch, so
# .next() can be used to read images batch by batch.
# Take a batch of training images for inspection.
train_batch, train_label_batch = train_gen.next()
# plot_images(train_batch, train_label_batch)
# Take a batch of validation images for inspection.
val_batch, val_label_batch = val_gen.next()
# plot_images(val_batch, val_label_batch)
# Instantiate the pretrained VGG16 backbone (ImageNet weights, no top).
base_model = tf.keras.applications.VGG16(input_shape=(224, 224, 3),
                                         include_top=False,
                                         weights='imagenet')
print(base_model.summary())
# Fine-tune the whole backbone, not just the new head.
base_model.trainable = True
global_average_layer = tf.keras.layers.GlobalAveragePooling2D()
# fc1 = tf.keras.layers.Dense(512)
prediction_layer = tf.keras.layers.Dense(6, activation='softmax', name='onnx_output')
inputs = tf.keras.Input(shape=(224, 224, 3), name='onnx_input')
x = base_model(inputs, training=True)
x = global_average_layer(x)
# x = fc1(x)
x = BatchNormalization()(x)
outputs = prediction_layer(x)
model = tf.keras.Model(inputs, outputs)
print(model.summary())
# Stop early when validation accuracy plateaus and keep the best weights.
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_accuracy', patience=5, mode='max', restore_best_weights=True)]
'''
模型设置tf.keras.Sequential.compile
用到的参数:
- loss:损失函数,对于分类任务,如果标签没做onehot编码,一般使用"sparse_categorical_crossentropy",否则使用"categorical_crossentropy"。
- optimizer:优化器,这里选用"sgd",更多优化器请查看https://tensorflow.google.cn/api_docs/python/tf/keras/optimizers。
- metrics:评价指标,这里选用"accuracy",更多优化器请查看https://tensorflow.google.cn/api_docs/python/tf/keras/metrics。
'''
# Configure loss function, optimizer and evaluation metrics.
model.compile(
    loss='categorical_crossentropy',
    optimizer=tf.keras.optimizers.SGD(
        learning_rate=1e-3, momentum=0.9, decay=1e-3),
    metrics=['accuracy'])
'''
模型训练tf.keras.Sequential.fit
用到的参数:
- x:输入的训练集,可以用ImageDataGenerator读取的数据。
- steps_per_epoch:输入整数,每一轮跑多少步数,这个数可以通过 图片总量/batch_size 得到,如2520/32=78.75。
- epochs:输入整数,数据集跑多少轮模型训练,一轮表示整个数据集训练一次。
- validation_data:输入的验证集,也可以用ImageDataGenerator读取的数据。
- validation_steps:输入整数,验证集跑多少步来计算模型的评价指标,一步会读取batch_size张图片,所以一共验证validation_steps * batch_size张图片。
- shuffle:每轮训练是否打乱数据顺序,默认True。
返回:
History对象,History.history属性会记录每一轮训练集和验证集的损失函数值和评价指标。
'''
history = model.fit(x=train_gen,
                    epochs=50, validation_data=val_gen,
                    shuffle=True, callbacks=callbacks)
eval_history = model.evaluate(val_gen)
print('-----val_loss-----', '\n', eval_history[0])
print('-----val_acc-----', '\n', eval_history[1])
# Plot the training-history curves.
pd.DataFrame(history.history).plot(figsize=(8, 5))
plt.grid(True)
plt.xlabel('epoch')
# plt.show()
plt.savefig('train_log.png')
'''
模型保存tf.keras.models.save_model
用到的参数:
- model:要保存的模型,也就是搭建的keras.Sequential。
- filepath:模型保存路径。
'''
# Save the model.
# Build the export path (versioned SavedModel layout under the temp dir).
MODEL_DIR = tempfile.gettempdir() + '/natural_scenes/'
version = 1
export_path = os.path.join(MODEL_DIR, str(version))
print('export_path = {}\n'.format(export_path))
tf.keras.models.save_model(
    model,
    export_path,
    overwrite=True,
    include_optimizer=True,
    save_format=None,
    signatures=None,
    options=None
)
"qq1044467857@gmail.com"
] | qq1044467857@gmail.com |
ea386863fc4bcc4650983431b032e9c25ddd69a7 | 8ca2c5b9673c9bf9a7b6033ffc7b3aea7008ca91 | /src/gdata/finance/__init__.py | f207b212756b50e71075be803fb53ec064e8dcbe | [
"Apache-2.0"
] | permissive | hfalcic/google-gdata | c3a10f0260002c3d8a8d44686572ec2002e076e0 | 56d49a9915ce51590a655ec5f8aeef9f65517787 | refs/heads/master | 2021-01-10T22:01:52.403803 | 2015-02-17T15:12:18 | 2015-02-17T15:12:18 | 24,432,292 | 3 | 1 | null | 2014-11-30T07:26:44 | 2014-09-24T20:53:59 | Python | UTF-8 | Python | false | false | 15,423 | py | #!/usr/bin/env python
#
# Copyright (C) 2009 Tan Swee Heng
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Finance."""
from __future__ import unicode_literals
__author__ = 'thesweeheng@gmail.com'
import atom
import gdata
GD_NAMESPACE = 'http://schemas.google.com/g/2005'
GF_NAMESPACE = 'http://schemas.google.com/finance/2007'
class Money(atom.AtomBase):
  """The <gd:money> element: an amount tagged with a currency code."""

  _tag = 'money'
  _namespace = GD_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute name -> Python attribute name.
  _attributes['amount'] = 'amount'
  _attributes['currencyCode'] = 'currency_code'

  def __init__(self, amount=None, currency_code=None, **kwargs):
    self.amount = amount
    self.currency_code = currency_code
    atom.AtomBase.__init__(self, **kwargs)

  def __str__(self):
    # e.g. "42.50 USD"
    return "%s %s" % (self.amount, self.currency_code)
def MoneyFromString(xml_string):
  """Deserialize a Money element from its XML string serialization."""
  return atom.CreateClassFromXMLString(Money, xml_string)
class _Monies(atom.AtomBase):
  """An element containing multiple <gd:money> in multiple currencies."""

  _namespace = GF_NAMESPACE
  _children = atom.AtomBase._children.copy()
  # One repeated <gd:money> child, exposed as the list attribute `money`.
  _children['{%s}money' % GD_NAMESPACE] = ('money', [Money])

  def __init__(self, money=None, **kwargs):
    self.money = money or []
    atom.AtomBase.__init__(self, **kwargs)

  def __str__(self):
    # Currencies are joined with " / ", e.g. "10 USD / 9 EUR".
    return " / ".join(["%s" % i for i in self.money])
class CostBasis(_Monies):
  """The <gf:costBasis> element."""
  # Only the tag differs; behaviour is inherited from _Monies.
  _tag = 'costBasis'
def CostBasisFromString(xml_string):
  """Deserialize a CostBasis element from its XML string serialization."""
  return atom.CreateClassFromXMLString(CostBasis, xml_string)
class DaysGain(_Monies):
  """The <gf:daysGain> element."""
  # Only the tag differs; behaviour is inherited from _Monies.
  _tag = 'daysGain'
def DaysGainFromString(xml_string):
  """Deserialize a DaysGain element from its XML string serialization."""
  return atom.CreateClassFromXMLString(DaysGain, xml_string)
class Gain(_Monies):
  """The <gf:gain> element."""
  # Only the tag differs; behaviour is inherited from _Monies.
  _tag = 'gain'
def GainFromString(xml_string):
  """Deserialize a Gain element from its XML string serialization."""
  return atom.CreateClassFromXMLString(Gain, xml_string)
class MarketValue(_Monies):
  """The <gf:marketValue> element."""
  # BUG FIX: removed a stray `_tag = 'gain'` copy-paste leftover that was
  # immediately overwritten by the assignment below (dead code, but it
  # misled readers about which XML tag this class maps to).
  _tag = 'marketValue'
def MarketValueFromString(xml_string):
  """Deserialize a MarketValue element from its XML string serialization."""
  return atom.CreateClassFromXMLString(MarketValue, xml_string)
class Commission(_Monies):
  """The <gf:commission> element."""
  # Only the tag differs; behaviour is inherited from _Monies.
  _tag = 'commission'
def CommissionFromString(xml_string):
  """Deserialize a Commission element from its XML string serialization."""
  return atom.CreateClassFromXMLString(Commission, xml_string)
class Price(_Monies):
  """The <gf:price> element."""
  # Only the tag differs; behaviour is inherited from _Monies.
  _tag = 'price'
def PriceFromString(xml_string):
  """Deserialize a Price element from its XML string serialization."""
  return atom.CreateClassFromXMLString(Price, xml_string)
class Symbol(atom.AtomBase):
  """The <gf:symbol> element: exchange, ticker symbol and full name."""

  _tag = 'symbol'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute name -> Python attribute name.
  _attributes['fullName'] = 'full_name'
  _attributes['exchange'] = 'exchange'
  _attributes['symbol'] = 'symbol'

  def __init__(self, full_name=None, exchange=None, symbol=None, **kwargs):
    self.full_name = full_name
    self.exchange = exchange
    self.symbol = symbol
    atom.AtomBase.__init__(self, **kwargs)

  def __str__(self):
    # e.g. "NASDAQ:GOOG (Google Inc.)"
    return "%s:%s (%s)" % (self.exchange, self.symbol, self.full_name)
def SymbolFromString(xml_string):
  """Deserialize a Symbol element from its XML string serialization."""
  return atom.CreateClassFromXMLString(Symbol, xml_string)
class TransactionData(atom.AtomBase):
  """The <gf:transactionData> element.

  Describes one buy/sell event: type, date, share count, free-form notes,
  plus optional Commission and Price child elements.
  """

  _tag = 'transactionData'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute name -> Python attribute name.
  _attributes['type'] = 'type'
  _attributes['date'] = 'date'
  _attributes['shares'] = 'shares'
  _attributes['notes'] = 'notes'
  _children = atom.AtomBase._children.copy()
  _children['{%s}commission' % GF_NAMESPACE] = ('commission', Commission)
  _children['{%s}price' % GF_NAMESPACE] = ('price', Price)

  def __init__(self, type=None, date=None, shares=None,
      notes=None, commission=None, price=None, **kwargs):
    # NOTE: `type` intentionally shadows the builtin to match the XML
    # attribute name expected by the atom framework.
    self.type = type
    self.date = date
    self.shares = shares
    self.notes = notes
    self.commission = commission
    self.price = price
    atom.AtomBase.__init__(self, **kwargs)
def TransactionDataFromString(xml_string):
  """Deserialize a TransactionData element from its XML serialization."""
  return atom.CreateClassFromXMLString(TransactionData, xml_string)
class TransactionEntry(gdata.GDataEntry):
  """An entry of the transaction feed.

  A TransactionEntry contains TransactionData such as the transaction
  type (Buy, Sell, Sell Short, or Buy to Cover), the number of units,
  the date, the price, any commission, and any notes.
  """

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _children['{%s}transactionData' % GF_NAMESPACE] = (
      'transaction_data', TransactionData)

  def __init__(self, transaction_data=None, **kwargs):
    self.transaction_data = transaction_data
    gdata.GDataEntry.__init__(self, **kwargs)

  def transaction_id(self):
    # The transaction ID is the last path segment of the atom entry id.
    return self.id.text.split("/")[-1]

  # Pre-@property idiom: rebinds the name as a read-only property.
  transaction_id = property(transaction_id, doc='The transaction ID.')
def TransactionEntryFromString(xml_string):
  """Deserialize a TransactionEntry from its XML serialization."""
  return atom.CreateClassFromXMLString(TransactionEntry, xml_string)
class TransactionFeed(gdata.GDataFeed):
  """A feed that lists all of the transactions that have been recorded for
  a particular position.

  A transaction is a collection of information about an instance of
  buying or selling a particular security. The TransactionFeed lists all
  of the transactions that have been recorded for a particular position
  as a list of TransactionEntries.
  """

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  # Repeated atom entries parsed as TransactionEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [TransactionEntry])
def TransactionFeedFromString(xml_string):
  """Deserialize a TransactionFeed from its XML serialization."""
  return atom.CreateClassFromXMLString(TransactionFeed, xml_string)
class TransactionFeedLink(atom.AtomBase):
  """Link to TransactionFeed embedded in PositionEntry.

  If a PositionFeed is queried with transactions='true', TransactionFeeds
  are inlined in the returned PositionEntries. These TransactionFeeds are
  accessible via TransactionFeedLink's feed attribute.
  """

  _tag = 'feedLink'
  _namespace = GD_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['href'] = 'href'
  _children = atom.AtomBase._children.copy()
  # Optional inlined feed (present only when transactions='true').
  _children['{%s}feed' % atom.ATOM_NAMESPACE] = (
      'feed', TransactionFeed)

  def __init__(self, href=None, feed=None, **kwargs):
    self.href = href
    self.feed = feed
    atom.AtomBase.__init__(self, **kwargs)
class PositionData(atom.AtomBase):
  """The <gf:positionData> element.

  Holds the share count and performance statistics for a single position.
  """

  _tag = 'positionData'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute name -> Python attribute name (returns over periods).
  _attributes['gainPercentage'] = 'gain_percentage'
  _attributes['return1w'] = 'return1w'
  _attributes['return4w'] = 'return4w'
  _attributes['return3m'] = 'return3m'
  _attributes['returnYTD'] = 'returnYTD'
  _attributes['return1y'] = 'return1y'
  _attributes['return3y'] = 'return3y'
  _attributes['return5y'] = 'return5y'
  _attributes['returnOverall'] = 'return_overall'
  _attributes['shares'] = 'shares'
  _children = atom.AtomBase._children.copy()
  # Money-valued child elements (each possibly in several currencies).
  _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
  _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
  _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
  _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)

  def __init__(self, gain_percentage=None,
      return1w=None, return4w=None, return3m=None, returnYTD=None,
      return1y=None, return3y=None, return5y=None, return_overall=None,
      shares=None, cost_basis=None, days_gain=None,
      gain=None, market_value=None, **kwargs):
    self.gain_percentage = gain_percentage
    self.return1w = return1w
    self.return4w = return4w
    self.return3m = return3m
    self.returnYTD = returnYTD
    self.return1y = return1y
    self.return3y = return3y
    self.return5y = return5y
    self.return_overall = return_overall
    self.shares = shares
    self.cost_basis = cost_basis
    self.days_gain = days_gain
    self.gain = gain
    self.market_value = market_value
    atom.AtomBase.__init__(self, **kwargs)
def PositionDataFromString(xml_string):
  """Deserialize a PositionData element from its XML serialization."""
  return atom.CreateClassFromXMLString(PositionData, xml_string)
class PositionEntry(gdata.GDataEntry):
  """An entry of the position feed.

  A PositionEntry contains the ticker exchange and Symbol for a stock,
  mutual fund, or other security, along with PositionData such as the
  number of units of that security that the user holds, and performance
  statistics.
  """

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _children['{%s}positionData' % GF_NAMESPACE] = (
      'position_data', PositionData)
  _children['{%s}symbol' % GF_NAMESPACE] = ('symbol', Symbol)
  # Optional inlined transaction feed (when queried with transactions='true').
  _children['{%s}feedLink' % GD_NAMESPACE] = (
      'feed_link', TransactionFeedLink)

  def __init__(self, position_data=None, symbol=None, feed_link=None,
      **kwargs):
    self.position_data = position_data
    self.symbol = symbol
    self.feed_link = feed_link
    gdata.GDataEntry.__init__(self, **kwargs)

  def position_title(self):
    return self.title.text

  # Read-only (unlike PortfolioEntry.portfolio_title, which has a setter).
  position_title = property(position_title,
      doc='The position title as a string (i.e. position.title.text).')

  def ticker_id(self):
    # The ticker ID is the last path segment of the atom entry id.
    return self.id.text.split("/")[-1]

  ticker_id = property(ticker_id, doc='The position TICKER ID.')

  def transactions(self):
    if self.feed_link.feed:
      return self.feed_link.feed.entry
    else:
      return None

  transactions = property(transactions, doc="""
      Inlined TransactionEntries are returned if PositionFeed is queried
      with transactions='true'.""")
def PositionEntryFromString(xml_string):
  """Deserialize a PositionEntry from its XML serialization."""
  return atom.CreateClassFromXMLString(PositionEntry, xml_string)
class PositionFeed(gdata.GDataFeed):
  """A feed that lists all of the positions in a particular portfolio.

  A position is a collection of information about a security that the
  user holds. The PositionFeed lists all of the positions in a particular
  portfolio as a list of PositionEntries.
  """

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  # Repeated atom entries parsed as PositionEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PositionEntry])
def PositionFeedFromString(xml_string):
  """Deserialize a PositionFeed from its XML serialization."""
  return atom.CreateClassFromXMLString(PositionFeed, xml_string)
class PositionFeedLink(atom.AtomBase):
  """Link to PositionFeed embedded in PortfolioEntry.

  If a PortfolioFeed is queried with positions='true', the PositionFeeds
  are inlined in the returned PortfolioEntries. These PositionFeeds are
  accessible via PositionFeedLink's feed attribute.
  """

  _tag = 'feedLink'
  _namespace = GD_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  _attributes['href'] = 'href'
  _children = atom.AtomBase._children.copy()
  # Optional inlined feed (present only when positions='true').
  _children['{%s}feed' % atom.ATOM_NAMESPACE] = (
      'feed', PositionFeed)

  def __init__(self, href=None, feed=None, **kwargs):
    self.href = href
    self.feed = feed
    atom.AtomBase.__init__(self, **kwargs)
class PortfolioData(atom.AtomBase):
  """The <gf:portfolioData> element.

  Holds a portfolio's currency and aggregate performance statistics.
  """

  _tag = 'portfolioData'
  _namespace = GF_NAMESPACE
  _attributes = atom.AtomBase._attributes.copy()
  # XML attribute name -> Python attribute name (returns over periods).
  _attributes['currencyCode'] = 'currency_code'
  _attributes['gainPercentage'] = 'gain_percentage'
  _attributes['return1w'] = 'return1w'
  _attributes['return4w'] = 'return4w'
  _attributes['return3m'] = 'return3m'
  _attributes['returnYTD'] = 'returnYTD'
  _attributes['return1y'] = 'return1y'
  _attributes['return3y'] = 'return3y'
  _attributes['return5y'] = 'return5y'
  _attributes['returnOverall'] = 'return_overall'
  _children = atom.AtomBase._children.copy()
  # Money-valued child elements (each possibly in several currencies).
  _children['{%s}costBasis' % GF_NAMESPACE] = ('cost_basis', CostBasis)
  _children['{%s}daysGain' % GF_NAMESPACE] = ('days_gain', DaysGain)
  _children['{%s}gain' % GF_NAMESPACE] = ('gain', Gain)
  _children['{%s}marketValue' % GF_NAMESPACE] = ('market_value', MarketValue)

  def __init__(self, currency_code=None, gain_percentage=None,
      return1w=None, return4w=None, return3m=None, returnYTD=None,
      return1y=None, return3y=None, return5y=None, return_overall=None,
      cost_basis=None, days_gain=None, gain=None, market_value=None, **kwargs):
    self.currency_code = currency_code
    self.gain_percentage = gain_percentage
    self.return1w = return1w
    self.return4w = return4w
    self.return3m = return3m
    self.returnYTD = returnYTD
    self.return1y = return1y
    self.return3y = return3y
    self.return5y = return5y
    self.return_overall = return_overall
    self.cost_basis = cost_basis
    self.days_gain = days_gain
    self.gain = gain
    self.market_value = market_value
    atom.AtomBase.__init__(self, **kwargs)
def PortfolioDataFromString(xml_string):
  """Deserialize a PortfolioData element from its XML serialization."""
  return atom.CreateClassFromXMLString(PortfolioData, xml_string)
class PortfolioEntry(gdata.GDataEntry):
  """An entry of the PortfolioFeed.

  A PortfolioEntry contains the portfolio's title along with PortfolioData
  such as currency, total market value, and overall performance statistics.
  """

  _tag = 'entry'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataEntry._children.copy()
  _children['{%s}portfolioData' % GF_NAMESPACE] = (
      'portfolio_data', PortfolioData)
  # Optional inlined position feed (when queried with positions='true').
  _children['{%s}feedLink' % GD_NAMESPACE] = (
      'feed_link', PositionFeedLink)

  def __init__(self, portfolio_data=None, feed_link=None, **kwargs):
    self.portfolio_data = portfolio_data
    self.feed_link = feed_link
    gdata.GDataEntry.__init__(self, **kwargs)

  def portfolio_title(self):
    return self.title.text

  def set_portfolio_title(self, portfolio_title):
    # Replaces the atom title; title_type='text' marks it as plain text.
    self.title = atom.Title(text=portfolio_title, title_type='text')

  # Pre-@property idiom: read/write property over the two methods above.
  portfolio_title = property(portfolio_title, set_portfolio_title,
      doc='The portfolio title as a string (i.e. portfolio.title.text).')

  def portfolio_id(self):
    # The portfolio ID is the last path segment of the atom entry id.
    return self.id.text.split("/")[-1]

  portfolio_id = property(portfolio_id,
      doc='The portfolio ID. Do not confuse with portfolio.id.')

  def positions(self):
    if self.feed_link.feed:
      return self.feed_link.feed.entry
    else:
      return None

  positions = property(positions, doc="""
      Inlined PositionEntries are returned if PortfolioFeed was queried
      with positions='true'.""")
def PortfolioEntryFromString(xml_string):
  """Deserialize a PortfolioEntry from its XML serialization."""
  return atom.CreateClassFromXMLString(PortfolioEntry, xml_string)
class PortfolioFeed(gdata.GDataFeed):
  """A feed that lists all of the user's portfolios.

  A portfolio is a collection of positions that the user holds in various
  securities, plus metadata. The PortfolioFeed lists all of the user's
  portfolios as a list of PortfolioEntries.
  """

  _tag = 'feed'
  _namespace = atom.ATOM_NAMESPACE
  _children = gdata.GDataFeed._children.copy()
  # Repeated atom entries parsed as PortfolioEntry objects.
  _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [PortfolioEntry])
def PortfolioFeedFromString(xml_string):
  """Deserialize a PortfolioFeed from its XML serialization."""
  return atom.CreateClassFromXMLString(PortfolioFeed, xml_string)
| [
"harvey.falcic@gmail.com"
] | harvey.falcic@gmail.com |
5bf6d8e5da9416d75daaa4e067ae7119ca58f647 | c2c6798ced0db33b2669f11f2434596c61496aef | /fastparquet/__init__.py | 38dec432f8c525661a842f3d0a7c473b1fa9f2e3 | [
"Apache-2.0"
] | permissive | PGryllos/fastparquet | e037b0d5e6387746f82e91fd9b4240962f178308 | 07401c501dbfc55c456052413f0c904483c68b50 | refs/heads/master | 2020-04-04T19:09:27.392744 | 2018-10-24T18:31:06 | 2018-10-24T18:31:06 | 156,194,372 | 0 | 0 | Apache-2.0 | 2018-11-05T09:46:52 | 2018-11-05T09:46:52 | null | UTF-8 | Python | false | false | 424 | py | """parquet - read parquet files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from .thrift_structures import parquet_thrift
from .core import read_thrift
from .writer import write
from . import core, schema, converted_types, api
from .api import ParquetFile
from .util import ParquetException
__version__ = "0.1.6"
| [
"martin.durant@utoronto.ca"
] | martin.durant@utoronto.ca |
786f5b07ccf24aa451d837abb27f2a99e6de36a7 | e9dc636a1a0e10b4593029f647efaf242bf34b9d | /src/7.3.0-TranferLearning-Sound2Vec-AutoEncoder-LSTM/USCData.py | 3bc871aea035bc56d0bede621af074eabc9735a3 | [] | no_license | mehmetpekmezci/urban_sound_classification | e45c2fb5264029ebaca8d1d1f00adac3d56cb783 | 8b3927ff5430df866f6a42af379e68dc6d5c74d5 | refs/heads/master | 2021-06-09T02:02:32.934628 | 2020-05-10T15:30:59 | 2020-05-10T15:30:59 | 114,467,064 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,946 | py | #!/usr/bin/env python3
from USCHeader import *
class USCData :
    def __init__(self, logger):
        """Initialize paths, audio framing constants and in-memory caches."""
        self.logger = logger
        # Directory layout: <repo>/data/{0.raw,1.csv,2.np} relative to this script.
        self.script_dir = os.path.dirname(os.path.realpath(__file__))
        self.script_name = os.path.basename(self.script_dir)
        self.fold_dirs = ['fold1','fold2','fold3','fold4','fold5','fold6','fold7','fold8','fold9','fold10']
        #self.fold_dirs=['fold1','fold10']
        #self.fold_dirs=['fold1']
        self.main_data_dir = self.script_dir+'/../../data/'
        self.raw_data_dir = self.main_data_dir+'/0.raw/UrbanSound8K/audio'
        self.csv_data_dir = self.main_data_dir+'/1.csv'
        self.np_data_dir = self.main_data_dir+'/2.np'
        self.sound_record_sampling_rate = 22050  # 22050 sample points per second
        self.track_length = 4*self.sound_record_sampling_rate  # 4 seconds record
        # Sliding-window framing of each 4-second track.
        self.time_slice_length = 2000
        #self.time_slice_length=440
        #self.time_slice_length=55
        self.time_slice_overlap_length = 200
        #self.time_slice_overlap_length=265
        #self.time_slice_overlap_length=30
        # Number of windows given the effective hop (length - overlap).
        self.number_of_time_slices = math.floor(self.track_length/(self.time_slice_length-self.time_slice_overlap_length))
        self.number_of_classes = 10
        self.mini_batch_size = 50
        # fold name -> numpy array of rows (filled by load_all_np_data_back_to_memory).
        self.fold_data_dictionary = dict()
        # youtube category dir -> shuffled list of .npy file paths.
        self.youtube_data_file_dictionary = dict()
        self.current_youtube_data = []
        self.youtube_data_max_category_data_file_count = 0
        self.current_data_file_number = 0
        self.word2vec_window_size = 7  ## do not change this
        self.latent_space_presentation_data_length = 0  ## WILL BE SET IN USCAutoEncoder.buildModel method.
    def parse_audio_files(self):
        """Convert every fold's .wav files to one CSV per fold.

        Each CSV row is 4 s of samples (zero-padded or truncated to
        4 * sampling_rate values) followed by the class number parsed
        from the file name (``<id>-<class>-...wav``).
        """
        sub4SecondSoundFilesCount = 0
        for sub_dir in self.fold_dirs:
            self.logger.info("Parsing : "+sub_dir)
            csvDataFile = open(self.csv_data_dir+"/"+sub_dir+".csv", 'w')
            csvDataWriter = csv.writer(csvDataFile, delimiter=',', quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n')
            for file_path in glob.glob(os.path.join(self.raw_data_dir, sub_dir, '*.wav')):
                self.logger.info(file_path)
                try :
                    # Class number is the second '-'-separated token of the base name.
                    classNumber = file_path.split('/')[-1].split('.')[0].split('-')[1]
                    sound_data, sampling_rate = librosa.load(file_path)
                    sound_data = np.array(sound_data)
                    sound_data_duration = int(sound_data.shape[0]/self.sound_record_sampling_rate)
                    if sound_data_duration < 4 :
                        # Shorter clips are right-padded with zeros to 4 seconds.
                        sub4SecondSoundFilesCount = sub4SecondSoundFilesCount+1
                        sound_data_in_4_second = np.zeros(4*self.sound_record_sampling_rate)
                        for i in range(sound_data.shape[0]):
                            sound_data_in_4_second[i] = sound_data[i]
                    else :
                        # Longer clips are truncated to the first 4 seconds.
                        sound_data_in_4_second = sound_data[:4*self.sound_record_sampling_rate]
                    # Append the label as the last column of the row.
                    sound_data_in_4_second = np.append(sound_data_in_4_second, [classNumber])
                    csvDataWriter.writerow(sound_data_in_4_second)
                except :
                    # NOTE(review): bare except logs and skips any unreadable
                    # file; only the exception type is logged, not the message.
                    e = sys.exc_info()[0]
                    self.logger.info ("Exception :")
                    self.logger.info (e)
            csvDataFile.close()
        self.logger.info("sub4SecondSoundFilesCount="+str(sub4SecondSoundFilesCount));
def prepareData(self):
self.logger.info("Starting to prepare the data ... ")
if not os.path.exists(self.raw_data_dir) :
if not os.path.exists(self.main_data_dir+'/../data/0.raw'):
os.makedirs(self.main_data_dir+'/../data/0.raw')
if not os.path.exists(self.main_data_dir+"/0.raw/UrbanSound8K"):
if os.path.exists(self.main_data_dir+"/0.raw/UrbanSound8K.tar.gz"):
self.logger.info("Extracting "+self.main_data_dir+"/0.raw/UrbanSound8K.tar.gz")
tar = tarfile.open(self.main_data_dir+"/0.raw/UrbanSound8K.tar.gz")
tar.extractall(self.main_data_dir+'/../data/0.raw')
tar.close()
self.logger.info("Extracted "+self.main_data_dir+"/0.raw/UrbanSound8K.tar.gz")
else :
self.logger.info("download "+self.main_data_dir+"/0.raw/UrbanSound8K.tar.gz from http://serv.cusp.nyu.edu/files/jsalamon/datasets/content_loader.php?id=2 using firefox browser or chromium and re-run this script")
# self.logger.info("download "+self.main_data_dir+"/0.raw/UrbanSound8K.tar.gz from https://serv.cusp.nyu.edu/projects/urbansounddataset/download-urbansound8k.html using firefox browser or chromium and re-run this script")
exit(1)
# http = urllib3.PoolManager()
# chunk_size=100000
# r = http.request('GET', 'http://serv.cusp.nyu.edu/files/jsalamon/datasets/content_loader.php?id=2', preload_content=False)
# with open(self.main_data_dir+"/0.raw/UrbanSound8K.tar.gz", 'wb') as out:
# while True:
# data = r.read(chunk_size)
# if not data:
# break
# out.write(data)
# r.release_conn()
if not os.path.exists(self.csv_data_dir) :
os.makedirs(self.csv_data_dir)
parse_audio_files()
if not os.path.exists(self.np_data_dir) :
os.makedirs(self.np_data_dir)
save_as_np()
self.logger.info("Data is READY in CSV format. ")
self.load_all_np_data_back_to_memory()
    def save_as_np(self):
        """Convert each fold's CSV to a .npy file and record global min/max.

        Also writes ``minmax.npy`` containing [min, max] over all sample
        values, for later normalization.
        """
        self.logger.info ("save_as_np function started ...")
        fold_data_dictionary = dict()
        max_value_for_normalization = 0
        min_value_for_normalization = 0
        for fold in self.fold_dirs:
            fold_data_dictionary[fold] = np.array(np.loadtxt(open(self.csv_data_dir+"/"+fold+".csv", "rb"), delimiter=","))
            for i in range(fold_data_dictionary[fold].shape[0]) :
                loadedData = fold_data_dictionary[fold][i]
                # X = first 4 s of samples; Y = trailing label column.
                loadedDataX = loadedData[:4*self.sound_record_sampling_rate]
                loadedDataY = loadedData[4*self.sound_record_sampling_rate]  # NOTE(review): unused
                maxOfArray = np.amax(loadedDataX)
                minOfArray = np.amin(loadedDataX)
                # Track the global extrema across every row of every fold.
                if max_value_for_normalization < maxOfArray :
                    max_value_for_normalization = maxOfArray
                if min_value_for_normalization > minOfArray :
                    min_value_for_normalization = minOfArray
            ## Then append Y data to the end of row
            np.save(self.main_data_dir+"/2.np/"+fold+".npy", fold_data_dictionary[fold])
        np.save(self.main_data_dir+"/2.np/minmax.npy", [min_value_for_normalization, max_value_for_normalization])
        self.logger.info ("save_as_np function finished ...")
def normalize(self,data):
normalized_data = data/np.linalg.norm(data)
return normalized_data
def one_hot_encode_array(self,arrayOfYData):
returnMatrix=np.empty([0,self.number_of_classes]);
for i in range(arrayOfYData.shape[0]):
one_hot_encoded_class_number = np.zeros(self.number_of_classes)
one_hot_encoded_class_number[int(arrayOfYData[i])]=1
returnMatrix=np.row_stack([returnMatrix, one_hot_encoded_class_number])
return returnMatrix
def one_hot_encode(self,classNumber):
one_hot_encoded_class_number = np.zeros(self.number_of_classes)
one_hot_encoded_class_number[int(classNumber)]=1
return one_hot_encoded_class_number
    def findListOfYoutubeDataFiles(self):
        """Index the YouTube .npy data files per category directory.

        Populates self.youtube_data_file_dictionary (category dir ->
        shuffled list of file paths) and tracks the largest per-category
        file count. Exits the process when the data is missing.
        """
        self.logger.info ("Crawling Youtube Data Files From Directory ../../youtube/downloads/ ...")
        if not os.path.exists('../../youtube/raw/'):
            self.logger.info("../../youtube/raw/ directory does not exist.")
            self.logger.info("Please do the following :")
            self.logger.info(" 1. cd ../../youtube/")
            self.logger.info(" 2. ./download.sh")
            self.logger.info(" 3. ./convertAll.sh")
            self.logger.info(" 4. ./splitAll.sh")
            self.logger.info(" 5. python3 prepareNPYDataFiles.py")
            exit(1);
        if len(glob.glob('../../youtube/raw/*/*.npy')) == 0:
            self.logger.info("../../youtube/raw/*/*.npy data files do not exist , first go to ../../youtube directory and run 'python3 prepareNPYDataFiles.py' ")
            exit(1);
        for category in glob.glob('../../youtube/raw/*/'):
            dataFileList = glob.glob(category+'/*.npy')
            if len(dataFileList) > self.youtube_data_max_category_data_file_count :
                self.youtube_data_max_category_data_file_count = len(dataFileList)
            # random.sample with the full length returns a shuffled copy.
            self.youtube_data_file_dictionary[category] = random.sample(dataFileList, len(dataFileList))
def loadNextYoutubeData(self):
    """Load the next numbered data file of every category, shuffle the
    combined rows, cache them on the instance and return them.

    Each row is truncated to 4 seconds worth of samples. The file cursor
    wraps around at the size of the largest category.
    """
    record_width = 4 * self.sound_record_sampling_rate
    self.current_youtube_data = np.empty([0, record_width])
    for category, dataFileList in self.youtube_data_file_dictionary.items():
        # skip categories that do not have a file at the current index
        if self.current_data_file_number < len(dataFileList):
            path = category + '/data.' + str(self.current_data_file_number) + '.npy'
            self.logger.info("loading" + path)
            loadedData = np.load(path)[:, :record_width]
            # append this category's rows to the combined batch
            self.current_youtube_data = np.vstack((self.current_youtube_data, loadedData))
    # advance the cursor, wrapping at the largest category size
    self.current_data_file_number = (
        (self.current_data_file_number + 1) % self.youtube_data_max_category_data_file_count)
    np.random.shuffle(self.current_youtube_data)
    self.logger.info(self.current_youtube_data.shape)
    return self.current_youtube_data
def load_all_np_data_back_to_memory(self):
    """Reload every fold's cached .npy dump plus the stored normalisation
    bounds, and return (max, min) of the normalisation range."""
    self.logger.info("load_all_np_data_back_to_memory function started ...")
    np_dir = self.main_data_dir + "/2.np/"
    for fold in self.fold_dirs:
        self.logger.info("loading from " + np_dir + fold + ".npy ...")
        self.fold_data_dictionary[fold] = np.load(np_dir + fold + ".npy")
    # minmax.npy holds [min, max] as written by save_as_np
    min_value_for_normalization, max_value_for_normalization = np.load(np_dir + "minmax.npy")
    self.logger.info("load_all_np_data_back_to_memory function finished ...")
    return max_value_for_normalization, min_value_for_normalization
def get_fold_data(self, fold):
    """Return a randomly permuted copy of the cached data for *fold*."""
    fold_rows = self.fold_data_dictionary[fold]
    return np.random.permutation(fold_rows)
def augment_speedx(self, sound_array, factor):
    """ Multiplies the sound's speed by some `factor` """
    # Resample by picking indices 0, factor, 2*factor, ... ; the output is
    # padded with zeros (factor > 1) or truncated (factor < 1) so it keeps
    # the original length.
    # Improvement: the original converted `indices` to int twice; the cast
    # is now done once.
    result = np.zeros(len(sound_array))
    indices = np.round(np.arange(0, len(sound_array), factor)).astype(int)
    indices = indices[indices < len(sound_array)]
    result_calculated = sound_array[indices]
    if len(result) > len(result_calculated):
        # sped up: fewer samples, pad the tail with zeros
        result[:len(result_calculated)] = result_calculated
    else:
        # slowed down: more samples, keep only the first len(result)
        result = result_calculated[:len(result)]
    return result
def augment_inverse(self, sound_array):
    """Return the waveform with its polarity flipped (sample-wise negation)."""
    return np.negative(sound_array)
def augment_volume(self, sound_array, factor):
    """Scale the waveform's amplitude by *factor*."""
    scaled = sound_array * factor
    return scaled
def augment_translate(self, snd_array, n):
    """ Translates the sound wave by n indices, fill the first n elements of the array with zeros """
    # BUG FIX: for n == 0 the original did new_array[0:] = snd_array[:-0],
    # and snd_array[:-0] is the EMPTY slice, so the whole output stayed
    # zero instead of being an unshifted copy. Handle n == 0 explicitly.
    new_array = np.zeros(len(snd_array))
    if n == 0:
        return snd_array.copy()
    # for n >= len(snd_array) both sides of the assignment are empty,
    # leaving an all-zero array, same as before
    new_array[n:] = snd_array[:-n]
    return new_array
def overlapping_slice(self, x_data, hanning=False):
    """Cut every row of *x_data* into overlapping windows.

    Returns an array of shape
    (mini_batch_size, number_of_time_slices, time_slice_length); windows
    advance by time_slice_length - time_slice_overlap_length samples, and
    windows that run past the end of the row are zero-padded. When
    *hanning* is true each stored window is multiplied by a Hanning window.
    """
    out = np.zeros([self.mini_batch_size, self.number_of_time_slices, self.time_slice_length])
    hop = self.time_slice_length - self.time_slice_overlap_length
    taper = np.hanning(self.time_slice_length)
    row_len = x_data.shape[1]
    for row in range(self.mini_batch_size):
        for slot in range(self.number_of_time_slices):
            start = slot * hop
            end = start + self.time_slice_length
            if end > row_len:
                # window runs off the end of the row: copy what is left,
                # leave the remainder zero
                piece = np.zeros(self.time_slice_length)
                piece[0:int(row_len - start)] = x_data[row, start:row_len]
            else:
                piece = x_data[row, start:end]
            out[row, slot] = piece
            if hanning:
                out[row, slot] *= taper
    return out
#x_input_list = tf.unstack(self.x_input_reshaped, self.number_of_time_slices, 1)
def fft(self, x_data):
    """Return the magnitude spectrum of *x_data* (FFT along the last axis)."""
    spectrum = np.fft.fft(x_data)
    return np.abs(spectrum)
def convert_to_list_of_word2vec_window_sized_data(self, x_data):
    """Rearrange a batch of time slices into word2vec-window-sized groups.

    x_data presumably has shape
    (mini_batch_size, number_of_time_slices, time_slice_length), with
    number_of_time_slices divisible by word2vec_window_size -- TODO confirm.
    Returns a randomly permuted list of matrices of shape
    (mini_batch_size, word2vec_window_size, time_slice_length).
    Mutates x_data in place.
    """
    #print(x_data.shape)
    result=[]
    # Mehmet Pekmezci. : make combination
    for i in range(self.word2vec_window_size):
        # NOTE(review): basic numpy slicing returns a VIEW, so row_i aliases
        # x_data[:,i,:]. The first assignment below overwrites row i, after
        # which row_i reads the NEW values, making the second assignment a
        # no-op. The intended swap therefore duplicates row (i+1)%window
        # into row i instead of swapping; a .copy() on row_i was probably
        # intended -- confirm before changing behaviour.
        row_i=x_data[:,i,:]
        x_data[:,i,:]=x_data[:,int((i+1)%self.word2vec_window_size),:]
        x_data[:,int((i+1)%self.word2vec_window_size),:]=row_i
        # group consecutive time slices into windows of word2vec_window_size
        x_data_window=np.reshape(x_data,(self.mini_batch_size,int(self.number_of_time_slices/self.word2vec_window_size),self.word2vec_window_size,self.time_slice_length))
        ## switch axes of batch_size and parallel_lstms, then convert it to list according to first axis. --> this will give us list of matrices of shape (mini_batch_size,lstm_time_steps,time_slice_lentgh)
        x_list=np.swapaxes(x_data_window,0,1).tolist()
        # accumulate the windows produced by every rotation step
        result=result+x_list
    return np.random.permutation(result)
def augment_random(self, x_data):
    """Randomly augment each row of *x_data* (speed, translation, inversion).

    Roughly 10 percent of rows are returned unaugmented, as the original
    comment intended.
    """
    augmented_data = np.zeros([x_data.shape[0], x_data.shape[1]], np.float32)
    for i in range(x_data.shape[0]):
        choice = np.random.rand() * 20  # uniform float in [0, 20)
        augmented_data[i] = x_data[i]
        # BUG FIX: the original tested `choice % 10 != 0` and
        # `choice % 2 == 1` on a CONTINUOUS float, which are true/false
        # with probability ~1/~0 respectively, so rows were (almost) always
        # augmented and (almost) never inverted. Using int(choice) gives
        # the documented 10% skip chance (int(choice) in {0, 10}) and a
        # 50% inversion chance, while keeping the continuous factors.
        if int(choice) % 10 != 0:
            SPEED_FACTOR = 0.8 + choice / 40
            TRANSLATION_FACTOR = int(1000 * choice) + 1
            INVERSE_FACTOR = int(choice) % 2
            if INVERSE_FACTOR == 1:
                augmented_data[i] = -augmented_data[i]
            augmented_data[i] = self.augment_speedx(augmented_data[i], SPEED_FACTOR)
            augmented_data[i] = self.augment_translate(augmented_data[i], TRANSLATION_FACTOR)
            #augmented_data[i]=self.augment_volume(augmented_data[i],VOLUME_FACTOR)
    return augmented_data
'''
def generate_single_synthetic_sample(self,single_data):
generated_data=single_data.copy()
randomValue=np.random.rand()
number_of_frequencies=int(randomValue*20)
#print("generated_data[0:TIME_SLICE]="+str(generated_data[0:TIME_SLICE]))
#print("number_of_frequencies:"+str(number_of_frequencies))
for i in range(number_of_frequencies):
randomValue=np.random.rand()
frequency=randomValue*10000 # this generates 0-10000 float number, from uniform dist.
# frequencies between 10000-20000 is not heard well . so we ignore them. Also sampling rate 22050 only allows to detect TIME_SLICE frequency.
duration=randomValue*4 # this generates 0-4 float number, from uniform dist.
volume=randomValue*5
#volume=5
sine_cosine_choice=int(randomValue*2)
frequency_data=2*np.pi*np.arange(88200)*frequency/22050
if sine_cosine_choice == 0 :
wave_data = (np.sin(frequency_data)).astype(np.float32)
else :
wave_data = (np.cos(frequency_data)).astype(np.float32)
current_frequency_data=volume*wave_data
start_point=int(randomValue*2000)
#start_point=0
#if start_point <= self.time_slice_length :
# print("frequency-"+str(i)+":"+str(frequency)+" start_point:"+str(start_point))
generated_data[start_point:start_point+current_frequency_data.shape[0]]+=current_frequency_data[0:int(current_frequency_data.shape[0]-start_point)]
#print("generated_data[0:TIME_SLICE]="+str(generated_data[0:TIME_SLICE]))
return generated_data
def augment_random(self,x_data):
augmented_data=np.zeros([x_data.shape[0],x_data.shape[1]],np.float32)
for i in range(x_data.shape[0]) :
augmented_data[i]=self.generate_single_synthetic_sample(x_data[i])
return augmented_data
'''
| [
"mehmet.pekmezci.32@gmail.com"
] | mehmet.pekmezci.32@gmail.com |
9123640f03d71649f31c4a6740ba9d1d3eca5caf | 52093359b9fe511dbd1e14347e9c176d56443b20 | /17.Hashmap/Mini Project/project_script_generator.py | b73ceff57833a9e9225bac3f1547f8164f375d7f | [] | no_license | Zioq/Algorithms-and-Data-Structures-With-Python | 3e798d463d46cb850b178f6781f46ba3ef529789 | ab582e67fad9c2620f5a7e19fbfddd44fb910d1a | refs/heads/master | 2023-03-26T06:30:25.539176 | 2021-03-12T22:29:13 | 2021-03-12T22:29:13 | 327,744,226 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,969 | py | # Application usage
'''
- In application you will have to load data from persistent memory to working memory as objects.
- Once loaded, you can work with data in these objects and perform operations as necessary
Example)
1. In Database or other data source
2. Load data
3. Save it in data structure like `Dictionary`
4. Get the data from the data structure and work with process data as necessary
5. Produce output like presentation or update data and upload to the Database etc
In this project we follow those steps like this
1. Text file - email address and quotes
2. Load data
3. Populate the AlgoHashTable
4. Search for quotes from specific users
5. Present the data to the console output
'''
# Eamil address and quotes key-value data generator
from random import choice
from string import ascii_lowercase as letters
list_of_domains = ['yaexample.com','goexample.com','example.com']
quotes = [ 'Luck is what happens when preparation meets opportunity',
'All cruelty springs from weakness',
'Begin at once to live, and count each separate day as a separate life',
'Throw me to the wolves and I will return leading the pack']
def generate_name(length_of_name):
    """Return a random lowercase ASCII string of the given length.

    Fix: the parameter was misspelled `lenght_of_name`; the only caller
    (generate_records) passes it positionally, so renaming is safe.
    """
    return ''.join(choice(letters) for _ in range(length_of_name))
def get_domain(list_of_domains):
    """Pick and return one domain at random from *list_of_domains*."""
    selected_domain = choice(list_of_domains)
    return selected_domain
def get_quotes(list_of_quotes):
    """Pick and return one quote at random from *list_of_quotes*."""
    selected_quote = choice(list_of_quotes)
    return selected_quote
def generate_records(length_of_name, list_of_domains, total_records, list_of_quotes):
    """Write *total_records* random "email:quote" lines to ./data.txt,
    followed by two fixed, known records used by the application demo."""
    with open("data.txt", "w") as to_write:
        for num in range(total_records):
            key = generate_name(length_of_name) + "@" + get_domain(list_of_domains)
            # BUG FIX: the original sampled from the module-level global
            # `quotes`, silently ignoring the `list_of_quotes` parameter.
            value = get_quotes(list_of_quotes)
            to_write.write(key + ":" + value + "\n")
        to_write.write("mashrur@example.com:Don't let me leave Murph\n")
        to_write.write("evgeny@example.com:All I do is win win win no matter what!\n")
generate_records(10, list_of_domains, 100000, quotes)
| [
"jr.roberthan@gmail.com"
] | jr.roberthan@gmail.com |
bfeb3a9e5da77ce573c6c811a17aee6d08cdcd5d | 4caef6b907e4aee94d3e1219e600fda16e19c29d | /build_reconstructor.py | afb07a29bbb2e95a131c7b2f78fcc0c5bf73692c | [] | no_license | jizhihang/cnn_vlm | c61c936a50f72ff933a41d9afbf4d6e88e1c7d30 | ab9f37cf46183fc0a38c8eb4af107a46693b4ab7 | refs/heads/master | 2021-05-30T05:05:35.577435 | 2015-11-27T16:06:35 | 2015-11-27T16:06:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,178 | py | import sys
import cPickle as pickle
import numpy as np
import theano
import theano.tensor as T
import solvers
def run(
    model_name='alexnet',
    # NOTE(review): list defaults are mutable; they are only read here, so
    # no live bug, but tuples (or None-defaults) would be safer.
    layer_names=['data', 'conv1', 'conv2', 'conv3', 'conv4', 'conv5'],
    layer_target=['conv1', 'conv2', 'conv3', 'conv4', 'conv5'],
    image_size=(227, 227),
    target_layer='fc8',
):
    """Assemble a Theano graph that reconstructs an input image by matching
    a target activation (default fc8) under a language-model prior, and
    pickle the resulting solver/shared variables for later optimisation.

    Loads '<dir>/<model>_vlm_<layers>.pkl' and writes
    '<dir>/<model>_vlm_<layers>_reconstructor.pkl'.
    """
    # parameters
    dim_target = 1000  # presumably the fc8 class-score dimensionality — confirm
    directory = './theanomodel'
    filename_model = (
        '%s/%s_vlm_%s.pkl' %
        (directory, model_name, '_'.join(layer_names))
    )
    filename_save = (
        '%s/%s_vlm_%s_reconstructor.pkl' %
        (directory, model_name, '_'.join(layer_names))
    )
    # hyper-parameters (weights are filled in later by the caller via set_value)
    lambda_loss = theano.shared(np.zeros(2))
    lambda_layer = theano.shared(np.zeros(len(layer_target)))
    # NOTE(review): file handle is never closed, and on Python 3 pickle
    # needs mode 'rb' — this code presumably targets Python 2; confirm.
    model = pickle.load(open(filename_model))
    # input image variable being optimised
    x_init = np.zeros((1, 3, image_size[0], image_size[1])).astype(np.float32)
    x = theano.shared(x_init, borrow=False)
    xx = T.tensor4()
    xx_shared = theano.shared(np.zeros(x_init.shape).astype(np.float32))
    # NOTE(review): constructing T.Apply directly is very unusual Theano
    # usage; this looks like it is meant to wire x + xx into the graph as
    # model['data'] — confirm this actually takes effect.
    T.Apply(T.add, [x + xx], [model['data']])
    # loss_target: squared distance to the target activation, normalised by
    # the per-unit std stored alongside the model
    target_shared = theano.shared(np.zeros(dim_target).astype(np.float32))
    mean_std = pickle.load(open(
        '%s/%s_mean_std_%s.pkl' %
        (directory, model_name, target_layer)
    ))
    loss_target = ((
        (model[target_layer] - target_shared[None, :, None, None]) /
        mean_std['std'][None, :, None, None]
    )**2).mean()
    # loss_lm: weighted sum of the per-layer language-model losses
    loss_lm = T.sum([
        lambda_layer[i] * model['loss_lm_%s' % layer_target[i]]
        for i in range(len(layer_target))
    ])
    # total loss: weighted combination of the two terms
    loss = (
        lambda_loss[0] * loss_target +
        lambda_loss[1] * loss_lm
    )
    # functions: SGD over the input image only (no model weights)
    solver = solvers.SGD(loss, [], [x], givens={xx: xx_shared})
    # save everything needed to drive the reconstruction later
    data = {
        'solver': solver,
        'x': x,
        'lambda_loss': lambda_loss,
        'lambda_layer': lambda_layer,
        'target': target_shared,
    }
    # deep graph objects exceed the default recursion limit when pickling
    sys.setrecursionlimit(100000)
    pickle.dump(
        data,
        open(filename_save, 'wb'),
        protocol=pickle.HIGHEST_PROTOCOL,
    )
if __name__ == '__main__':
run()
| [
"hiroharu.kato.1989.10.13@gmail.com"
] | hiroharu.kato.1989.10.13@gmail.com |
e844099f0db3fc837e702a914f731dc05126c3c3 | 68fc7e3b57680ecdd58e16fbe9467480c0d1506a | /python/prnr_analysis.py | 36287fa2f02bf24d883ad0029da9bcb6c0ddc7a8 | [] | no_license | paramitamirza/AudiHackathon2017 | 6ccbe4cc6a6a62d9b84e25ff497daeb66131aea5 | e079505bba391b540302458a0adc3b2fdfee1217 | refs/heads/master | 2021-07-07T15:13:38.234328 | 2017-10-05T14:13:29 | 2017-10-05T14:13:29 | 105,838,667 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,236 | py | import csv
import numpy as np
import pandas as pd
csv = np.genfromtxt('../data/prnr_einzeln.csv',delimiter=";",dtype=None)
pnrdata = csv[1:]
#print (pnrdata)
out = open("../data/prnr_clean.csv", "w")
prfam = {}
i = 1
for row in pnrdata:
prnrstr = row[1]
for cols in prnrstr[2:].decode('UTF-8').split("~3L~"):
feat = cols.replace("~3L", "")
feat = feat.replace("~", "_")
feat = feat.replace("\"", "")
feat_fam = feat.split("_")[0]
if feat_fam not in prfam:
prfam[feat_fam] = i
i += 1
print (prfam)
out.write("KNR")
for fam in prfam:
out.write("," + fam)
out.write("\n")
for row in pnrdata:
car_id = row[0]
prnrstr = row[1]
prnrlist = []
for i in range(len(prfam) + 1):
prnrlist.append("")
prnrlist[0] = car_id.decode('UTF-8')
for cols in prnrstr[2:].decode('UTF-8').split("~3L~"):
feat = cols.replace("~3L", "")
feat = feat.replace("~", "_")
feat = feat.replace("\"", "")
fam = feat.split("_")[0]
feat = feat.split("_")[1]
prnrlist[prfam[fam]] = feat
out.write (",".join(prnrlist) + "\n")
out.close()
df = pd.read_csv('pandas_dataframe_importing_csv/example.csv')
| [
"paramita135@gmail.com"
] | paramita135@gmail.com |
65cf126f02d6596aa54439c2ab4cebc29efc5218 | 626dbf669672eccd2e97fb225549615289af34d9 | /users/urls.py | 517f1045dc3192ae62d582e105bbd9eae1fc1497 | [] | no_license | dbwei/relationship | 6a0692c92898f24b6445fa20a2a35da5918e70ff | 0174f2da79a90cbfa2d355d7f62d2730748fd941 | refs/heads/master | 2021-05-14T09:38:52.629179 | 2018-01-08T05:01:33 | 2018-01-08T05:01:33 | 116,328,609 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 314 | py | from django.conf.urls import url
from django.contrib.auth.views import login
from . import views
urlpatterns = [
url(r'^login/$', login, {'template_name': 'users/login.html'}, name='login'),
url(r'^logout/$', views.logout_view, name='logout'),
url(r'^register/$', views.register, name='register'),
] | [
"13109592605@163.com"
] | 13109592605@163.com |
edd66ab7bc8def989337f3754c6c61175a5435f5 | dba2cf5de4d2b198465674e90c97fd549071857d | /Others/Netsquid_Lib.py | 01913d10815f70d0636027fb563b699e9e035893 | [] | no_license | h-oll/netsquid-private | 80202023d69562c50ea0d738c08a60537770f766 | a894c7c8b1dfc60e70171493991e9cc4f9ac12d3 | refs/heads/master | 2022-04-29T19:45:50.407566 | 2022-04-24T17:03:27 | 2022-04-24T17:03:27 | 224,158,119 | 1 | 1 | null | 2020-12-08T15:11:57 | 2019-11-26T09:56:25 | Python | UTF-8 | Python | false | false | 12,258 | py | #!/usr/bin/env python
# coding: utf-8
# In[16]:
import numpy as np
import netsquid as ns
from netsquid.qubits.qubitapi import *
'''
function:
Generate a GHZ set with customized length and entangled qubits.
input:
num_qubits: Numbers of qubits in a column.
num_sets: Numbers of qubits in a raw.
output:
A 2-D arrary of qubits with every one in the same raw entangled.
'''
def Create_GHZ_set_list(num_qubits,num_sets):
qList_2D=[]
for i in range(num_qubits):
qList = create_qubits(num_sets) #qubit 000
H | qList[0]
tmp=[qList[0]]
for j in range(1, num_sets):
ns.qubits.operate([qList[0],qList[j]], ns.CNOT)
tmp.append(qList[j])
qList_2D.append(tmp)
return qList_2D
# In[1]:
#Verify
from netsquid.qubits.operators import *
tmp=Create_GHZ_set_list(5,3)
print(tmp)
np.asarray(tmp)
print(tmp)
print(tmp[:,2])
mes0=ns.qubits.qubitapi.measure(tmp[2][0],observable=Z)
mes1=ns.qubits.qubitapi.measure(tmp[2][1],observable=Z)
mes2=ns.qubits.qubitapi.measure(tmp[2][2],observable=Z)
print(mes0)
print(mes1)
print(mes2)
# In[108]:
'''
function:
Generate a random serial number list.
input:
num_qubits: Length of serial number.
min: Minimum value possible in the list.
max: Maximum value possible in the list.
output:
A random serial number list.
'''
from random import randint
def SerialNumGen(num_bits,min,max):
#seed()
bitList=[]
startUSN=randint(min,max-num_bits+1)
for i in range(num_bits):
bitList.append(startUSN)
startUSN+=1
return bitList
# In[114]:
#verify
SerialNumGen(7,0,10)
# In[12]:
'''
function:
One way function which can be used in many place.
input:
any
output:
A qubit in this case.
'''
from netsquid.qubits import create_qubits
from netsquid.qubits.operators import *
# hash with the symmetric key, Unique Serial Number, the amound of money
def OneWayFunction(identity=None,symkey=[],randomSerialNumber=0,Money=0):
owf_key=''
# covert inputs to binary
for i in symkey:
owf_key+=str(bin(i)[2:])
owf_key+=str(bin(randomSerialNumber)[2:])
owf_key+=str(bin(Money)[2:])
owf_key=int(owf_key)
# make it qubit
# apply three big prime numbers
p1 = 33179
p2 = 32537
p3 = 31259
MyRx=create_rotation_op(np.pi/180*(owf_key%p1), (1, 0, 0))
MyRy=create_rotation_op(np.pi/180*(owf_key%p2), (0, 1, 0))
MyRz=create_rotation_op(np.pi/180*(owf_key%p3), (0, 0, 1))
tempQubit=create_qubits(1)
tempQubit=tempQubit[0]
MyRx | tempQubit
MyRy | tempQubit
MyRz | tempQubit
#print(tempQubit.qstate.dm)
return tempQubit
# In[ ]:
'''
function:
Cswap function.
input:
Three qubits.
output:
Three qubits applied Cswap.
'''
# C swap can be composed by T,H
# see https://www.mathstat.dal.ca/~selinger/quipper/doc/QuipperLib-GateDecompositions.html
from netsquid.qubits.operators import H,T
def Cswap(qA,qB,qC):
invT=T.inv
operate([qC, qB], ops.CNOT)
H | qC
T | qA
T | qB
T | qC
operate([qB, qA], ops.CNOT)
operate([qC, qB], ops.CNOT)
operate([qA, qC], ops.CNOT)
T | qC
invT | qB
operate([qA, qB], ops.CNOT)
invT | qA
invT | qB
operate([qC, qB], ops.CNOT)
operate([qA, qC], ops.CNOT)
operate([qB, qA], ops.CNOT)
H | qC
operate([qC, qB], ops.CNOT)
return qA,qB,qC
# In[11]:
'''
function:
Swap test which exames the closeness of two qubits.
input:
two qubits.
output:
A tuple indecating the index and pobability.
(0,0.5) means orthogonal.
(0,1) means the two are equal.
'''
from netsquid.qubits import create_qubits
from netsquid.qubits.operators import H,Z
def SwapTest(qB,qC):
qA=create_qubits(1)
qA=qA[0]
H | qA
Cswap(qA,qB,qC)
H | qA
return ns.qubits.qubitapi.measure(qA,observable=Z)
# In[9]:
'''
function:
Create qubits list.
input:
numbers of qubits.
output:
A list of quantum states.(0,1,+,-)
And corespond quantum list.
'''
from netsquid.qubits import create_qubits
from random import randint
from netsquid.qubits.operators import H,X
def Create_random_qubits(num_bits):
res_state=[]
qlist=[]
qlist=create_qubits(num_bits)
for i in range(0,num_bits):
res_state.append(randint(0,3)) # in four states
for a,b in zip(res_state, qlist):
if a == 0: # 0 state
pass
elif a == 1: # 1 state #X
X | b
elif a == 2: # + state #H
H | b
elif a == 3: # - state #XH
X | b
H | b
else :
print("Create random bits ERROR!!")
return res_state, qlist
# In[13]:
'''
function:
Measuring qubits according to certain basis.
Names of qubits need to be indexed from 0
input:
A list of basis consised by 0/1. (0:standard, 1:Hadamard)
A list of qubits.
output:
A list of measurment tuple accordingly. Return merely 0 means missing such qubits
'''
import netsquid as ns
def Measure_by_basis(basisList,qList):
if len(basisList)<len(qList):
print("Quantum list is too long! ERROR!!")
return 0
else:
res_measurement=[0]*len(basisList) #init to 0
for q in qList:
pos=int(q.name[5:]) #get qubit index #defalt first qubit name = QS#0-0
if basisList[pos]==0:
res_measurement[pos]=ns.qubits.qubitapi.measure(q,observable=Z) #measure in standard basis
elif basisList[a]==1:
res_measurement[pos]=ns.qubits.qubitapi.measure(q,observable=X) #measure in Hadamard basis
else:
print("measuring ERROR!!\n")
return res_measurement
# In[ ]:
'''
function:
Wait certain amout of simulated time in simulation
This is the way NetSquid implements waiting action in simulated time.
By customizing a wait event, it will call End_waiting function after waiting.
More example at https://github.com/h-oll/netsquid-private/blob/master/Others/QMemory/QMemoryNoiceSim.py
'''
class example_class():
def example_function:
# Put folowing lines in functions you want to wait.
My_waitENVtype = EventType("WAIT_EVENT", "Wait for N nanoseconds")
self._schedule_after(customized_delay, My_waitENVtype) # customized_delay
self._wait_once(ns.EventHandler(self.End_waiting),entity=self,event_type=My_waitENVtype)
# Put above lines in functions you want to wait.
# called after qaiting
def End_waiting(self,event):
#continue your protocol
# In[ ]:
'''
Assuming that qubit flip happens less likely than not flipping.
Correct qubit according to majority without measuring them.
input:
Qubit lists to compare and correct when qubit flips.
Idealy Qlist1=Qlist2=Qlist3.
Same column in different Qlist will be corrected accoring to majority.
output:
Corrected Qubit list
'''
import numpy as np
import netsquid as ns
from netsquid.qubits import create_qubits
from netsquid.qubits.operators import *
def QBitCorrection(Qlist1,Qlist2,Qlist3):
ret=[]
for q1,q2,q3 in zip(Qlist1,Qlist2,Qlist3):
Qlist=[q1,q2,q3]
# Qlist
# get Error Syndrome
ErrorSyndromeLen=len(Qlist)-1
ES_Qlist=create_qubits(ErrorSyndromeLen)
#print(ES_Qlist)
mes=[]
for i in range(ErrorSyndromeLen):
ns.qubits.operate([Qlist[i],ES_Qlist[i]], ns.CNOT)
ns.qubits.operate([Qlist[i+1],ES_Qlist[i]], ns.CNOT)
mes.append(ns.qubits.qubitapi.measure(ES_Qlist[i],observable=Z)[0])#
#print(mes)
# get Qlist idea from Error Syndrome
res=[True]*len(Qlist)
ind=True
for i in range(len(mes)):
if mes[i]==1:
ind= not ind
res[i+1]=ind
else:
res[i+1]=ind
# count false cases
F_count=0
for i in res:
if i ==False:
F_count+=1
# correct qubits
if 2*F_count>len(mes): # case that false is more than true, than false might be the correct ones.
for i in range(len(res)):
if res[i] == True:
X|Qlist[i]
else:
for i in range(len(res)):
if res[i] == False:
X|Qlist[i]
ret.append(Qlist[0])
return ret
# In[ ]:
#Verify
qlist1=create_qubits(7)
qlist2=create_qubits(7)
qlist3=create_qubits(7)
#X|qlist1[5]
X|qlist2[5]
X|qlist3[5]
#X|qlist[0]
#X|qlist[2]
#X|qlist[1]
for i in qlist1:
print(ns.qubits.qubitapi.measure(i,observable=Z))
print("--------")
res=QBitCorrection(qlist1,qlist2,qlist3)
for i in res:
print(ns.qubits.qubitapi.measure(i,observable=Z))
# In[ ]:
'''
Create EPR pairs.
input:
Numbers of pairs.
output:
Two lists of qubits, with the corresponding slots entangled.
'''
import netsquid as ns
from netsquid.qubits import create_qubits
from netsquid.qubits.operators import *
def Create_multiEPR(num_bits):
qListA=[]
qListB=[]
for i in range(num_bits):
qA, qB = create_qubits(2) # qubit 00
ns.qubits.operate(qA, ns.H)
ns.qubits.operate([qA,qB], ns.CNOT)
qListA.append(qA)
qListB.append(qB)
return qListA, qListB
# In[ ]:
# Verify
AA,BB=Create_multiEPR(5)
mes=ns.qubits.qubitapi.measure(AA[2],observable=Z)
for i in range(0,4):
print(AA[i].qstate.dm)
print(BB[i].qstate.dm)
print(mes)
# In[ ]:
'''
Compare two lists, find the unmatched index, then remove corresponding slots in loc_meas.
Input:
Two lists with elements 0-2 (0:Z, 1:X, 2:qubit miss).
Output:
measurement result left.
'''
def Compare_basis(loc_basis_list,res_basis_list,loc_meas):
if len(loc_basis_list) != len(res_basis_list):
print("Comparing error! length issue!")
print(loc_basis_list)
print(res_basis_list)
return -1
popList=[]
for i in range(len(res_basis_list)):
if loc_basis_list[i] != res_basis_list[i]:
popList.append(i)
for i in reversed(popList):
if loc_meas:
loc_meas.pop(i)
return loc_meas
# In[ ]:
# Verify
a=[1,2,3]
b=[4,2,6]
c=[7,8,9]
Compare_basis(a,b,c)
print(c)
# In[42]:
import netsquid as ns
from random import randint
from netsquid.qubits.operators import X,Z
'''
Randomly measure a qubits list by Z or X basis.
Input:
Numbers of qubits that should be >= the length of qlist. Equal case happens when no loss.
Qubit list to measure.
Output:
basisList: A list of basis applied(Z X -1). -1 means qubit missing. (detect by qubit name)
loc_res_measure: A list of measurment results. If there's a qubit loss,
both opList and loc_res_measure will have value -1 in the such slot in the list.
'''
def Random_ZX_measure(num_bits,qlist):
num_start=int(qlist[0].name[-len('-'):])# get value after qubit name "QS#<i>-n"
basisList = []*num_bits # set boundary
loc_res_measure=[]*num_bits # set boundary
ind=0
for i in range(num_start,num_start+num_bits):
if ind <= len(qlist)-1:
if int(qlist[ind].name[-len('-'):]) == i:
rbit = randint(0,1) # 0:Z 1:X
if rbit:
basisList.append('X')
loc_res_measure.append(ns.qubits.qubitapi.
measure(qlist[ind],observable=X)[0]) #measure in Hadamard basis
else:
basisList.append('Z')
loc_res_measure.append(ns.qubits.qubitapi.
measure(qlist[ind],observable=Z)[0]) #measure in standard basis
ind+=1
else:
basisList.append(-1)
loc_res_measure.append(-1)
return basisList,loc_res_measure
# In[47]:
# verify
from netsquid.qubits import create_qubits
qList = create_qubits(4)
qList2 = create_qubits(3)
qList2.pop()
qList.extend(qList2)
print(qList)
oplist,mes=Random_ZX_measure(6,qList)
print(oplist,mes)
# In[ ]:
| [
"liao.chinte@gmail.com"
] | liao.chinte@gmail.com |
8bc28737791931b0e85dcc7d9c3af83166098f3e | 7473bb17319b6ae146e64e575b32f273fcdb838d | /pages/views.py | 1a1ec7985137f939f1307685bc8512453b5f843a | [] | no_license | hyperxpizza/blognasterdyach | fd3ec4764065e53bc088b19e407e32189c4715dd | ce2e9138e04c55036bf37e079cd7036a7eac8739 | refs/heads/master | 2020-08-07T00:49:54.199347 | 2019-10-06T19:06:59 | 2019-10-06T19:06:59 | 213,227,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 367 | py | from django.shortcuts import render
from .forms import ContactForm
def contact(request):
if request.method == "POST":
form = ContactForm(request.POST)
if form.is_valid():
cd = form.cleaned_data
else:
form = ContactForm()
context = {
'form': form
}
return render(request, 'pages/contact.html', context) | [
"wojtek.frackowski@gmail.com"
] | wojtek.frackowski@gmail.com |
89e9601a52fd5d4f0fb45ca1a91716563f0c1b32 | c3a620d09bd447a38f043449d9ccebc09f5cfb2d | /src/src/profile_project/profiles_api/migrations/0003_auto_20200604_0244.py | cf3110064eb257c6516110296556da0997b6470d | [
"Apache-2.0"
] | permissive | gustavoghp87/django-rest-api | a295391fe3cf5da0d9c79a91290a948efc0c38a7 | 3cdd898c05e68d3e07970865cea2b5ea1a81cbd1 | refs/heads/master | 2022-12-11T13:28:20.240376 | 2020-09-11T06:12:39 | 2020-09-11T06:12:39 | 294,594,819 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 384 | py | # Generated by Django 3.0.6 on 2020-06-04 02:44
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('profiles_api', '0002_profilefeeditem'),
]
operations = [
migrations.RenameField(
model_name='profilefeeditem',
old_name='status_taex',
new_name='status_text',
),
]
| [
"ghp.2120@gmail.com"
] | ghp.2120@gmail.com |
78038690c1b1ca1682b35350e48977d2ec09e397 | 1da52863c3a4b2538f90a87bbd237444dad5f382 | /sram.py | 0ff3e8f44a96099b89ca45e83b28702c6f4a273d | [
"MIT"
] | permissive | AmyBeisel/SRAM_ASSESSMENT | 8c123a428525f9a43b193578e6113836c3e84d17 | eabb73d5f24476e8628723f00d92529d853309cd | refs/heads/main | 2023-03-27T08:54:12.285466 | 2021-03-31T23:39:25 | 2021-03-31T23:39:25 | 350,150,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,949 | py |
import click
import requests
@click.command()
@click.option('--list', is_flag=True, help="will print everything")
@click.option('--bike_id',default=False, help="Enter the id of the bike you want to select")
@click.option('--fw_id', default=False, help="Enter the id of the front wheel you want to select")
@click.option('--rw_id', default=False, help="Enter the id of the rear wheel you want to select")
def main(list, bike_id, fw_id, rw_id):
if list == True:
bikes = requests.get("http://localhost:8000/Bike").json()
click.echo('\nBikes')
for b in bikes:
click.echo(f"- Id: {b['id']}, Brand: {b['brand']}, Type: {b['bike_type']} , Weight: {b['weight']}")
click.echo('\nFront Wheel')
fw = requests.get("http://localhost:8000/FrontWheel/").json()
for f in fw:
click.echo(f"- Id: {f['id']}, Brand: {f['brand']}, Size: {f['size']}, Weight: {f['weight']}")
click.echo('\nRear Wheel')
rw = requests.get("http://localhost:8000/RearWheel/").json()
for r in rw:
click.echo(f"- Id: {r['id']}, Brand: {r['brand']}, Size: {r['size']}, Weight: {r['weight']}")
if bike_id and fw_id and rw_id:
click.echo('\n This shows the selected bike, front wheel, rear wheel and weight')
abike = requests.get(f"http://localhost:8000/Bike/{bike_id}").json()
click.echo(abike)
afw = requests.get(f"http://localhost:8000/FrontWheel/{fw_id}").json()
click.echo(afw)
arw = requests.get(f"http://localhost:8000/RearWheel/{rw_id}").json()
click.echo(arw)
total_weight = abike['weight'] + afw['weight'] + arw['weight']
#click.echo(total_weight)
click.echo(f"\n You choose a great setup:\n - bike: {abike['brand']}, Front Wheel: {afw['brand']}, Rear Wheel: {arw['brand']}, total weight: {total_weight}")
if __name__ == "__main__":
main()
| [
"amy.beisel@gmail.com"
] | amy.beisel@gmail.com |
e943a0ec91cac2f2974684df1bc400715a012782 | 0a537a35e48bd35cb3ef2ce5c280f778ba59af1e | /bikeshare.py | 04ed6ab4508338662618ec6ffcd74ab4a865493a | [] | no_license | MideOludoyi/pdsnd_github | 94b4b7bc55a48de8ec34e88671a274be9f296c9e | c09c6703807af608ad2c1907683f93f87d6e0d02 | refs/heads/master | 2023-08-06T04:53:15.359234 | 2021-10-10T14:57:46 | 2021-10-10T14:57:46 | 415,579,587 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,748 | py | import time
import pandas as pd
import numpy as np
CITY_DATA = { 'chicago': 'chicago.csv',
'new york city': 'new_york_city.csv',
'washington': 'washington.csv' }
def get_filters():
    """
    Asks user to specify a city, month, and day to analyze.

    Returns:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "All" to apply no month filter
        (str) day - name of the day of week to filter by, or "All" to apply no day filter
    """
    print('Hello! Let\'s explore some US bikeshare data!')
    # get user input for city (chicago, new york city, washington)
    while True:
        city = input("\nWhich city would you like to explore? New York City, Chicago City or Washington City?\n").lower()
        if city not in ('new york city', 'chicago', 'washington'):
            print("Please enter a valid city. Try again.")
            continue
        else:
            break

    # get user input for month (all, january, february, ... , june)
    # BUG FIX: the input is title-cased, so typing "all" yields "All", but
    # the valid set contained lowercase 'all' — the documented "all" option
    # could never be accepted. Match the title-cased 'All' instead.
    while True:
        month = input("\nWhich month would you like to explore? January, February, March, April, May, June? If you would like to explore all months, input 'all'\n").title()
        if month not in ('January', 'February', 'March', 'April', 'May', 'June', 'All'):
            print("Please enter a valid month. Try again.")
            continue
        else:
            break

    # get user input for day of week (all, monday, tuesday, ... sunday)
    # BUG FIX: same title-casing issue as the month prompt.
    while True:
        day = input("\nWhich day would you like to explore? Sunday, Monday, Tuesday, Wednesday, Thursday, Friday, Saturday? If you would like to explore all days, input 'all'\n").title()
        if day not in ('Sunday', 'Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday', 'Saturday', 'All'):
            print("Please enter a valid day. Try again.")
            continue
        else:
            break

    print('-'*40)
    return city, month, day
def load_data(city, month, day):
    """
    Loads data for the specified city and filters by month and day if applicable.

    Args:
        (str) city - name of the city to analyze
        (str) month - name of the month to filter by, or "All" to apply no month filter
        (str) day - name of the day of week to filter by, or "All" to apply no day filter
    Returns:
        df - Pandas DataFrame containing city data filtered by month and day
    """
    # Load data into a dataframe
    df = pd.read_csv(CITY_DATA[city])

    # Convert the Start Time column to datetime
    df['Start Time'] = pd.to_datetime(df['Start Time'])

    # Derive month and day from Start Time and make them a new column
    df['month'] = df['Start Time'].dt.month
    # NOTE(review): .dt.weekday_name was removed in newer pandas in favour
    # of .dt.day_name(); kept as-is to match the pandas version this file
    # was written against — confirm before upgrading.
    df['day_of_week'] = df['Start Time'].dt.weekday_name

    # Filter by month if a specific month was requested
    if month != 'All':
        # Use index to derive the month number
        months = ['January', 'February', 'March', 'April', 'May', 'June']
        month = months.index(month) + 1
        df = df[df['month'] == month]

    # BUG FIX: the day filter was applied unconditionally, so day == 'All'
    # compared the weekday names against the literal string 'All' and
    # returned an empty frame. Only filter when a specific day was chosen.
    if day != 'All':
        df = df[df['day_of_week'] == day.title()]

    return df
def time_stats(df):
    """Print the most common month, weekday and start hour found in df.

    Side effect (kept from the original): adds a derived 'hour' column to df.
    """
    print('\nCalculating The Most Frequent Times of Travel...\n')
    started = time.time()

    # Most common month (numeric, as stored in the 'month' column).
    print('Most Common Month:', df['month'].mode()[0])

    # Most common day of the week (string column built by load_data).
    print('Most Common day:', df['day_of_week'].mode()[0])

    # Most common start hour, derived from the Start Time timestamps.
    df['hour'] = df['Start Time'].dt.hour
    print('Most Common Hour:', df['hour'].mode()[0])

    print("\nThis took %s seconds." % (time.time() - started))
    print('-' * 40)
def station_stats(df):
    """Displays statistics on the most popular stations and trip."""
    print('\nCalculating The Most Popular Stations and Trip...\n')
    start_time = time.time()
    # Most commonly used start station
    start_station = df['Start Station'].value_counts().idxmax()
    print('Most Commonly used start station:', start_station)
    # Most commonly used end station
    end_station = df['End Station'].value_counts().idxmax()
    print('\nMost Commonly used end station:', end_station)
    # Bug fix: the original computed a groupby into an unused local
    # (`combination_ctation`) and then printed the *individually* most common
    # start and end stations, which is not the most frequent trip pair.
    top_start, top_end = df.groupby(['Start Station', 'End Station']).size().idxmax()
    print('\nMost frequent combination of start station and end station trip:', top_start, " & ", top_end)
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def trip_duration_stats(df):
    """Print the total trip time (in days) and the mean trip time (in minutes)."""
    print('\nCalculating Trip Duration...\n')
    started = time.time()

    # 'Trip Duration' is stored in seconds; report the grand total in days...
    total_seconds = df['Trip Duration'].sum()
    print('Total travel time:', total_seconds / 86400, " Days")

    # ...and the per-trip average in minutes.
    print('Mean travel time:', df['Trip Duration'].mean() / 60, " Minutes")

    print("\nThis took %s seconds." % (time.time() - started))
    print('-' * 40)
def user_stats(df):
    """Displays statistics on bikeshare users."""
    print('\nCalculating User Stats...\n')
    start_time = time.time()
    # Counts of user types (always present in the data sets)
    user_types = df['User Type'].value_counts()
    print('User Types:\n', user_types)
    # Counts of gender; the Washington data set has no Gender column,
    # so a missing column is reported instead of crashing.
    try:
        gender_category = df['Gender'].value_counts()
        print('\nGender Category:\n', gender_category)
    except KeyError:
        print("\nGender Types:\nThere is no data available for this month")
    # Earliest, most recent, and most common year of birth
    # (Birth Year is also an optional column).
    try:
        earliest_Year = df['Birth Year'].min()
        print('\nEarliest Year:', earliest_Year)
    except KeyError:
        print("\nEarliest Year:\nThere is no data available for this month.")
    try:
        most_recent_year = df['Birth Year'].max()
        print('\nMost Recent Year:', most_recent_year)
    except KeyError:
        print("\nMost Recent Year:\nThere is no data available for this month.")
    try:
        most_common_year = df['Birth Year'].value_counts().idxmax()
        print('\nMost Common Year:', most_common_year)
    except KeyError:
        print("\nMost Common Year:\nThere is no data available for this month.")
    # Bug fix: the elapsed-time footer was printed twice; print it once.
    print("\nThis took %s seconds." % (time.time() - start_time))
    print('-'*40)
def display_data(df):
    """Interactively page through the raw data, five rows at a time."""
    # Always show the first five rows up front.
    print(df.head())
    offset = 0
    # Keep paging for as long as the user answers 'yes'.
    while input('\nWould you like to view next five row of raw data? Enter yes or no.\n').lower() == 'yes':
        offset += 5
        print(df.iloc[offset:offset + 5])
        print("You have just viewed a sample of the data requested")
#display data function can be added to the function below
def main():
    """Run the interactive bikeshare analysis loop until the user quits."""
    while True:
        # Gather the filters, load the matching data, then run every report.
        city, month, day = get_filters()
        df = load_data(city, month, day)
        for report in (time_stats, station_stats, trip_duration_stats, user_stats):
            report(df)
        if input('\nWould you like to restart? Enter yes or no.\n').lower() != 'yes':
            break
if __name__ == "__main__":
    main()
| [
"oludoyimayowa1@gmail.com"
] | oludoyimayowa1@gmail.com |
f9f87f36e42fc639ea3a6e5bfc111d52fdd927b0 | cd6f5bc831396005a16247ad9f24bf375daa7a56 | /01-Lists/4.13_Buffet.py | e01345604192a572529303858a7c71028ad71b92 | [] | no_license | fiolisyafa/CS_ITP | dbdb20a03c9a2f79169676b591d98f18f2801b33 | 55743b5f8dfc3f3c440c346e96e67737801322a6 | refs/heads/master | 2020-03-07T03:03:06.168208 | 2018-03-29T03:49:29 | 2018-03-29T03:49:29 | 127,224,541 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 331 | py | buffet = ("rice", "chicken", "vegetables", "soup", "fruits")
print("Original menu:")
for food in buffet:
print(food)
'''
buffet[3] = "candy'
print(buffet)
#The above command got rejected
'''
modified_menu = ("rice", "chicken", "vegetables", "noodle", "fish")
print("\nModified menu:")
for food in modified_menu:
print(food)
| [
"fiolisyaambadar@gmail.com"
] | fiolisyaambadar@gmail.com |
bd89296c0898079758feab706b83a4adb776ee0a | d93423d158ce6d504045a8629ea0adce800fd04d | /src/LinearFunctionApproximation.py | 650b8483f23a5361f52a655ccbd613e174485af4 | [] | no_license | kensk8er/easy21 | 5257bf3cba9594a64ecd6c2c4f0e0f3f128c221d | aba945cf918e317207c651c6166c4885e4b9f30f | refs/heads/master | 2020-12-25T17:45:47.928824 | 2014-03-31T21:11:59 | 2014-03-31T21:11:59 | 21,072,157 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,823 | py | import pylab
from easy21.environment import step, State
from easy21.plot import plot_linear_value_function
from easy21.policy import epsilon_greedy_linear
from easy21.value import ActionValueLinearApproximation, LinearFunction
from utils.util import *
from easy21 import environment
from collections import defaultdict
import numpy as np
__author__ = 'kensk8er'
def calculate_mse(subject_function):
    """Mean squared error of subject_function against the pickled
    Monte-Carlo ground-truth action values, averaged over every
    (dealer, player, action) state of Easy21."""
    truth = unpickle('result/MonteCarloControl.pkl')['action_value']
    approximator = LinearFunction()
    # Accumulate squared error over the full state-action grid.
    error_sum = 0
    count = 0
    for dealer in range(1, 11):
        for player in range(1, 22):
            for action in range(0, 2):
                # Map the raw state to its binary feature vector.
                approximator.update(State(dealer=dealer, player=player))
                feats = approximator.get_features()
                error_sum += (subject_function[(feats, action)] -
                              truth[(dealer, player, action)]) ** 2
                count += 1
    return error_sum / count
def sarsa(lambda_value, iteration_num):
    """Run Sarsa(lambda) with linear function approximation on Easy21.

    :param lambda_value: eligibility-trace decay factor in [0, 1]
    :param iteration_num: number of episodes to run
    :rtype : MSE (float)
    """
    print 'lambda:', lambda_value
    # define functions (dictionaries)
    action_value_function = ActionValueLinearApproximation(float)
    linear_function = LinearFunction()
    # one weight per feature (3 dealer intervals x 6 player intervals), per action
    parameters_hit = np.array([0 for i in range(3 * 6)])
    parameters_stick = np.array([0 for i in range(3 * 6)])
    # define parameters
    batch = 100
    num_zero = 10  # NOTE(review): unused local — candidate for removal
    epsilon = 0.1  # exploration rate for the epsilon-greedy policy
    alpha = 0.01   # learning rate
    HIT = 0
    STICK = 1
    # track a learning curve only for the two lambdas plotted in the report
    if lambda_value == 0. or lambda_value == 1.:
        learning_curve = []
    # iterate over iteration_num
    for episode in xrange(iteration_num):
        if episode % batch == 0:
            print '\repisode:', episode,
            if lambda_value == 0. or lambda_value == 1.:
                learning_curve.append(calculate_mse(action_value_function))
        # initialize state, action, and eligibility-trace
        state = environment.State()
        linear_function.update(state)
        current_features = linear_function.get_features()
        action = epsilon_greedy_linear(action_value_function, current_features, epsilon)
        eligibility_hit = np.array([0 for i in range(3 * 6)])
        eligibility_stick = np.array([0 for i in range(3 * 6)])
        while state.terminal is False:
            # update delta, and eligibility-trace (accumulating traces)
            if action == HIT:
                eligibility_hit = np.add(eligibility_hit, np.array(current_features))
            else:
                eligibility_stick = np.add(eligibility_stick, np.array(current_features))
            # take an action (step mutates `state` in place)
            reward = step(state, action)
            if reward is None:
                # assign 0 if the match hasn't finished yet
                reward = 0
            linear_function.update(state)
            new_features = linear_function.get_features()
            # update delta (TD error, before adding the successor estimate)
            delta_hit = reward - np.array(new_features).dot(parameters_hit)
            delta_stick = reward - np.array(new_features).dot(parameters_stick)
            # update Action Value Function
            if action == HIT:
                action_value_function.update_value((new_features, action), parameters_hit)
            else:
                action_value_function.update_value((new_features, action), parameters_stick)
            # update delta, parameters, and eligibility-trace
            if action == HIT:
                delta_hit += action_value_function[(new_features, HIT)]
            else:
                delta_stick += action_value_function[(new_features, STICK)]
            parameters_hit = np.add(parameters_hit, alpha * delta_hit * eligibility_hit)
            parameters_stick = np.add(parameters_stick, alpha * delta_stick * eligibility_stick)
            # decay the traces by lambda after each step
            eligibility_hit = eligibility_hit * lambda_value
            eligibility_stick = eligibility_stick * lambda_value
            # decide an action
            action = epsilon_greedy_linear(action_value_function, new_features, epsilon)
            # update state and action
            current_features = new_features
    print '\repisode:', episode
    print 'done!'
    if lambda_value == 0. or lambda_value == 1.:
        learning_curve.append(calculate_mse(action_value_function))
    # plot learning curve
    if lambda_value == 0. or lambda_value == 1.:
        x = range(0, iteration_num + 1, batch)
        pylab.title('Learning curve of Mean-Squared Error against episode number: lambda = ' + str(lambda_value))
        pylab.xlabel("episode number")
        pylab.xlim([0, iteration_num])
        pylab.xticks(range(0, iteration_num + 1, batch))
        pylab.ylabel("Mean-Squared Error")
        pylab.plot(x, learning_curve)
        pylab.show()
    # calculate MSE
    print 'calculate the Mean-Squared Error...'
    MSE = calculate_mse(action_value_function)
    ## value function
    #value_function = action_value_function.to_value_function()
    ## plot the optimal value function
    #plot_linear_value_function(action_value_function, "Optimal Value Function (Linear Approximation)")
    return MSE
if __name__ == '__main__':
    # Sweep lambda from 0.0 to 1.0 in steps of 0.1 and record the final MSE
    # of each Sarsa(lambda) run.
    MSE = [0 for i in range(11)]
    # iterate over every lambda
    for i in range(11):
        MSE[i] = sarsa(lambda_value=float(i) / 10, iteration_num=1000)
        print "Mean-Squared Error:", MSE[i]
        print ''
    # plot the mean squared error against lambda
    #x = range(11)
    x = [round(0.1 * i, 1) for i in range(11)]
    pylab.title('Mean-Squared Error against lambda')
    pylab.xlabel("lambda")
    pylab.xlim([0., 1.])
    pylab.xticks([round(0.1 * i, 1) for i in range(11)])
    pylab.ylabel("Mean-Squared Error")
    pylab.plot(x, MSE)
    pylab.show()
| [
"kensk8er1017@gmail.com"
] | kensk8er1017@gmail.com |
a61f46b84c958263ddc0fbf1afb882fabef4f54e | 74860c9551646a0a82302a0830abbf576a6b3d7a | /crystal/migrations/0001_initial.py | dfe700c16b3257c669ec889d9353b5e94001e5bd | [] | no_license | windcrystal0513/AR_tree | e4cfd632c0d2d52d9361a64497a8b8c6ae2ca927 | a04fef6502ed33c7b7a8e06746dd597cac27545f | refs/heads/master | 2020-06-11T16:43:54.440237 | 2019-06-27T05:19:19 | 2019-06-27T05:19:19 | 194,027,357 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 618 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Topic model/table."""
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Topic',
            fields=[
                ('id', models.AutoField(verbose_name='ID', primary_key=True, serialize=False, auto_created=True)),
                ('topic_name', models.CharField(max_length=30)),
                # Fix: `max_length` is not a valid IntegerField option — Django
                # ignores it and raises system check fields.W122 — so it is
                # dropped; the resulting database schema is unchanged.
                ('topic_id', models.IntegerField()),
                ('domain_id', models.IntegerField()),
            ],
        ),
    ]
| [
"wuyunfeng1234123@163.com"
] | wuyunfeng1234123@163.com |
b599e69f4c61b616f7fa40d598b1d7630d4fb187 | c1b415011dbeb625b5724cf1b78fd9d64f32ac07 | /utilities/valid_hdf5.py | 7dc194d25c72e6c5116662eaef8223e405f249fe | [] | no_license | Sunshine352/nag | 2f07a6152503f9a99d8556915d1c4170034f8d00 | 83564eb4a8b5177660e2f6566dd63faa16f76773 | refs/heads/master | 2020-04-05T10:27:46.501267 | 2018-05-24T11:21:18 | 2018-05-24T11:21:18 | 156,799,774 | 1 | 0 | null | 2018-11-09T02:38:43 | 2018-11-09T02:38:43 | null | UTF-8 | Python | false | false | 1,778 | py | from random import shuffle
import glob
import sys
# Destination HDF5 file and the glob of ImageNet validation PNGs to pack.
hdf5_path = 'ilsvrc_valid.hdf5'
img_path = '/data1/nithish/WORK/Resources/ilsvrc_valid/*.png'
addrs = glob.glob(img_path)
# Use 100% of the found images (the 1.0 factor was a split ratio knob).
train_addrs = addrs[0:int(1.0*len(addrs))]
#valid_list = open('full_valid.txt').readlines()
#for i in xrange(len(valid_list)):
#    valid_list[i] = valid_list[i].split()[0]
#print "{} {}".format("Length of train list",len(valid_list))
import numpy as np
import h5py
from skimage.io import imread
from skimage.transform import resize
size=224
# One float32 dataset of shape (N, 224, 224, 3) holding every image.
train_shape = (len(train_addrs), 224, 224, 3)
#val_shape = (len(val_addrs), 224, 224, 3)
hdf5_file = h5py.File(hdf5_path, mode='w')
hdf5_file.create_dataset("valid_img", train_shape, np.float32)
#hdf5_file.create_dataset("val_img", val_shape, np.int8)
#hdf5_file.create_dataset("test_img", test_shape, np.int8)
for i in range(len(train_addrs)):
    # print how many images are saved every 100 images
    if i % 100 == 0 and i > 1:
        print 'Valid data: {}/{}'.format(i, len(train_addrs))
    # read an image and resize to (224, 224); skimage's resize returns
    # floats in [0, 1], so scale back to the [0, 255] range
    addr = train_addrs[i]
    img = imread(addr)
    img = resize(img, (224, 224))*255.0
    img2 = img
    img = img.reshape((1,size,size,3))
    hdf5_file["valid_img"][i, ...] = img2
#for i in range(len(val_addrs)):
    # print how many images are saved every 1000 images
#    if i % 100 == 0 and i > 1:
#        print 'Valid data: {}/{}'.format(i, len(val_addrs))
    # read an image and resize to (224, 224)
    # cv2 load images as BGR, convert it to RGB
#    addr = val_addrs[i]
#    img = imread(addr)
#    img = resize(img, (224, 224))*255.0
#    img = img.reshape((1,size,size,3))
#    hdf5_file["val_img"][i, ...] = img[None]
hdf5_file.close()
| [
"noreply@github.com"
] | Sunshine352.noreply@github.com |
e76e45b22d05ad732edaa7f089f4e8d0a857017d | 4cb06a6674d1dca463d5d9a5f471655d9b38c0a1 | /jwr300/Assignment4/problem1.py | e61f2907e4f20e2d9136300c1f17b9ce1bd4b459 | [] | no_license | nyucusp/gx5003-fall2013 | 1fb98e603d27495704503954f06a800b90303b4b | b7c1e2ddb7540a995037db06ce7273bff30a56cd | refs/heads/master | 2021-01-23T07:03:52.834758 | 2013-12-26T23:52:55 | 2013-12-26T23:52:55 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,701 | py | #!/usr/local/bin/python
#Warren Reed
#Principles of Urban Informatics
#Assignment 4, Problem 1
#Connects to MySQL and creates three tables to store the boroughs.csv, zipCodes, and incidents table.
import MySQLdb
import csv
def incidentsToSql(cur, db):
    """(Re)create the `incidents` table and bulk-load it from
    Incidents_grouped_by_Address_and_Zip.csv (the CSV header row is skipped).

    Args:
        cur: open MySQLdb cursor
        db: the owning connection, committed after each statement
    """
    # Read all rows up front; `with` closes the file handle (the original
    # left it open), and [1:] drops the header row.
    with open('Incidents_grouped_by_Address_and_Zip.csv') as csv_file:
        rows = list(csv.reader(csv_file))[1:]
    table = 'incidents'
    # make sure to set as root: global max_allowed_packet=30000000;
    cur.execute("DROP TABLE IF EXISTS `{0}`;".format(table))
    db.commit()
    cur.execute("CREATE TABLE `{0}` (incident_address varchar(255) DEFAULT NULL, incident_zip varchar(255) DEFAULT NULL, unique_key INT DEFAULT NULL);".format(table))
    db.commit()
    # Parameterized bulk insert (the original built this query into an
    # unused local `query3` and then repeated the literal here).
    cur.executemany('''INSERT INTO `incidents` VALUES(%s, %s, %s)''', rows)
    db.commit()
def boroughToSql(cur, db):
    """(Re)create the `boroughs` table and load zipcode->borough pairs
    from boroughs_tr.csv (this file has no header row to skip).

    Args:
        cur: open MySQLdb cursor
        db: the owning connection, committed after each statement
    """
    # Fix: close the CSV file handle (the original `csv.reader(open(...))`
    # leaked it).
    with open('boroughs_tr.csv') as csv_file:
        boroughs_rows = list(csv.reader(csv_file))
    table2 = 'boroughs'
    cur.execute("DROP TABLE IF EXISTS `{0}`;".format(table2))
    db.commit()
    cur.execute("CREATE TABLE `{0}` (zipcode varchar(255) DEFAULT NULL, borough varchar(255) DEFAULT NULL);".format(table2))
    db.commit()
    cur.executemany('''INSERT INTO boroughs VALUES(%s, %s)''', boroughs_rows)
    db.commit()
def zipcodeToSql(cur, db):
    """(Re)create the `zipcodes` table and load it from zipcodes_tr.csv
    (the CSV header row is skipped).

    Args:
        cur: open MySQLdb cursor
        db: the owning connection, committed after each statement
    """
    # Fix: close the CSV file handle (the original `csv.reader(open(...))`
    # leaked it); [1:] drops the header row.
    with open('zipcodes_tr.csv') as csv_file:
        zipcode_rows = list(csv.reader(csv_file))[1:]
    table3 = 'zipcodes'
    cur.execute("DROP TABLE IF EXISTS `{0}`;".format(table3))
    db.commit()
    cur.execute("CREATE TABLE `{0}` (name varchar(255) DEFAULT NULL, zip_code_tabulation_area varchar(255) DEFAULT NULL, zt36_d00 varchar(255) DEFAULT NULL, perimeter varchar(255) DEFAULT NULL, lsad_trans varchar(255) DEFAULT NULL, zt36_d00i varchar(255) DEFAULT NULL, lsad varchar(255) DEFAULT NULL, area varchar(255) DEFAULT NULL, latitude varchar(255) DEFAULT NULL, longitude varchar(255) DEFAULT NULL, Total_Population_per_ZIP_Code varchar(255) DEFAULT NULL);".format(table3))
    db.commit()
    cur.executemany('''INSERT INTO zipcodes VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)''', zipcode_rows)
    db.commit()
def main():
#connect to database
db = MySQLdb.connect(host="localhost", # your host, usually localhost
user="jwr300", # your username
passwd="jwr300", # your password
db="coursedb") # name of the data base
cur = db.cursor()
incidentsToSql(cur,db)
boroughToSql(cur,db)
zipcodeToSql(cur,db)
db.close()
print "Import complete"
if __name__ == "__main__":
main()
| [
"jkr2110@gmail.com"
] | jkr2110@gmail.com |
cff0bec9aecfd57d0a412db2aab431891bcbd23e | 1d38a0799f8df3639df9e2f295700458abdc1dd4 | /PYTHON/Iniciante/uri-1153-fatorial-simples.py | 881dacce0d5ac30469480aebfe33e1d8d6b1b465 | [] | no_license | wellysonmartins/algoritmos-uri-online-judge | 76df1791b6c8ac7512aa7d2de3a885c5673c9580 | 9f826d797948cb75ec78a2bdc7e91532957620a1 | refs/heads/master | 2020-05-01T07:29:33.155118 | 2019-05-08T14:55:38 | 2019-05-08T14:55:38 | 177,353,047 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | n = int(input())
# Compute n! by multiplying n, n-1, ..., 1 (n is read on the line above).
fatorial = 1
for fator in range(n, 0, -1):
    fatorial *= fator
print(fatorial) | [
"wellysonmartins@gmail.com"
] | wellysonmartins@gmail.com |
9302771d58194193e03bccb929878b0241536387 | 4112156ee55f0f20a4bf38418d29a7cb30f28d7f | /HelloWorldFlask/TestRestService.py | 0a421b99df680af2a8d680bd0d73bfc4193bd3e2 | [] | no_license | joschindlbeck/pyapps | c6cd80ff55407d9eb140fa6620287a604eaefdb2 | 0ce2658925668ce191424f48709056d8408a9e4f | refs/heads/master | 2021-01-22T02:53:51.712720 | 2017-09-03T16:38:58 | 2017-09-03T16:38:58 | 102,255,982 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 920 | py | '''
Created on 03.09.2017
@author: josch
'''
from flask import Flask, jsonify, json
from flask_sqlalchemy import SQLAlchemy
# Flask application wired to a MySQL database through SQLAlchemy.
app = Flask(__name__)
# NOTE(review): credentials are hard-coded in the DSN — consider moving them
# to configuration or environment variables.
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://wutuser:test@localhost/wut4lunch'
db = SQLAlchemy(app)
@app.route("/testrestservice", methods=['GET'])
def get_users():
userlist = User.query.all()
strUserlist = str(userlist)
return strUserlist
class User(db.Model):
    """A registered user row (unique username/email plus a free-form comment)."""
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(120), unique=True)
    comment = db.Column(db.String(120), unique=False)

    def __init__(self, username, email, comment):
        self.username = username
        self.email = email
        self.comment = comment

    def __repr__(self):
        # Bug fix: __repr__ must return a str; the original returned `self`,
        # which made repr()/str() raise TypeError at runtime.
        return '<User %r>' % self.username
| [
"josch@VRONINOTEBOOK"
] | josch@VRONINOTEBOOK |
014c26d3917bb754701cda955118a94a740e21d1 | 78ee4480af76322573bce3a65a30c1439c07af2e | /orders/migrations/0003_remove_dishes_quantity.py | 0aa98057b82f3c6450a40e79d4dea6beadd61d18 | [] | no_license | queensaikia/Canteen_services | 68a89a401ff5f44816e9dcf061b51c1efdb9f9fa | 574f3ff388a7eb511f1368e56021ad5a8715f1bf | refs/heads/master | 2020-04-16T22:45:29.735571 | 2020-01-11T10:55:51 | 2020-01-11T10:55:51 | 162,305,400 | 0 | 0 | null | 2018-12-20T15:35:05 | 2018-12-18T15:05:34 | null | UTF-8 | Python | false | false | 326 | py | # Generated by Django 2.1.4 on 2019-01-05 12:07
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the `quantity` field from the Dishes model (reverts 0002)."""
    dependencies = [
        ('orders', '0002_dishes_quantity'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='dishes',
            name='quantity',
        ),
    ]
| [
"queensaikia98@gmail.com"
] | queensaikia98@gmail.com |
21b157508f72a6cad97e88415f909c29e537211a | e077e9a91f5afdbe01977490d23c20d1133c57d0 | /module_week/ex_data_structures/enum/enum_complex_values.py | f3d9856ebdc5dc951160438ec14527b65153b0d3 | [] | no_license | kai0200/PythonST | a645239114c998ab47acc612ab47b6ccd711cfe7 | 64b0424c88d2b026e1881736a94e550ebc7416a6 | refs/heads/master | 2021-01-23T11:49:36.554333 | 2020-08-11T10:01:48 | 2020-08-11T10:01:48 | 14,545,056 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,195 | py | #!/usr/bin/env python3
# encoding: utf-8
import enum
class BugStatus(enum.Enum):
    """Bug-tracker workflow states.

    Each member's value is a dict carrying a numeric code and the list of
    state names it may legally transition to; Enum passes that dict to
    __init__, which unpacks it into the `num` and `transitions` attributes.
    """
    new = {
        'num': 7,
        'transitions': [
            'incomplete',
            'invalid',
            'wont_fix',
            'in_progress',
        ],
    }
    incomplete = {
        'num': 6,
        'transitions': ['new', 'wont_fix'],
    }
    invalid = {
        'num': 5,
        'transitions': ['new'],
    }
    wont_fix = {
        'num': 4,
        'transitions': ['new'],
    }
    in_progress = {
        'num': 3,
        'transitions': ['new', 'fix_committed'],
    }
    fix_committed = {
        'num': 2,
        'transitions': ['in_progress', 'fix_released'],
    }
    fix_released = {
        'num': 1,
        'transitions': ['new'],
    }
    def __init__(self, vals):
        # Called once per member with its dict value.
        self.num = vals['num']
        self.transitions = vals['transitions']
    def can_transition(self, new_state) -> bool:
        # True when moving to `new_state` is allowed from this state.
        return new_state.name in self.transitions
# Demonstrate member lookup, the raw dict value, and the unpacked attributes.
print('Name:', BugStatus.in_progress)
print('Value:', BugStatus.in_progress.value)
print('Custom attribute:', BugStatus.in_progress.transitions)
print('Using attribute:',
      BugStatus.in_progress.can_transition(BugStatus.new))
| [
"wangkai@wangkaideMacBook-Pro.local"
] | wangkai@wangkaideMacBook-Pro.local |
36411eb463c030ab4360eebfa9af78fa62396e0f | 5e434bcedb9cfd14b26d7c8a2dc6ccdf132a8b83 | /test/test.py | 04a91fd7df5dae6a21c2e573a7b2a1b86f8f9d36 | [
"MIT"
] | permissive | mindw00rk/ccxt | 5884e73ac871e66bdfd0e86f6634e141b008b967 | b2f9ee175ea93d70b3699081fd84285f63254fec | refs/heads/master | 2021-07-08T20:12:30.199246 | 2017-09-28T06:58:45 | 2017-09-28T06:58:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,764 | py | # -*- coding: utf-8 -*-
import argparse
import os
import sys
import json
import time
from os import _exit
from traceback import format_tb
# ------------------------------------------------------------------------------
# Make the repository root importable so `import ccxt` below resolves to the
# local checkout rather than any globally installed package.
root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(root)
# ------------------------------------------------------------------------------
import ccxt # noqa: E402
# ------------------------------------------------------------------------------
# Simple namespace object that argparse fills in below.
class Argv(object):
    pass
argv = Argv()
# Command-line interface: optional --verbose / --nonce plus optional
# positional exchange id and symbol; results are stored on `argv`.
parser = argparse.ArgumentParser()
parser.add_argument('--verbose', action='store_true', help='enable verbose output')
parser.add_argument('--nonce', type=int, help='integer')
parser.add_argument('exchange', type=str, help='exchange id in lowercase', nargs='?')
parser.add_argument('symbol', type=str, help='symbol in uppercase', nargs='?')
parser.parse_args(namespace=argv)
# Global registry of instantiated exchange objects, keyed by exchange id.
exchanges = {}
# ------------------------------------------------------------------------------
# string coloring functions
# String coloring helpers. ANSI styling is currently disabled: style()
# ignores its escape-code argument and returns the bare string (the
# commented-out expression shows the original colorized form).
def style(s, style):
    # NOTE(review): the `style` parameter shadows the function name; left
    # as-is because the name is part of the visible signature.
    return str(s) # style + str (s) + '\033[0m'
def green(s):
    return style(s, '\033[92m')
def blue(s):
    return style(s, '\033[94m')
def yellow(s):
    return style(s, '\033[93m')
def red(s):
    return style(s, '\033[91m')
def pink(s):
    return style(s, '\033[95m')
def bold(s):
    return style(s, '\033[1m')
def underline(s):
    return style(s, '\033[4m')
# print the arguments as one space-joined line on stdout
def dump(*args):
    print(' '.join([str(arg) for arg in args]))
# print the arguments as one space-joined line on stderr
def dump_error(*args):
    string = ' '.join([str(arg) for arg in args])
    # print(string)
    sys.stderr.write(string + "\n")
# ------------------------------------------------------------------------------
def handle_all_unhandled_exceptions(type, value, traceback):
    """Last-resort excepthook: report the error and hard-exit the process."""
    # Bug fix: `value` is an exception instance, so concatenating it directly
    # with a str raised a TypeError inside the hook; format it via str().
    dump_error(yellow(str(type) + ' ' + str(value) + '\n\n' + '\n'.join(format_tb(traceback))))
    _exit(1)  # unrecoverable crash
sys.excepthook = handle_all_unhandled_exceptions
# ------------------------------------------------------------------------------
def test_order_book(exchange, symbol):
    """Fetch and print top-of-book for `symbol` when the exchange supports it."""
    if exchange.hasFetchOrderBook:
        # respect the exchange's rate limit before hitting the API
        delay = int(exchange.rateLimit / 1000)
        time.sleep(delay)
        dump(green(exchange.id), green(symbol), 'fetching order book...')
        orderbook = exchange.fetch_order_book(symbol)
        dump(
            green(exchange.id),
            green(symbol),
            'order book',
            orderbook['datetime'],
            'bid: ' + str(orderbook['bids'][0][0] if len(orderbook['bids']) else 'N/A'),
            'bidVolume: ' + str(orderbook['bids'][0][1] if len(orderbook['bids']) else 'N/A'),
            'ask: ' + str(orderbook['asks'][0][0] if len(orderbook['asks']) else 'N/A'),
            'askVolume: ' + str(orderbook['asks'][0][1] if len(orderbook['asks']) else 'N/A'))
    else:
        # Bug fix: the message previously read "supported", contradicting the
        # branch (and every sibling helper's "not supported" message).
        dump(yellow(exchange.id), 'fetch_order_book() not supported')
# ------------------------------------------------------------------------------
def test_ohlcv(exchange, symbol):
    """Fetch OHLCV candles for `symbol` when the exchange supports it."""
    if not exchange.hasFetchOHLCV:
        dump(yellow(exchange.id), 'fetch_ohlcv() not supported')
        return
    # respect the exchange's rate limit before hitting the API
    time.sleep(int(exchange.rateLimit / 1000))
    candles = exchange.fetch_ohlcv(symbol)
    dump(green(exchange.id), 'fetched', green(len(candles)), 'OHLCVs')
# ------------------------------------------------------------------------------
def test_tickers(exchange):
    """Fetch all tickers in a single call when the exchange supports it."""
    if not exchange.hasFetchTickers:
        dump(yellow(exchange.id), 'fetch_tickers() not supported')
        return
    # respect the exchange's rate limit before hitting the API
    time.sleep(int(exchange.rateLimit / 1000))
    dump(green(exchange.id), 'fetching all tickers at once...')
    all_tickers = exchange.fetch_tickers()
    dump(green(exchange.id), 'fetched', green(len(list(all_tickers.keys()))), 'tickers')
# ------------------------------------------------------------------------------
def test_ticker(exchange, symbol):
    """Fetch and print a single ticker for `symbol` when supported."""
    if not exchange.hasFetchTicker:
        # kept green (not yellow) to match the original's output exactly
        dump(green(exchange.id), green(symbol), 'fetch_ticker() not supported')
        return
    # respect the exchange's rate limit before hitting the API
    time.sleep(int(exchange.rateLimit / 1000))
    dump(green(exchange.id), green(symbol), 'fetching ticker...')
    ticker = exchange.fetch_ticker(symbol)
    dump(
        green(exchange.id),
        green(symbol),
        'ticker',
        ticker['datetime'],
        'high: ' + str(ticker['high']),
        'low: ' + str(ticker['low']),
        'bid: ' + str(ticker['bid']),
        'ask: ' + str(ticker['ask']),
        'volume: ' + str(ticker['quoteVolume']))
# ------------------------------------------------------------------------------
def test_trades(exchange, symbol):
    """Fetch public trades for `symbol` when the exchange supports it."""
    if not exchange.hasFetchTrades:
        dump(green(exchange.id), green(symbol), 'fetch_trades() not supported')
        return
    # respect the exchange's rate limit before hitting the API
    time.sleep(int(exchange.rateLimit / 1000))
    dump(green(exchange.id), green(symbol), 'fetching trades...')
    trades = exchange.fetch_trades(symbol)
    dump(green(exchange.id), green(symbol), 'fetched', green(len(list(trades))), 'trades')
# ------------------------------------------------------------------------------
def test_symbol(exchange, symbol):
    """Run the public-API test battery against one market symbol."""
    dump(green('SYMBOL: ' + symbol))
    test_ticker(exchange, symbol)
    # coinmarketcap is an aggregator, not a real exchange: it has no order
    # books or trades, so only its global metrics endpoint is exercised.
    if exchange.id == 'coinmarketcap':
        dump(green(exchange.fetchGlobal()))
    else:
        test_order_book(exchange, symbol)
        test_trades(exchange, symbol)
        test_tickers(exchange)
        test_ohlcv(exchange, symbol)
# ------------------------------------------------------------------------------
def load_exchange(exchange):
    """Load the exchange's market list (network call) before testing it."""
    exchange.load_markets()
def test_exchange(exchange):
    """Exercise one exchange: public API on a preferred symbol, then the
    private API (balance/orders) when API keys are configured."""
    dump(green('EXCHANGE: ' + exchange.id))
    # delay = 2
    keys = list(exchange.markets.keys())
    # ..........................................................................
    # public API
    # prefer a well-known liquid symbol when the exchange lists one,
    # otherwise fall back to the first market
    symbol = keys[0]
    symbols = [
        'BTC/USD',
        'BTC/CNY',
        'BTC/EUR',
        'BTC/ETH',
        'ETH/BTC',
        'BTC/JPY',
        'LTC/BTC',
        'USD/SLL',
    ]
    for s in symbols:
        if s in keys:
            symbol = s
            break
    # skip darkpool markets (their ids contain '.d')
    if symbol.find('.d') < 0:
        test_symbol(exchange, symbol)
    # ..........................................................................
    # private API — requires an apiKey configured on the exchange object
    if (not hasattr(exchange, 'apiKey') or (len(exchange.apiKey) < 1)):
        return
    dump(green(exchange.id), 'fetching balance...')
    # balance = exchange.fetch_balance()
    exchange.fetch_balance()
    dump(green(exchange.id), 'fetched balance')
    if exchange.hasFetchOrders:
        try:
            dump(green(exchange.id), 'fetching orders...')
            orders = exchange.fetch_orders()
            dump(green(exchange.id), 'fetched', green(str(len(orders))), 'orders')
        except (ccxt.ExchangeError, ccxt.NotSupported) as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        # except ccxt.NotSupported as e:
        #     dump(yellow(type(e).__name__), e.args)
    # order placement is intentionally left disabled to avoid real trades:
    # time.sleep(delay)
    # amount = 1
    # price = 0.0161
    # marketBuy = exchange.create_market_buy_order(symbol, amount)
    # print(marketBuy)
    # time.sleep(delay)
    # marketSell = exchange.create_market_sell_order(symbol, amount)
    # print(marketSell)
    # time.sleep(delay)
    # limitBuy = exchange.create_limit_buy_order(symbol, amount, price)
    # print(limitBuy)
    # time.sleep(delay)
    # limitSell = exchange.create_limit_sell_order(symbol, amount, price)
    # print(limitSell)
    # time.sleep(delay)
# ------------------------------------------------------------------------------
def try_all_proxies(exchange, proxies):
    """Load and test `exchange`, rotating through `proxies` after failures.

    Recoverable network/exchange errors advance to the next proxy; the loop
    stops on the first fully successful run.
    """
    current_proxy = 0
    max_retries = len(proxies)
    # resume from the proxy the exchange is already configured with, if any
    if exchange.proxy:
        current_proxy = proxies.index(exchange.proxy)
    for num_retries in range(0, max_retries):
        try:
            exchange.proxy = proxies[current_proxy]
            dump(green(exchange.id), 'using proxy', '`' + exchange.proxy + '`')
            current_proxy = (current_proxy + 1) % len(proxies)
            load_exchange(exchange)
            test_exchange(exchange)
            break
        except ccxt.RequestTimeout as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), str(e))
        except ccxt.NotSupported as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        except ccxt.DDoSProtection as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        except ccxt.ExchangeNotAvailable as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
        except ccxt.AuthenticationError as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), str(e))
        except ccxt.ExchangeError as e:
            dump_error(yellow('[' + type(e).__name__ + ']'), e.args)
# ------------------------------------------------------------------------------
# Proxy prefixes to rotate through when an exchange is unreachable; the
# empty string means "direct connection, no proxy".
proxies = [
    '',
    'https://cors-anywhere.herokuapp.com/',
    'https://crossorigin.me/',
]
# prefer local testing keys to global keys
keys_global = './keys.json'
keys_local = './keys.local.json'
keys_file = keys_local if os.path.exists(keys_local) else keys_global
# load the api keys from config
with open(keys_file) as file:
    config = json.load(file)
# instantiate all exchanges
for id in ccxt.exchanges:
    exchange = getattr(ccxt, id)
    exchanges[id] = exchange({'verbose': argv.verbose})
# set up api keys appropriately: copy each configured attribute onto the
# matching exchange instance
tuples = list(ccxt.Exchange.keysort(config).items())
for (id, params) in tuples:
    if id in exchanges:
        options = list(params.items())  # NOTE(review): unused local — candidate for removal
        for key in params:
            setattr(exchanges[id], key, params[key])
# ------------------------------------------------------------------------------
def main():
    """Entry point for the test script.

    With argv.exchange set, tests just that exchange: a specific symbol if
    argv.symbol is given, otherwise via try_all_proxies.  Without it,
    iterates every registered exchange in sorted order.  Exchanges flagged
    with a truthy `skip` attribute are reported and skipped.
    """
    if argv.exchange:
        exchange = exchanges[argv.exchange]
        symbol = argv.symbol
        if hasattr(exchange, 'skip') and exchange.skip:
            dump(green(exchange.id), 'skipped')
        elif symbol:
            load_exchange(exchange)
            test_symbol(exchange, symbol)
        else:
            try_all_proxies(exchange, proxies)
    else:
        # keysort yields exchanges in deterministic (sorted) key order;
        # every key comes from `exchanges` itself, so no membership check
        for id in ccxt.Exchange.keysort(exchanges):
            exchange = exchanges[id]
            if hasattr(exchange, 'skip') and exchange.skip:
                dump(green(exchange.id), 'skipped')
            else:
                try_all_proxies(exchange, proxies)
# ------------------------------------------------------------------------------
# guard the entry point so importing this module does not run the tests
if __name__ == '__main__':
    main()
| [
"igor.kroitor@gmail.com"
] | igor.kroitor@gmail.com |
0a42aef527397154efb2c2a9566601d890371e10 | 2492d9380852ee3d23ec2613a9fd4a7b44f3f70a | /COVID19/corona/data/sars.py | de9c0a257259279885f043401aa4a63b61b29354 | [
"Apache-2.0"
] | permissive | testpilot0/covid | 47dfe44a515e9fd51db8f1290a94246bd6b0d9c4 | c0787e0a5a70d829b69c945fdcdfcd073a30d4d7 | refs/heads/master | 2022-12-24T20:10:35.164587 | 2020-09-30T03:58:21 | 2020-09-30T03:58:21 | 266,587,314 | 2 | 2 | Apache-2.0 | 2020-08-27T20:15:27 | 2020-05-24T17:07:15 | Jupyter Notebook | UTF-8 | Python | false | false | 37,780 | py | sars = """
1 atattaggtt tttacctacc caggaaaagc caaccaacct cgatctcttg tagatctgtt
61 ctctaaacga actttaaaat ctgtgtagct gtcgctcggc tgcatgccta gtgcacctac
121 gcagtataaa caataataaa ttttactgtc gttgacaaga aacgagtaac tcgtccctct
181 tctgcagact gcttacggtt tcgtccgtgt tgcagtcgat catcagcata cctaggtttc
241 gtccgggtgt gaccgaaagg taagatggag agccttgttc ttggtgtcaa cgagaaaaca
301 cacgtccaac tcagtttgcc tgtccttcag gttagagacg tgctagtgcg tggcttcggg
361 gactctgtgg aagaggccct atcggaggca cgtgaacacc tcaaaaatgg cacttgtggt
421 ctagtagagc tggaaaaagg cgtactgccc cagcttgaac agccctatgt gttcattaaa
481 cgttctgatg ccttaagcac caatcacggc cacaaggtcg ttgagctggt tgcagaaatg
541 gacggcattc agtacggtcg tagcggtata acactgggag tactcgtgcc acatgtgggc
601 gaaaccccaa ttgcataccg caatgttctt cttcgtaaga acggtaataa gggagccggt
661 ggtcatagct atggcatcga tctaaagtct tatgacttag gtgacgagct tggcactgat
721 cccattgaag attatgaaca aaactggaac actaagcatg gcagtggtgc actccgtgaa
781 ctcactcgtg agctcaatgg aggtgcagtc actcgctatg tcgacaacaa tttctgtggc
841 ccagatgggt accctcttga ttgcatcaaa gattttctcg cacgcgcggg caagtcaatg
901 tgcactcttt ccgaacaact tgattacatc gagtcgaaga gaggtgtcta ctgctgccgt
961 gaccatgagc atgaaattgc ctggttcact gagcgctctg ataagagcta cgagcaccag
1021 acacccttcg aaattaagag tgccaagaaa tttgacactt tcaaagggga atgcccaaag
1081 tttgtgtttc ctcttaactc aaaagtcaaa gtcattcaac cacgtgttga aaagaaaaag
1141 actgagggtt tcatggggcg tatacgctct gtgtaccctg ttgcatctcc acaggagtgt
1201 aacaatatgc acttgtctac cttgatgaaa tgtaatcatt gcgatgaagt ttcatggcag
1261 acgtgcgact ttctgaaagc cacttgtgaa cattgtggca ctgaaaattt agttattgaa
1321 ggacctacta catgtgggta cctacctact aatgctgtag tgaaaatgcc atgtcctgcc
1381 tgtcaagacc cagagattgg acctgagcat agtgttgcag attatcacaa ccactcaaac
1441 attgaaactc gactccgcaa gggaggtagg actagatgtt ttggaggctg tgtgtttgcc
1501 tatgttggct gctataataa gcgtgcctac tgggttcctc gtgctagtgc tgatattggc
1561 tcaggccata ctggcattac tggtgacaat gtggagacct tgaatgagga tctccttgag
1621 atactgagtc gtgaacgtgt taacattaac attgttggcg attttcattt gaatgaagag
1681 gttgccatca ttttggcatc tttctctgct tctacaagtg cctttattga cactataaag
1741 agtcttgatt acaagtcttt caaaaccatt gttgagtcct gcggtaacta taaagttacc
1801 aagggaaagc ccgtaaaagg tgcttggaac attggacaac agagatcagt tttaacacca
1861 ctgtgtggtt ttccctcaca ggctgctggt gttatcagat caatttttgc gcgcacactt
1921 gatgcagcaa accactcaat tcctgatttg caaagagcag ctgtcaccat acttgatggt
1981 atttctgaac agtcattacg tcttgtcgac gccatggttt atacttcaga cctgctcacc
2041 aacagtgtca ttattatggc atatgtaact ggtggtcttg tacaacagac ttctcagtgg
2101 ttgtctaatc ttttgggcac tactgttgaa aaactcaggc ctatctttga atggattgag
2161 gcgaaactta gtgcaggagt tgaatttctc aaggatgctt gggagattct caaatttctc
2221 attacaggtg tttttgacat cgtcaagggt caaatacagg ttgcttcaga taacatcaag
2281 gattgtgtaa aatgcttcat tgatgttgtt aacaaggcac tcgaaatgtg cattgatcaa
2341 gtcactatcg ctggcgcaaa gttgcgatca ctcaacttag gtgaagtctt catcgctcaa
2401 agcaagggac tttaccgtca gtgtatacgt ggcaaggagc agctgcaact actcatgcct
2461 cttaaggcac caaaagaagt aacctttctt gaaggtgatt cacatgacac agtacttacc
2521 tctgaggagg ttgttctcaa gaacggtgaa ctcgaagcac tcgagacgcc cgttgatagc
2581 ttcacaaatg gagctatcgt tggcacacca gtctgtgtaa atggcctcat gctcttagag
2641 attaaggaca aagaacaata ctgcgcattg tctcctggtt tactggctac aaacaatgtc
2701 tttcgcttaa aagggggtgc accaattaaa ggtgtaacct ttggagaaga tactgtttgg
2761 gaagttcaag gttacaagaa tgtgagaatc acatttgagc ttgatgaacg tgttgacaaa
2821 gtgcttaatg aaaagtgctc tgtctacact gttgaatccg gtaccgaagt tactgagttt
2881 gcatgtgttg tagcagaggc tgttgtgaag actttacaac cagtttctga tctccttacc
2941 aacatgggta ttgatcttga tgagtggagt gtagctacat tctacttatt tgatgatgct
3001 ggtgaagaaa acttttcatc acgtatgtat tgttcctttt accctccaga tgaggaagaa
3061 gaggacgatg cagagtgtga ggaagaagaa attgatgaaa cctgtgaaca tgagtacggt
3121 acagaggatg attatcaagg tctccctctg gaatttggtg cctcagctga aacagttcga
3181 gttgaggaag aagaagagga agactggctg gatgatacta ctgagcaatc agagattgag
3241 ccagaaccag aacctacacc tgaagaacca gttaatcagt ttactggtta tttaaaactt
3301 actgacaatg ttgccattaa atgtgttgac atcgttaagg aggcacaaag tgctaatcct
3361 atggtgattg taaatgctgc taacatacac ctgaaacatg gtggtggtgt agcaggtgca
3421 ctcaacaagg caaccaatgg tgccatgcaa aaggagagtg atgattacat taagctaaat
3481 ggccctctta cagtaggagg gtcttgtttg ctttctggac ataatcttgc taagaagtgt
3541 ctgcatgttg ttggacctaa cctaaatgca ggtgaggaca tccagcttct taaggcagca
3601 tatgaaaatt tcaattcaca ggacatctta cttgcaccat tgttgtcagc aggcatattt
3661 ggtgctaaac cacttcagtc tttacaagtg tgcgtgcaga cggttcgtac acaggtttat
3721 attgcagtca atgacaaagc tctttatgag caggttgtca tggattatct tgataacctg
3781 aagcctagag tggaagcacc taaacaagag gagccaccaa acacagaaga ttccaaaact
3841 gaggagaaat ctgtcgtaca gaagcctgtc gatgtgaagc caaaaattaa ggcctgcatt
3901 gatgaggtta ccacaacact ggaagaaact aagtttctta ccaataagtt actcttgttt
3961 gctgatatca atggtaagct ttaccatgat tctcagaaca tgcttagagg tgaagatatg
4021 tctttccttg agaaggatgc accttacatg gtaggtgatg ttatcactag tggtgatatc
4081 acttgtgttg taataccctc caaaaaggct ggtggcacta ctgagatgct ctcaagagct
4141 ttgaagaaag tgccagttga tgagtatata accacgtacc ctggacaagg atgtgctggt
4201 tatacacttg aggaagctaa gactgctctt aagaaatgca aatctgcatt ttatgtacta
4261 ccttcagaag cacctaatgc taaggaagag attctaggaa ctgtatcctg gaatttgaga
4321 gaaatgcttg ctcatgctga agagacaaga aaattaatgc ctatatgcat ggatgttaga
4381 gccataatgg caaccatcca acgtaagtat aaaggaatta aaattcaaga gggcatcgtt
4441 gactatggtg tccgattctt cttttatact agtaaagagc ctgtagcttc tattattacg
4501 aagctgaact ctctaaatga gccgcttgtc acaatgccaa ttggttatgt gacacatggt
4561 tttaatcttg aagaggctgc gcgctgtatg cgttctctta aagctcctgc cgtagtgtca
4621 gtatcatcac cagatgctgt tactacatat aatggatacc tcacttcgtc atcaaagaca
4681 tctgaggagc actttgtaga aacagtttct ttggctggct cttacagaga ttggtcctat
4741 tcaggacagc gtacagagtt aggtgttgaa tttcttaagc gtggtgacaa aattgtgtac
4801 cacactctgg agagccccgt cgagtttcat cttgacggtg aggttctttc acttgacaaa
4861 ctaaagagtc tcttatccct gcgggaggtt aagactataa aagtgttcac aactgtggac
4921 aacactaatc tccacacaca gcttgtggat atgtctatga catatggaca gcagtttggt
4981 ccaacatact tggatggtgc tgatgttaca aaaattaaac ctcatgtaaa tcatgagggt
5041 aagactttct ttgtactacc tagtgatgac acactacgta gtgaagcttt cgagtactac
5101 catactcttg atgagagttt tcttggtagg tacatgtctg ctttaaacca cacaaagaaa
5161 tggaaatttc ctcaagttgg tggtttaact tcaattaaat gggctgataa caattgttat
5221 ttgtctagtg ttttattagc acttcaacag cttgaagtca aattcaatgc accagcactt
5281 caagaggctt attatagagc ccgtgctggt gatgctgcta acttttgtgc actcatactc
5341 gcttacagta ataaaactgt tggcgagctt ggtgatgtca gagaaactat gacccatctt
5401 ctacagcatg ctaatttgga atctgcaaag cgagttctta atgtggtgtg taaacattgt
5461 ggtcagaaaa ctactacctt aacgggtgta gaagctgtga tgtatatggg tactctatct
5521 tatgataatc ttaagacagg tgtttccatt ccatgtgtgt gtggtcgtga tgctacacaa
5581 tatctagtac aacaagagtc ttcttttgtt atgatgtctg caccacctgc tgagtataaa
5641 ttacagcaag gtacattctt atgtgcgaat gagtacactg gtaactatca gtgtggtcat
5701 tacactcata taactgctaa ggagaccctc tatcgtattg acggagctca ccttacaaag
5761 atgtcagagt acaaaggacc agtgactgat gttttctaca aggaaacatc ttacactaca
5821 accatcaagc ctgtgtcgta taaactcgat ggagttactt acacagagat tgaaccaaaa
5881 ttggatgggt attataaaaa ggataatgct tactatacag agcagcctat agaccttgta
5941 ccaactcaac cattaccaaa tgcgagtttt gataatttca aactcacatg ttctaacaca
6001 aaatttgctg atgatttaaa tcaaatgaca ggcttcacaa agccagcttc acgagagcta
6061 tctgtcacat tcttcccaga cttgaatggc gatgtagtgg ctattgacta tagacactat
6121 tcagcgagtt tcaagaaagg tgctaaatta ctgcataagc caattgtttg gcacattaac
6181 caggctacaa ccaagacaac gttcaaacca aacacttggt gtttacgttg tctttggagt
6241 acaaagccag tagatacttc aaattcattt gaagttctgg cagtagaaga cacacaagga
6301 atggacaatc ttgcttgtga aagtcaacaa cccacctctg aagaagtagt ggaaaatcct
6361 accatacaga aggaagtcat agagtgtgac gtgaaaacta ccgaagttgt aggcaatgtc
6421 atacttaaac catcagatga aggtgttaaa gtaacacaag agttaggtca tgaggatctt
6481 atggctgctt atgtggaaaa cacaagcatt accattaaga aacctaatga gctttcacta
6541 gccttaggtt taaaaacaat tgccactcat ggtattgctg caattaatag tgttccttgg
6601 agtaaaattt tggcttatgt caaaccattc ttaggacaag cagcaattac aacatcaaat
6661 tgcgctaaga gattagcaca acgtgtgttt aacaattata tgccttatgt gtttacatta
6721 ttgttccaat tgtgtacttt tactaaaagt accaattcta gaattagagc ttcactacct
6781 acaactattg ctaaaaatag tgttaagagt gttgctaaat tatgtttgga tgccggcatt
6841 aattatgtga agtcacccaa attttctaaa ttgttcacaa tcgctatgtg gctattgttg
6901 ttaagtattt gcttaggttc tctaatctgt gtaactgctg cttttggtgt actcttatct
6961 aattttggtg ctccttctta ttgtaatggc gttagagaat tgtatcttaa ttcgtctaac
7021 gttactacta tggatttctg tgaaggttct tttccttgca gcatttgttt aagtggatta
7081 gactcccttg attcttatcc agctcttgaa accattcagg tgacgatttc atcgtacaag
7141 ctagacttga caattttagg tctggccgct gagtgggttt tggcatatat gttgttcaca
7201 aaattctttt atttattagg tctttcagct ataatgcagg tgttctttgg ctattttgct
7261 agtcatttca tcagcaattc ttggctcatg tggtttatca ttagtattgt acaaatggca
7321 cccgtttctg caatggttag gatgtacatc ttctttgctt ctttctacta catatggaag
7381 agctatgttc atatcatgga tggttgcacc tcttcgactt gcatgatgtg ctataagcgc
7441 aatcgtgcca cacgcgttga gtgtacaact attgttaatg gcatgaagag atctttctat
7501 gtctatgcaa atggaggccg tggcttctgc aagactcaca attggaattg tctcaattgt
7561 gacacatttt gcactggtag tacattcatt agtgatgaag ttgctcgtga tttgtcactc
7621 cagtttaaaa gaccaatcaa ccctactgac cagtcatcgt atattgttga tagtgttgct
7681 gtgaaaaatg gcgcgcttca cctctacttt gacaaggctg gtcaaaagac ctatgagaga
7741 catccgctct cccattttgt caatttagac aatttgagag ctaacaacac taaaggttca
7801 ctgcctatta atgtcatagt ttttgatggc aagtccaaat gcgacgagtc tgcttctaag
7861 tctgcttctg tgtactacag tcagctgatg tgccaaccta ttctgttgct tgaccaagct
7921 cttgtatcag acgttggaga tagtactgaa gtttccgtta agatgtttga tgcttatgtc
7981 gacacctttt cagcaacttt tagtgttcct atggaaaaac ttaaggcact tgttgctaca
8041 gctcacagcg agttagcaaa gggtgtagct ttagatggtg tcctttctac attcgtgtca
8101 gctgcccgac aaggtgttgt tgataccgat gttgacacaa aggatgttat tgaatgtctc
8161 aaactttcac atcactctga cttagaagtg acaggtgaca gttgtaacaa tttcatgctc
8221 acctataata aggttgaaaa catgacgccc agagatcttg gcgcatgtat tgactgtaat
8281 gcaaggcata tcaatgccca agtagcaaaa agtcacaatg tttcactcat ctggaatgta
8341 aaagactaca tgtctttatc tgaacagctg cgtaaacaaa ttcgtagtgc tgccaagaag
8401 aacaacatac cttttagact aacttgtgct acaactagac aggttgtcaa tgtcataact
8461 actaaaatct cactcaaggg tggtaagatt gttagtactt gttttaaact tatgcttaag
8521 gccacattat tgtgcgttct tgctgcattg gtttgttata tcgttatgcc agtacataca
8581 ttgtcaatcc atgatggtta cacaaatgaa atcattggtt acaaagccat tcaggatggt
8641 gtcactcgtg acatcatttc tactgatgat tgttttgcaa ataaacatgc tggttttgac
8701 gcatggttta gccagcgtgg tggttcatac aaaaatgaca aaagctgccc tgtagtagct
8761 gctatcatta caagagagat tggtttcata gtgcctggct taccgggtac tgtgctgaga
8821 gcaatcaatg gtgacttctt gcattttcta cctcgtgttt ttagtgctgt tggcaacatt
8881 tgctacacac cttccaaact cattgagtat agtgattttg ctacctctgc ttgcgttctt
8941 gctgctgagt gtacaatttt taaggatgct atgggcaaac ctgtgccata ttgttatgac
9001 actaatttgc tagagggttc tatttcttat agtgagcttc gtccagacac tcgttatgtg
9061 cttatggatg gttccatcat acagtttcct aacacttacc tggagggttc tgttagagta
9121 gtaacaactt ttgatgctga gtactgtaga catggtacat gcgaaaggtc agaagtaggt
9181 atttgcctat ctaccagtgg tagatgggtt cttaataatg agcattacag agctctatca
9241 ggagttttct gtggtgttga tgcgatgaat ctcatagcta acatctttac tcctcttgtg
9301 caacctgtgg gtgctttaga tgtgtctgct tcagtagtgg ctggtggtat tattgccata
9361 ttggtgactt gtgctgccta ctactttatg aaattcagac gtgtttttgg tgagtacaac
9421 catgttgttg ctgctaatgc acttttgttt ttgatgtctt tcactatact ctgtctggta
9481 ccagcttaca gctttctgcc gggagtctac tcagtctttt acttgtactt gacattctat
9541 ttcaccaatg atgtttcatt cttggctcac cttcaatggt ttgccatgtt ttctcctatt
9601 gtgccttttt ggataacagc aatctatgta ttctgtattt ctctgaagca ctgccattgg
9661 ttctttaaca actatcttag gaaaagagtc atgtttaatg gagttacatt tagtaccttc
9721 gaggaggctg ctttgtgtac ctttttgctc aacaaggaaa tgtacctaaa attgcgtagc
9781 gagacactgt tgccacttac acagtataac aggtatcttg ctctatataa caagtacaag
9841 tatttcagtg gagccttaga tactaccagc tatcgtgaag cagcttgctg ccacttagca
9901 aaggctctaa atgactttag caactcaggt gctgatgttc tctaccaacc accacagaca
9961 tcaatcactt ctgctgttct gcagagtggt tttaggaaaa tggcattccc gtcaggcaaa
10021 gttgaagggt gcatggtaca agtaacctgt ggaactacaa ctcttaatgg attgtggttg
10081 gatgacacag tatactgtcc aagacatgtc atttgcacag cagaagacat gcttaatcct
10141 aactatgaag atctgctcat tcgcaaatcc aaccatagct ttcttgttca ggctggcaat
10201 gttcaacttc gtgttattgg ccattctatg caaaattgtc tgcttaggct taaagttgat
10261 acttctaacc ctaagacacc caagtataaa tttgtccgta tccaacctgg tcaaacattt
10321 tcagttctag catgctacaa tggttcacca tctggtgttt atcagtgtgc catgagacct
10381 aatcatacca ttaaaggttc tttccttaat ggatcatgtg gtagtgttgg ttttaacatt
10441 gattatgatt gcgtgtcttt ctgctatatg catcatatgg agcttccaac aggagtacac
10501 gctggtactg acttagaagg taaattctat ggtccatttg ttgacagaca aactgcacag
10561 gctgcaggta cagacacaac cataacatta aatgttttgg catggctgta tgctgctgtt
10621 atcaatggtg ataggtggtt tcttaataga ttcaccacta ctttgaatga ctttaacctt
10681 gtggcaatga agtacaacta tgaacctttg acacaagatc atgttgacat attgggacct
10741 ctttctgctc aaacaggaat tgccgtctta gatatgtgtg ctgctttgaa agagctgctg
10801 cagaatggta tgaatggtcg tactatcctt ggtagcacta ttttagaaga tgagtttaca
10861 ccatttgatg ttgttagaca atgctctggt gttaccttcc aaggtaagtt caagaaaatt
10921 gttaagggca ctcatcattg gatgctttta actttcttga catcactatt gattcttgtt
10981 caaagtacac agtggtcact gtttttcttt gtttacgaga atgctttctt gccatttact
11041 cttggtatta tggcaattgc tgcatgtgct atgctgcttg ttaagcataa gcacgcattc
11101 ttgtgcttgt ttctgttacc ttctcttgca acagttgctt actttaatat ggtctacatg
11161 cctgctagct gggtgatgcg tatcatgaca tggcttgaat tggctgacac tagcttgtct
11221 ggttataggc ttaaggattg tgttatgtat gcttcagctt tagttttgct tattctcatg
11281 acagctcgca ctgtttatga tgatgctgct agacgtgttt ggacactgat gaatgtcatt
11341 acacttgttt acaaagtcta ctatggtaat gctttagatc aagctatttc catgtgggcc
11401 ttagttattt ctgtaacctc taactattct ggtgtcgtta cgactatcat gtttttagct
11461 agagctatag tgtttgtgtg tgttgagtat tacccattgt tatttattac tggcaacacc
11521 ttacagtgta tcatgcttgt ttattgtttc ttaggctatt gttgctgctg ctactttggc
11581 cttttctgtt tactcaaccg ttacttcagg cttactcttg gtgtttatga ctacttggtc
11641 tctacacaag aatttaggta tatgaactcc caggggcttt tgcctcctaa gagtagtatt
11701 gatgctttca agcttaacat taagttgttg ggtattggag gtaaaccatg tatcaaggtt
11761 gctactgtac agtctaaaat gtctgacgta aagtgcacat ctgtggtact gctctcggtt
11821 cttcaacaac ttagagtaga gtcatcttct aaattgtggg cacaatgtgt acaactccac
11881 aatgatattc ttcttgcaaa agacacaact gaagctttcg agaagatggt ttctcttttg
11941 tctgttttgc tatccatgca gggtgctgta gacattaata ggttgtgcga ggaaatgctc
12001 gataaccgtg ctactcttca ggctattgct tcagaattta gttctttacc atcatatgcc
12061 gcttatgcca ctgcccagga ggcctatgag caggctgtag ctaatggtga ttctgaagtc
12121 gttctcaaaa agttaaagaa atctttgaat gtggctaaat ctgagtttga ccgtgatgct
12181 gccatgcaac gcaagttgga aaagatggca gatcaggcta tgacccaaat gtacaaacag
12241 gcaagatctg aggacaagag ggcaaaagta actagtgcta tgcaaacaat gctcttcact
12301 atgcttagga agcttgataa tgatgcactt aacaacatta tcaacaatgc gcgtgatggt
12361 tgtgttccac tcaacatcat accattgact acagcagcca aactcatggt tgttgtccct
12421 gattatggta cctacaagaa cacttgtgat ggtaacacct ttacatatgc atctgcactc
12481 tgggaaatcc agcaagttgt tgatgcggat agcaagattg ttcaacttag tgaaattaac
12541 atggacaatt caccaaattt ggcttggcct cttattgtta cagctctaag agccaactca
12601 gctgttaaac tacagaataa tgaactgagt ccagtagcac tacgacagat gtcctgtgcg
12661 gctggtacca cacaaacagc ttgtactgat gacaatgcac ttgcctacta taacaattcg
12721 aagggaggta ggtttgtgct ggcattacta tcagaccacc aagatctcaa atgggctaga
12781 ttccctaaga gtgatggtac aggtacaatt tacacagaac tggaaccacc ttgtaggttt
12841 gttacagaca caccaaaagg gcctaaagtg aaatacttgt acttcatcaa aggcttaaac
12901 aacctaaata gaggtatggt gctgggcagt ttagctgcta cagtacgtct tcaggctgga
12961 aatgctacag aagtacctgc caattcaact gtgctttcct tctgtgcttt tgcagtagac
13021 cctgctaaag catataagga ttacctagca agtggaggac aaccaatcac caactgtgtg
13081 aagatgttgt gtacacacac tggtacagga caggcaatta ctgtaacacc agaagctaac
13141 atggaccaag agtcctttgg tggtgcttca tgttgtctgt attgtagatg ccacattgac
13201 catccaaatc ctaaaggatt ctgtgacttg aaaggtaagt acgtccaaat acctaccact
13261 tgtgctaatg acccagtggg ttttacactt agaaacacag tctgtaccgt ctgcggaatg
13321 tggaaaggtt atggctgtag ttgtgaccaa ctccgcgaac ccttgatgca gtctgcggat
13381 gcatcaacgt ttttaaacgg gtttgcggtg taagtgcagc ccgtcttaca ccgtgcggca
13441 caggcactag tactgatgtc gtctacaggg cttttgatat ttacaacgaa aaagttgctg
13501 gttttgcaaa gttcctaaaa actaattgct gtcgcttcca ggagaaggat gaggaaggca
13561 atttattaga ctcttacttt gtagttaaga ggcatactat gtctaactac caacatgaag
13621 agactattta taacttggtt aaagattgtc cagcggttgc tgtccatgac tttttcaagt
13681 ttagagtaga tggtgacatg gtaccacata tatcacgtca gcgtctaact aaatacacaa
13741 tggctgattt agtctatgct ctacgtcatt ttgatgaggg taattgtgat acattaaaag
13801 aaatactcgt cacatacaat tgctgtgatg atgattattt caataagaag gattggtatg
13861 acttcgtaga gaatcctgac atcttacgcg tatatgctaa cttaggtgag cgtgtacgcc
13921 aatcattatt aaagactgta caattctgcg atgctatgcg tgatgcaggc attgtaggcg
13981 tactgacatt agataatcag gatcttaatg ggaactggta cgatttcggt gatttcgtac
14041 aagtagcacc aggctgcgga gttcctattg tggattcata ttactcattg ctgatgccca
14101 tcctcacttt gactagggca ttggctgctg agtcccatat ggatgctgat ctcgcaaaac
14161 cacttattaa gtgggatttg ctgaaatatg attttacgga agagagactt tgtctcttcg
14221 accgttattt taaatattgg gaccagacat accatcccaa ttgtattaac tgtttggatg
14281 ataggtgtat ccttcattgt gcaaacttta atgtgttatt ttctactgtg tttccaccta
14341 caagttttgg accactagta agaaaaatat ttgtagatgg tgttcctttt gttgtttcaa
14401 ctggatacca ttttcgtgag ttaggagtcg tacataatca ggatgtaaac ttacatagct
14461 cgcgtctcag tttcaaggaa cttttagtgt atgctgctga tccagctatg catgcagctt
14521 ctggcaattt attgctagat aaacgcacta catgcttttc agtagctgca ctaacaaaca
14581 atgttgcttt tcaaactgtc aaacccggta attttaataa agacttttat gactttgctg
14641 tgtctaaagg tttctttaag gaaggaagtt ctgttgaact aaaacacttc ttctttgctc
14701 aggatggcaa cgctgctatc agtgattatg actattatcg ttataatctg ccaacaatgt
14761 gtgatatcag acaactccta ttcgtagttg aagttgttga taaatacttt gattgttacg
14821 atggtggctg tattaatgcc aaccaagtaa tcgttaacaa tctggataaa tcagctggtt
14881 tcccatttaa taaatggggt aaggctagac tttattatga ctcaatgagt tatgaggatc
14941 aagatgcact tttcgcgtat actaagcgta atgtcatccc tactataact caaatgaatc
15001 ttaagtatgc cattagtgca aagaatagag ctcgcaccgt agctggtgtc tctatctgta
15061 gtactatgac aaatagacag tttcatcaga aattattgaa gtcaatagcc gccactagag
15121 gagctactgt ggtaattgga acaagcaagt tttacggtgg ctggcataat atgttaaaaa
15181 ctgtttacag tgatgtagaa actccacacc ttatgggttg ggattatcca aaatgtgaca
15241 gagccatgcc taacatgctt aggataatgg cctctcttgt tcttgctcgc aaacataaca
15301 cttgctgtaa cttatcacac cgtttctaca ggttagctaa cgagtgtgcg caagtattaa
15361 gtgagatggt catgtgtggc ggctcactat atgttaaacc aggtggaaca tcatccggtg
15421 atgctacaac tgcttatgct aatagtgtct ttaacatttg tcaagctgtt acagccaatg
15481 taaatgcact tctttcaact gatggtaata agatagctga caagtatgtc cgcaatctac
15541 aacacaggct ctatgagtgt ctctatagaa atagggatgt tgatcatgaa ttcgtggatg
15601 agttttacgc ttacctgcgt aaacatttct ccatgatgat tctttctgat gatgccgttg
15661 tgtgctataa cagtaactat gcggctcaag gtttagtagc tagcattaag aactttaagg
15721 cagttcttta ttatcaaaat aatgtgttca tgtctgaggc aaaatgttgg actgagactg
15781 accttactaa aggacctcac gaattttgct cacagcatac aatgctagtt aaacaaggag
15841 atgattacgt gtacctgcct tacccagatc catcaagaat attaggcgca ggctgttttg
15901 tcgatgatat tgtcaaaaca gatggtacac ttatgattga aaggttcgtg tcactggcta
15961 ttgatgctta cccacttaca aaacatccta atcaggagta tgctgatgtc tttcacttgt
16021 atttacaata cattagaaag ttacatgatg agcttactgg ccacatgttg gacatgtatt
16081 ccgtaatgct aactaatgat aacacctcac ggtactggga acctgagttt tatgaggcta
16141 tgtacacacc acatacagtc ttgcaggctg taggtgcttg tgtattgtgc aattcacaga
16201 cttcacttcg ttgcggtgcc tgtattagga gaccattcct atgttgcaag tgctgctatg
16261 accatgtcat ttcaacatca cacaaattag tgttgtctgt taatccctat gtttgcaatg
16321 ccccaggttg tgatgtcact gatgtgacac aactgtatct aggaggtatg agctattatt
16381 gcaagtcaca taagcctccc attagttttc cattatgtgc taatggtcag gtttttggtt
16441 tatacaaaaa cacatgtgta ggcagtgaca atgtcactga cttcaatgcg atagcaacat
16501 gtgattggac taatgctggc gattacatac ttgccaacac ttgtactgag agactcaagc
16561 ttttcgcagc agaaacgctc aaagccactg aggaaacatt taagctgtca tatggtattg
16621 ccactgtacg cgaagtactc tctgacagag aattgcatct ttcatgggag gttggaaaac
16681 ctagaccacc attgaacaga aactatgtct ttactggtta ccgtgtaact aaaaatagta
16741 aagtacagat tggagagtac acctttgaaa aaggtgacta tggtgatgct gttgtgtaca
16801 gaggtactac gacatacaag ttgaatgttg gtgattactt tgtgttgaca tctcacactg
16861 taatgccact tagtgcacct actctagtgc cacaagagca ctatgtgaga attactggct
16921 tgtacccaac actcaacatc tcagatgagt tttctagcaa tgttgcaaat tatcaaaagg
16981 tcggcatgca aaagtactct acactccaag gaccacctgg tactggtaag agtcattttg
17041 ccatcggact tgctctctat tacccatctg ctcgcatagt gtatacggca tgctctcatg
17101 cagctgttga tgccctatgt gaaaaggcat taaaatattt gcccatagat aaatgtagta
17161 gaatcatacc tgcgcgtgcg cgcgtagagt gttttgataa attcaaagtg aattcaacac
17221 tagaacagta tgttttctgc actgtaaatg cattgccaga aacaactgct gacattgtag
17281 tctttgatga aatctctatg gctactaatt atgacttgag tgttgtcaat gctagacttc
17341 gtgcaaaaca ctacgtctat attggcgatc ctgctcaatt accagccccc cgcacattgc
17401 tgactaaagg cacactagaa ccagaatatt ttaattcagt gtgcagactt atgaaaacaa
17461 taggtccaga catgttcctt ggaacttgtc gccgttgtcc tgctgaaatt gttgacactg
17521 tgagtgcttt agtttatgac aataagctaa aagcacacaa ggataagtca gctcaatgct
17581 tcaaaatgtt ctacaaaggt gttattacac atgatgtttc atctgcaatc aacagacctc
17641 aaataggcgt tgtaagagaa tttcttacac gcaatcctgc ttggagaaaa gctgttttta
17701 tctcacctta taattcacag aacgctgtag cttcaaaaat cttaggattg cctacgcaga
17761 ctgttgattc atcacagggt tctgaatatg actatgtcat attcacacaa actactgaaa
17821 cagcacactc ttgtaatgtc aaccgcttca atgtggctat cacaagggca aaaattggca
17881 ttttgtgcat aatgtctgat agagatcttt atgacaaact gcaatttaca agtctagaaa
17941 taccacgtcg caatgtggct acattacaag cagaaaatgt aactggactt tttaaggact
18001 gtagtaagat cattactggt cttcatccta cacaggcacc tacacacctc agcgttgata
18061 taaagttcaa gactgaagga ttatgtgttg acataccagg cataccaaag gacatgacct
18121 accgtagact catctctatg atgggtttca aaatgaatta ccaagtcaat ggttacccta
18181 atatgtttat cacccgcgaa gaagctattc gtcacgttcg tgcgtggatt ggctttgatg
18241 tagagggctg tcatgcaact agagatgctg tgggtactaa cctacctctc cagctaggat
18301 tttctacagg tgttaactta gtagctgtac cgactggtta tgttgacact gaaaataaca
18361 cagaattcac cagagttaat gcaaaacctc caccaggtga ccagtttaaa catcttatac
18421 cactcatgta taaaggcttg ccctggaatg tagtgcgtat taagatagta caaatgctca
18481 gtgatacact gaaaggattg tcagacagag tcgtgttcgt cctttgggcg catggctttg
18541 agcttacatc aatgaagtac tttgtcaaga ttggacctga aagaacgtgt tgtctgtgtg
18601 acaaacgtgc aacttgcttt tctacttcat cagatactta tgcctgctgg aatcattctg
18661 tgggttttga ctatgtctat aacccattta tgattgatgt tcagcagtgg ggctttacgg
18721 gtaaccttca gagtaaccat gaccaacatt gccaggtaca tggaaatgca catgtggcta
18781 gttgtgatgc tatcatgact agatgtttag cagtccatga gtgctttgtt aagcgcgttg
18841 attggtctgt tgaataccct attataggag atgaactgag ggttaattct gcttgcagaa
18901 aagtacaaca catggttgtg aagtctgcat tgcttgctga taagtttcca gttcttcatg
18961 acattggaaa tccaaaggct atcaagtgtg tgcctcaggc tgaagtagaa tggaagttct
19021 acgatgctca gccatgtagt gacaaagctt acaaaataga ggaactcttc tattcttatg
19081 ctacacatca cgataaattc actgatggtg tttgtttgtt ttggaattgt aacgttgatc
19141 gttacccagc caatgcaatt gtgtgtaggt ttgacacaag agtcttgtca aacttgaact
19201 taccaggctg tgatggtggt agtttgtatg tgaataagca tgcattccac actccagctt
19261 tcgataaaag tgcatttact aatttaaagc aattgccttt cttttactat tctgatagtc
19321 cttgtgagtc tcatggcaaa caagtagtgt cggatattga ttatgttcca ctcaaatctg
19381 ctacgtgtat tacacgatgc aatttaggtg gtgctgtttg cagacaccat gcaaatgagt
19441 accgacagta cttggatgca tataatatga tgatttctgc tggatttagc ctatggattt
19501 acaaacaatt tgatacttat aacctgtgga atacatttac caggttacag agtttagaaa
19561 atgtggctta taatgttgtt aataaaggac actttgatgg acacgccggc gaagcacctg
19621 tttccatcat taataatgct gtttacacaa aggtagatgg tattgatgtg gagatctttg
19681 aaaataagac aacacttcct gttaatgttg catttgagct ttgggctaag cgtaacatta
19741 aaccagtgcc agagattaag atactcaata atttgggtgt tgatatcgct gctaatactg
19801 taatctggga ctacaaaaga gaagccccag cacatgtatc tacaataggt gtctgcacaa
19861 tgactgacat tgccaagaaa cctactgaga gtgcttgttc ttcacttact gtcttgtttg
19921 atggtagagt ggaaggacag gtagaccttt ttagaaacgc ccgtaatggt gttttaataa
19981 cagaaggttc agtcaaaggt ctaacacctt caaagggacc agcacaagct agcgtcaatg
20041 gagtcacatt aattggagaa tcagtaaaaa cacagtttaa ctactttaag aaagtagacg
20101 gcattattca acagttgcct gaaacctact ttactcagag cagagactta gaggatttta
20161 agcccagatc acaaatggaa actgactttc tcgagctcgc tatggatgaa ttcatacagc
20221 gatataagct cgagggctat gccttcgaac acatcgttta tggagatttc agtcatggac
20281 aacttggcgg tcttcattta atgataggct tagccaagcg ctcacaagat tcaccactta
20341 aattagagga ttttatccct atggacagca cagtgaaaaa ttacttcata acagatgcgc
20401 aaacaggttc atcaaaatgt gtgtgttctg tgattgatct tttacttgat gactttgtcg
20461 agataataaa gtcacaagat ttgtcagtga tttcaaaagt ggtcaaggtt acaattgact
20521 atgctgaaat ttcattcatg ctttggtgta aggatggaca tgttgaaacc ttctacccaa
20581 aactacaagc aagtcaagcg tggcaaccag gtgttgcgat gcctaacttg tacaagatgc
20641 aaagaatgct tcttgaaaag tgtgaccttc agaattatgg tgaaaatgct gttataccaa
20701 aaggaataat gatgaatgtc gcaaagtata ctcaactgtg tcaatactta aatacactta
20761 ctttagctgt accctacaac atgagagtta ttcactttgg tgctggctct gataaaggag
20821 ttgcaccagg tacagctgtg ctcagacaat ggttgccaac tggcacacta cttgtcgatt
20881 cagatcttaa tgacttcgtc tccgacgcag attctacttt aattggagac tgtgcaacag
20941 tacatacggc taataaatgg gaccttatta ttagcgatat gtatgaccct aggaccaaac
21001 atgtgacaaa agagaatgac tctaaagaag ggtttttcac ttatctgtgt ggatttataa
21061 agcaaaaact agccctgggt ggttctatag ctgtaaagat aacagagcat tcttggaatg
21121 ctgaccttta caagcttatg ggccatttct catggtggac agcttttgtt acaaatgtaa
21181 atgcatcatc atcggaagca tttttaattg gggctaacta tcttggcaag ccgaaggaac
21241 aaattgatgg ctataccatg catgctaact acattttctg gaggaacaca aatcctatcc
21301 agttgtcttc ctattcactc tttgacatga gcaaatttcc tcttaaatta agaggaactg
21361 ctgtaatgtc tcttaaggag aatcaaatca atgatatgat ttattctctt ctggaaaaag
21421 gtaggcttat cattagagaa aacaacagag ttgtggtttc aagtgatatt cttgttaaca
21481 actaaacgaa catgtttatt ttcttattat ttcttactct cactagtggt agtgaccttg
21541 accggtgcac cacttttgat gatgttcaag ctcctaatta cactcaacat acttcatcta
21601 tgaggggggt ttactatcct gatgaaattt ttagatcaga cactctttat ttaactcagg
21661 atttatttct tccattttat tctaatgtta cagggtttca tactattaat catacgtttg
21721 gcaaccctgt catacctttt aaggatggta tttattttgc tgccacagag aaatcaaatg
21781 ttgtccgtgg ttgggttttt ggttctacca tgaacaacaa gtcacagtcg gtgattatta
21841 ttaacaattc tactaatgtt gttatacgag catgtaactt tgaattgtgt gacaaccctt
21901 tctttgctgt ttctaaaccc atgggtacac agacacatac tatgatattc gataatgcat
21961 ttaattgcac tttcgagtac atatctgatg ccttttcgct tgatgtttca gaaaagtcag
22021 gtaattttaa acacttacga gagtttgtgt ttaaaaataa agatgggttt ctctatgttt
22081 ataagggcta tcaacctata gatgtagttc gtgatctacc ttctggtttt aacactttga
22141 aacctatttt taagttgcct cttggtatta acattacaaa ttttagagcc attcttacag
22201 ccttttcacc tgctcaagac atttggggca cgtcagctgc agcctatttt gttggctatt
22261 taaagccaac tacatttatg ctcaagtatg atgaaaatgg tacaatcaca gatgctgttg
22321 attgttctca aaatccactt gctgaactca aatgctctgt taagagcttt gagattgaca
22381 aaggaattta ccagacctct aatttcaggg ttgttccctc aggagatgtt gtgagattcc
22441 ctaatattac aaacttgtgt ccttttggag aggtttttaa tgctactaaa ttcccttctg
22501 tctatgcatg ggagagaaaa aaaatttcta attgtgttgc tgattactct gtgctctaca
22561 actcaacatt tttttcaacc tttaagtgct atggcgtttc tgccactaag ttgaatgatc
22621 tttgcttctc caatgtctat gcagattctt ttgtagtcaa gggagatgat gtaagacaaa
22681 tagcgccagg acaaactggt gttattgctg attataatta taaattgcca gatgatttca
22741 tgggttgtgt ccttgcttgg aatactagga acattgatgc tacttcaact ggtaattata
22801 attataaata taggtatctt agacatggca agcttaggcc ctttgagaga gacatatcta
22861 atgtgccttt ctcccctgat ggcaaacctt gcaccccacc tgctcttaat tgttattggc
22921 cattaaatga ttatggtttt tacaccacta ctggcattgg ctaccaacct tacagagttg
22981 tagtactttc ttttgaactt ttaaatgcac cggccacggt ttgtggacca aaattatcca
23041 ctgaccttat taagaaccag tgtgtcaatt ttaattttaa tggactcact ggtactggtg
23101 tgttaactcc ttcttcaaag agatttcaac catttcaaca atttggccgt gatgtttctg
23161 atttcactga ttccgttcga gatcctaaaa catctgaaat attagacatt tcaccttgcg
23221 cttttggggg tgtaagtgta attacacctg gaacaaatgc ttcatctgaa gttgctgttc
23281 tatatcaaga tgttaactgc actgatgttt ctacagcaat tcatgcagat caactcacac
23341 cagcttggcg catatattct actggaaaca atgtattcca gactcaagca ggctgtctta
23401 taggagctga gcatgtcgac acttcttatg agtgcgacat tcctattgga gctggcattt
23461 gtgctagtta ccatacagtt tctttattac gtagtactag ccaaaaatct attgtggctt
23521 atactatgtc tttaggtgct gatagttcaa ttgcttactc taataacacc attgctatac
23581 ctactaactt ttcaattagc attactacag aagtaatgcc tgtttctatg gctaaaacct
23641 ccgtagattg taatatgtac atctgcggag attctactga atgtgctaat ttgcttctcc
23701 aatatggtag cttttgcaca caactaaatc gtgcactctc aggtattgct gctgaacagg
23761 atcgcaacac acgtgaagtg ttcgctcaag tcaaacaaat gtacaaaacc ccaactttga
23821 aatattttgg tggttttaat ttttcacaaa tattacctga ccctctaaag ccaactaaga
23881 ggtcttttat tgaggacttg ctctttaata aggtgacact cgctgatgct ggcttcatga
23941 agcaatatgg cgaatgccta ggtgatatta atgctagaga tctcatttgt gcgcagaagt
24001 tcaatggact tacagtgttg ccacctctgc tcactgatga tatgattgct gcctacactg
24061 ctgctctagt tagtggtact gccactgctg gatggacatt tggtgctggc gctgctcttc
24121 aaataccttt tgctatgcaa atggcatata ggttcaatgg cattggagtt acccaaaatg
24181 ttctctatga gaaccaaaaa caaatcgcca accaatttaa caaggcgatt agtcaaattc
24241 aagaatcact tacaacaaca tcaactgcat tgggcaagct gcaagacgtt gttaaccaga
24301 atgctcaagc attaaacaca cttgttaaac aacttagctc taattttggt gcaatttcaa
24361 gtgtgctaaa tgatatcctt tcgcgacttg ataaagtcga ggcggaggta caaattgaca
24421 ggttaattac aggcagactt caaagccttc aaacctatgt aacacaacaa ctaatcaggg
24481 ctgctgaaat cagggcttct gctaatcttg ctgctactaa aatgtctgag tgtgttcttg
24541 gacaatcaaa aagagttgac ttttgtggaa agggctacca ccttatgtcc ttcccacaag
24601 cagccccgca tggtgttgtc ttcctacatg tcacgtatgt gccatcccag gagaggaact
24661 tcaccacagc gccagcaatt tgtcatgaag gcaaagcata cttccctcgt gaaggtgttt
24721 ttgtgtttaa tggcacttct tggtttatta cacagaggaa cttcttttct ccacaaataa
24781 ttactacaga caatacattt gtctcaggaa attgtgatgt cgttattggc atcattaaca
24841 acacagttta tgatcctctg caacctgagc ttgactcatt caaagaagag ctggacaagt
24901 acttcaaaaa tcatacatca ccagatgttg atcttggcga catttcaggc attaacgctt
24961 ctgtcgtcaa cattcaaaaa gaaattgacc gcctcaatga ggtcgctaaa aatttaaatg
25021 aatcactcat tgaccttcaa gaattgggaa aatatgagca atatattaaa tggccttggt
25081 atgtttggct cggcttcatt gctggactaa ttgccatcgt catggttaca atcttgcttt
25141 gttgcatgac tagttgttgc agttgcctca agggtgcatg ctcttgtggt tcttgctgca
25201 agtttgatga ggatgactct gagccagttc tcaagggtgt caaattacat tacacataaa
25261 cgaacttatg gatttgttta tgagattttt tactcttaga tcaattactg cacagccagt
25321 aaaaattgac aatgcttctc ctgcaagtac tgttcatgct acagcaacga taccgctaca
25381 agcctcactc cctttcggat ggcttgttat tggcgttgca tttcttgctg tttttcagag
25441 cgctaccaaa ataattgcgc tcaataaaag atggcagcta gccctttata agggcttcca
25501 gttcatttgc aatttactgc tgctatttgt taccatctat tcacatcttt tgcttgtcgc
25561 tgcaggtatg gaggcgcaat ttttgtacct ctatgccttg atatattttc tacaatgcat
25621 caacgcatgt agaattatta tgagatgttg gctttgttgg aagtgcaaat ccaagaaccc
25681 attactttat gatgccaact actttgtttg ctggcacaca cataactatg actactgtat
25741 accatataac agtgtcacag atacaattgt cgttactgaa ggtgacggca tttcaacacc
25801 aaaactcaaa gaagactacc aaattggtgg ttattctgag gataggcact caggtgttaa
25861 agactatgtc gttgtacatg gctatttcac cgaagtttac taccagcttg agtctacaca
25921 aattactaca gacactggta ttgaaaatgc tacattcttc atctttaaca agcttgttaa
25981 agacccaccg aatgtgcaaa tacacacaat cgacggctct tcaggagttg ctaatccagc
26041 aatggatcca atttatgatg agccgacgac gactactagc gtgcctttgt aagcacaaga
26101 aagtgagtac gaacttatgt actcattcgt ttcggaagaa acaggtacgt taatagttaa
26161 tagcgtactt ctttttcttg ctttcgtggt attcttgcta gtcacactag ccatccttac
26221 tgcgcttcga ttgtgtgcgt actgctgcaa tattgttaac gtgagtttag taaaaccaac
26281 ggtttacgtc tactcgcgtg ttaaaaatct gaactcttct gaaggagttc ctgatcttct
26341 ggtctaaacg aactaactat tattattatt ctgtttggaa ctttaacatt gcttatcatg
26401 gcagacaacg gtactattac cgttgaggag cttaaacaac tcctggaaca atggaaccta
26461 gtaataggtt tcctattcct agcctggatt atgttactac aatttgccta ttctaatcgg
26521 aacaggtttt tgtacataat aaagcttgtt ttcctctggc tcttgtggcc agtaacactt
26581 gcttgttttg tgcttgctgc tgtctacaga attaattggg tgactggcgg gattgcgatt
26641 gcaatggctt gtattgtagg cttgatgtgg cttagctact tcgttgcttc cttcaggctg
26701 tttgctcgta cccgctcaat gtggtcattc aacccagaaa caaacattct tctcaatgtg
26761 cctctccggg ggacaattgt gaccagaccg ctcatggaaa gtgaacttgt cattggtgct
26821 gtgatcattc gtggtcactt gcgaatggcc ggacactccc tagggcgctg tgacattaag
26881 gacctgccaa aagagatcac tgtggctaca tcacgaacgc tttcttatta caaattagga
26941 gcgtcgcagc gtgtaggcac tgattcaggt tttgctgcat acaaccgcta ccgtattgga
27001 aactataaat taaatacaga ccacgccggt agcaacgaca atattgcttt gctagtacag
27061 taagtgacaa cagatgtttc atcttgttga cttccaggtt acaatagcag agatattgat
27121 tatcattatg aggactttca ggattgctat ttggaatctt gacgttataa taagttcaat
27181 agtgagacaa ttatttaagc ctctaactaa gaagaattat tcggagttag atgatgaaga
27241 acctatggag ttagattatc cataaaacga acatgaaaat tattctcttc ctgacattga
27301 ttgtatttac atcttgcgag ctatatcact atcaggagtg tgttagaggt acgactgtac
27361 tactaaaaga accttgccca tcaggaacat acgagggcaa ttcaccattt caccctcttg
27421 ctgacaataa atttgcacta acttgcacta gcacacactt tgcttttgct tgtgctgacg
27481 gtactcgaca tacctatcag ctgcgtgcaa gatcagtttc accaaaactt ttcatcagac
27541 aagaggaggt tcaacaagag ctctactcgc cactttttct cattgttgct gctctagtat
27601 ttttaatact ttgcttcacc attaagagaa agacagaatg aatgagctca ctttaattga
27661 cttctatttg tgctttttag cctttctgct attccttgtt ttaataatgc ttattatatt
27721 ttggttttca ctcgaaatcc aggatctaga agaaccttgt accaaagtct aaacgaacat
27781 gaaacttctc attgttttga cttgtatttc tctatgcagt tgcatatgca ctgtagtaca
27841 gcgctgtgca tctaataaac ctcatgtgct tgaagatcct tgtaaggtac aacactaggg
27901 gtaatactta tagcactgct tggctttgtg ctctaggaaa ggttttacct tttcatagat
27961 ggcacactat ggttcaaaca tgcacaccta atgttactat caactgtcaa gatccagctg
28021 gtggtgcgct tatagctagg tgttggtacc ttcatgaagg tcaccaaact gctgcattta
28081 gagacgtact tgttgtttta aataaacgaa caaattaaaa tgtctgataa tggaccccaa
28141 tcaaaccaac gtagtgcccc ccgcattaca tttggtggac ccacagattc aactgacaat
28201 aaccagaatg gaggacgcaa tggggcaagg ccaaaacagc gccgacccca aggtttaccc
28261 aataatactg cgtcttggtt cacagctctc actcagcatg gcaaggagga acttagattc
28321 cctcgaggcc agggcgttcc aatcaacacc aatagtggtc cagatgacca aattggctac
28381 taccgaagag ctacccgacg agttcgtggt ggtgacggca aaatgaaaga gctcagcccc
28441 agatggtact tctattacct aggaactggc ccagaagctt cacttcccta cggcgctaac
28501 aaagaaggca tcgtatgggt tgcaactgag ggagccttga atacacccaa agaccacatt
28561 ggcacccgca atcctaataa caatgctgcc accgtgctac aacttcctca aggaacaaca
28621 ttgccaaaag gcttctacgc agagggaagc agaggcggca gtcaagcctc ttctcgctcc
28681 tcatcacgta gtcgcggtaa ttcaagaaat tcaactcctg gcagcagtag gggaaattct
28741 cctgctcgaa tggctagcgg aggtggtgaa actgccctcg cgctattgct gctagacaga
28801 ttgaaccagc ttgagagcaa agtttctggt aaaggccaac aacaacaagg ccaaactgtc
28861 actaagaaat ctgctgctga ggcatctaaa aagcctcgcc aaaaacgtac tgccacaaaa
28921 cagtacaacg tcactcaagc atttgggaga cgtggtccag aacaaaccca aggaaatttc
28981 ggggaccaag acctaatcag acaaggaact gattacaaac attggccgca aattgcacaa
29041 tttgctccaa gtgcctctgc attctttgga atgtcacgca ttggcatgga agtcacacct
29101 tcgggaacat ggctgactta tcatggagcc attaaattgg atgacaaaga tccacaattc
29161 aaagacaacg tcatactgct gaacaagcac attgacgcat acaaaacatt cccaccaaca
29221 gagcctaaaa aggacaaaaa gaaaaagact gatgaagctc agcctttgcc gcagagacaa
29281 aagaagcagc ccactgtgac tcttcttcct gcggctgaca tggatgattt ctccagacaa
29341 cttcaaaatt ccatgagtgg agcttctgct gattcaactc aggcataaac actcatgatg
29401 accacacaag gcagatgggc tatgtaaacg ttttcgcaat tccgtttacg atacatagtc
29461 tactcttgtg cagaatgaat tctcgtaact aaacagcaca agtaggttta gttaacttta
29521 atctcacata gcaatcttta atcaatgtgt aacattaggg aggacttgaa agagccacca
29581 cattttcatc gaggccacgc ggagtacgat cgagggtaca gtgaataatg ctagggagag
29641 ctgcctatat ggaagagccc taatgtgtaa aattaatttt agtagtgcta tccccatgtg
29701 attttaatag cttcttagga gaatgacaaa aaaaaaaaaa aaaaaaaaaa a
"""
# Normalize the raw sequence dump defined above: drop spaces, newlines, and
# the per-line position numbers, then uppercase the remaining bases.
# str.maketrans/str.translate deletes every target character in one C-level
# pass, instead of one full-string .replace() scan per character (11 passes).
sars = sars.translate(str.maketrans("", "", " \n0123456789")).upper()
| [
"noreply@github.com"
] | testpilot0.noreply@github.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.