blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ec6cb4a00540f0ac4bd6f72a34768a5046f2c0b4 | aae89d8db87f9071204002c2b723937e37e7e8eb | /LOONGAT/loongat/04-Menu/4.1-Applications/4.1.6-SoundVideo/4.1.6.2-MpvMediaPlayer-F/4.1.6.2-MpvMediaPlayer-F.py | f34e22479e12e7ba90ad9c35570705857e7fa026 | [] | no_license | zhangyacnhao/AutoTest | 4c3f6663e03dbaf37b5ca55b5d2ac4e3521728dc | e3536fd3f2e52aa4030937ec104c7341e5bc753e | refs/heads/master | 2020-12-02T06:39:25.956986 | 2017-07-11T08:40:08 | 2017-07-11T08:40:08 | 96,871,707 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,882 | py | #!/usr/bin/env python
#coding=utf-8
from ldtp import *
from ldtputils import *
from time import sleep
import sys
import os
import commands
reload(sys)
#sys.setdefaultencoding("utf8")
#路径添加
g_currentPath = sys.path[0]
g_publicLibPath = os.environ['AUTOTEST_PUBLIC_LIB']
sys.path.append(g_publicLibPath)
#导入框架 库
from logcase import Logcase
from caseobject import CaseObject
from screenshot import Screenshot
#########################################################################
#全局变量区域
g_tag ="4.1.6.2-MpvMediaPlayer-F"
mylog = Logcase()
Sshot = Screenshot()
winName = "未命名 - MpvMediaPlayer"
global passwd
def chkdir(dir):
try:
if not os.path.exists(dir):
mylog.ilog(g_tag, 'Begin to create Pics Directory')
if commands.getstatusoutput('mkdir resource screenshot result')[0] == 0:
mylog.ilog(g_tag, 'Create target dirs successfully!')
else:
mylog.elog(g_tag, 'Create target dirs failed!')
return False
else:
res=os.system('rm -rf screenshot/* result/*')
if res !=0:
mylog.ilog(g_tag,'Delete target dirs Failed!')
mylog.ilog(g_tag,'./resouce has existed!' )
except (NameError,Exception) as e:
mylog.elog(g_tag,'chkdir function Exited False')
print e
return False
finally:
return True
def work():
    """Launch mpv in pseudo-GUI mode, screenshot it, then kill it.

    NOTE(review): os.system blocks until the spawned command returns --
    presumably mpv's pseudo-gui either forks or this relies on the test
    runner's environment; confirm the sleeps actually overlap a live
    mpv window.
    """
    os.system('mpv --profile=pseudo-gui')
    from time import sleep
    #res = waittillguiexist('frmWelcome')
    #print res
    #if res==1:
    #    mylog.ilog(g_tag,'i catch the window')
    sleep(5)
    # Capture the screen so the result can be inspected after the run.
    Sshot.scrprint(g_tag, 'After_MpvMediaPlayer', './')
    sleep(4)
    print 'will be close the window'
    kcmd = "pkill mpv"
    os.system(kcmd)
    #else:
    #    mylog.elog(g_tag,'catch the window False')
def main():
    """Entry point: cd to the case directory, prepare dirs, run the check."""
    os.chdir(g_currentPath)
    res = chkdir('resource')
    if res !=True:
        print 'Target dirs not exist!'
        mylog.elog(g_tag,'Target dirs not exist!')
        sys.exit()
    work()
if __name__ == '__main__':
main()
| [
"13644118303@163.com"
] | 13644118303@163.com |
14698f5e208340300976981461b72d99053e4499 | 6fcfb638fa725b6d21083ec54e3609fc1b287d9e | /python/django_django/django-master/django/views/static.py | 479c59cac6c4165e1254d9a1815a56860e62d1b5 | [] | no_license | LiuFang816/SALSTM_py_data | 6db258e51858aeff14af38898fef715b46980ac1 | d494b3041069d377d6a7a9c296a14334f2fa5acc | refs/heads/master | 2022-12-25T06:39:52.222097 | 2019-12-12T08:49:07 | 2019-12-12T08:49:07 | 227,546,525 | 10 | 7 | null | 2022-12-19T02:53:01 | 2019-12-12T07:29:39 | Python | UTF-8 | Python | false | false | 5,108 | py | """
Views and functions for serving static files. These are only to be used
during development, and SHOULD NOT be used in a production setting.
"""
import mimetypes
import os
import posixpath
import re
import stat
from urllib.parse import unquote
from django.http import (
FileResponse, Http404, HttpResponse, HttpResponseNotModified,
HttpResponseRedirect,
)
from django.template import Context, Engine, TemplateDoesNotExist, loader
from django.utils.http import http_date, parse_http_date
from django.utils.translation import gettext as _, gettext_lazy
def serve(request, path, document_root=None, show_indexes=False):
    """
    Serve a static file from below ``document_root`` (development only).

    Wire it up with a URL pattern such as::

        from django.views.static import serve
        url(r'^(?P<path>.*)$', serve, {'document_root': '/path/to/my/files/'})

    ``document_root`` is required. Set ``show_indexes`` to ``True`` to get a
    basic directory listing, rendered from the hardcoded template below or
    from an overriding ``static/directory_index.html`` template.
    """
    # Sanitise the requested path: normalise, drop the leading slash, then
    # rebuild it component by component, discarding anything that could
    # escape the document root (drive letters, '.', '..', empty segments).
    cleaned = posixpath.normpath(unquote(path)).lstrip('/')
    rebuilt = ''
    for piece in cleaned.split('/'):
        if not piece:
            # Strip empty path components.
            continue
        _drive, piece = os.path.splitdrive(piece)
        _head, piece = os.path.split(piece)
        if piece in (os.curdir, os.pardir):
            # Strip '.' and '..' in path.
            continue
        rebuilt = os.path.join(rebuilt, piece).replace('\\', '/')
    if rebuilt and cleaned != rebuilt:
        # Anything was stripped: redirect the client to the safe path.
        return HttpResponseRedirect(rebuilt)
    fullpath = os.path.join(document_root, rebuilt)
    if os.path.isdir(fullpath):
        if not show_indexes:
            raise Http404(_("Directory indexes are not allowed here."))
        return directory_index(rebuilt, fullpath)
    if not os.path.exists(fullpath):
        raise Http404(_('"%(path)s" does not exist') % {'path': fullpath})
    # Respect the If-Modified-Since header.
    statobj = os.stat(fullpath)
    if not was_modified_since(request.META.get('HTTP_IF_MODIFIED_SINCE'),
                              statobj.st_mtime, statobj.st_size):
        return HttpResponseNotModified()
    content_type, encoding = mimetypes.guess_type(fullpath)
    response = FileResponse(open(fullpath, 'rb'),
                            content_type=content_type or 'application/octet-stream')
    response["Last-Modified"] = http_date(statobj.st_mtime)
    if stat.S_ISREG(statobj.st_mode):
        response["Content-Length"] = statobj.st_size
    if encoding:
        response["Content-Encoding"] = encoding
    return response
DEFAULT_DIRECTORY_INDEX_TEMPLATE = """
{% load i18n %}
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="Content-type" content="text/html; charset=utf-8" />
<meta http-equiv="Content-Language" content="en-us" />
<meta name="robots" content="NONE,NOARCHIVE" />
<title>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</title>
</head>
<body>
<h1>{% blocktrans %}Index of {{ directory }}{% endblocktrans %}</h1>
<ul>
{% if directory != "/" %}
<li><a href="../">../</a></li>
{% endif %}
{% for f in file_list %}
<li><a href="{{ f|urlencode }}">{{ f }}</a></li>
{% endfor %}
</ul>
</body>
</html>
"""
template_translatable = gettext_lazy("Index of %(directory)s")
def directory_index(path, fullpath):
    """Render a simple HTML listing of the directory at ``fullpath``."""
    # Prefer a project-supplied template; fall back to the built-in one.
    try:
        template = loader.select_template([
            'static/directory_index.html',
            'static/directory_index',
        ])
    except TemplateDoesNotExist:
        template = Engine(libraries={'i18n': 'django.templatetags.i18n'}).from_string(DEFAULT_DIRECTORY_INDEX_TEMPLATE)
    # Hidden entries (dotfiles) are skipped; subdirectories get a '/' suffix.
    entries = [
        name + '/' if os.path.isdir(os.path.join(fullpath, name)) else name
        for name in os.listdir(fullpath)
        if not name.startswith('.')
    ]
    context = Context({
        'directory': path + '/',
        'file_list': entries,
    })
    return HttpResponse(template.render(context))
def was_modified_since(header=None, mtime=0, size=0):
    """
    Decide whether the resource changed since the client's cached copy.

    header
        Raw If-Modified-Since value, possibly carrying a '; length=N'
        suffix. ``None`` means the client sent no header, so report True.
    mtime
        Modification time of the resource being served.
    size
        Size in bytes of the resource being served.
    """
    # Any parse failure (malformed header, bad date, bad length) is
    # treated as "modified" so the client gets a fresh copy.
    try:
        if header is None:
            raise ValueError
        parsed = re.match(r"^([^;]+)(; length=([0-9]+))?$", header,
                          re.IGNORECASE)
        # A non-matching header leaves parsed as None; the .group() call
        # below then raises AttributeError, which we catch.
        if int(mtime) > parse_http_date(parsed.group(1)):
            raise ValueError
        claimed_length = parsed.group(3)
        if claimed_length and int(claimed_length) != size:
            raise ValueError
    except (AttributeError, ValueError, OverflowError):
        return True
    return False
| [
"659338505@qq.com"
] | 659338505@qq.com |
2e95ff4a78787e3e931b2ee198077cfb1abc5341 | 05d9291f8d02bb98a3d2a3c0b49858f1f2e6d834 | /quick_service/quick_service/doctype/production/production.py | 6a2cdf6f6856a35d314f88b03bcf957788dac9f7 | [
"MIT"
] | permissive | leaftechnology/quick_service | b1e6fa4012bcc75816cd4e895e4f9e6b9105c2a8 | 69ff87a33b3f135b7d12977c3e95727243b2f740 | refs/heads/master | 2023-05-04T05:30:22.610405 | 2021-05-22T05:12:41 | 2021-05-22T05:12:41 | 301,926,248 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 17,180 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2020, jan and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from frappe.model.document import Document
from erpnext.stock.stock_ledger import get_previous_sle
from frappe.utils import cint, flt
from datetime import datetime
class Production(Document):
    """Controller for the Production DocType.

    Drives status transitions and generates the downstream documents
    (Stock Entry, Sales Invoice, Delivery Note, Journal Entry) using
    direct SQL reads/writes via frappe.db.sql.
    """
    @frappe.whitelist()
    def change_status(self, status):
        """Move this Production to `status`.

        The current status is saved into `last_status` so that "Open"
        can later restore it; "Completed" also completes the linked
        service chain via get_service_records().
        """
        if status == "Closed" or status == "Completed":
            frappe.db.sql(""" UPDATE `tabProduction` SET last_status=%s WHERE name=%s """,(self.status, self.name))
            frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s """,(status, self.name))
            frappe.db.commit()
        elif status == "Open":
            frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s """, (self.last_status, self.name))
            frappe.db.commit()
        if status == "Completed":
            self.get_service_records()

    @frappe.whitelist()
    def get_service_records(self):
        """Mark the linked Estimation, its Inspections, and the SRN Completed."""
        estimation_ = ""
        estimation = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name= %s""", self.name, as_dict=1)
        if len(estimation) > 0:
            estimation_ = estimation[0].estimation
            frappe.db.sql(""" UPDATE `tabEstimation` SET status=%s WHERE name=%s""",
                          ("Completed", estimation_))
        inspections = frappe.db.sql(""" SELECT * FROM `tabInspection Table` WHERE parent=%s """, estimation_, as_dict=1)
        for i in inspections:
            frappe.db.sql(""" UPDATE `tabInspection` SET status=%s WHERE name=%s""",
                          ("Completed", i.inspection))
        srn = frappe.db.sql(""" SELECT * FROM `tabEstimation` WHERE name=%s """, estimation_, as_dict=1)
        if len(srn) > 0:
            srn_ = srn[0].service_receipt_note
            frappe.db.sql(""" UPDATE `tabService Receipt Note` SET status=%s WHERE name=%s""",
                          ("Completed", srn_))
        frappe.db.commit()

    def on_update_after_submit(self):
        """Re-evaluate each linked source Production's status.

        A source Production becomes "Completed" when the sum of raw
        material drawn from it equals its qty, otherwise "Linked".
        """
        for i in self.raw_material:
            if i.production:
                get_qty = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name=%s""", i.production, as_dict=1)
                get_qty_total = frappe.db.sql(""" SELECT SUM(qty_raw_material) as qty_raw_material FROM `tabRaw Material` WHERE production=%s """, i.production, as_dict=1)
                if get_qty[0].qty == get_qty_total[0].qty_raw_material:
                    frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Completed",get_qty[0].status,i.production))
                    frappe.db.commit()
                else:
                    frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Linked",get_qty[0].status,i.production))
                    frappe.db.commit()

    @frappe.whitelist()
    def change_production_status(self, production):
        """Set the Production behind raw-material row `production` to 'To Deliver and Bill'."""
        raw_material = frappe.db.sql(""" SELECT * FROM `tabRaw Material` WHERE name=%s""",production, as_dict=1)
        if len(raw_material) > 0 and raw_material[0].production:
            frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""", ("To Deliver and Bill", raw_material[0].production))
            frappe.db.commit()

    def on_cancel(self):
        """Roll linked Productions back to 'In Progress' and cancel generated Stock Entries."""
        for i in self.raw_material:
            if i.production:
                frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""", ("In Progress", i.production))
                frappe.db.commit()
        se = frappe.db.sql(""" SELECT * FROM `tabStock Entry` WHERE production=%s """, self.name, as_dict=1)
        if len(se) > 0:
            for i in se:
                se_record = frappe.get_doc("Stock Entry", i.name)
                se_record.cancel()

    def on_submit(self):
        """Same status re-evaluation as on_update_after_submit, run at submit time."""
        for i in self.raw_material:
            if i.production:
                get_qty = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name=%s""", i.production, as_dict=1)
                get_qty_total = frappe.db.sql(""" SELECT SUM(qty_raw_material) as qty_raw_material FROM `tabRaw Material` WHERE production=%s """, i.production, as_dict=1)
                if get_qty[0].qty == get_qty_total[0].qty_raw_material:
                    frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Completed",get_qty[0].status,i.production))
                    frappe.db.commit()
                else:
                    frappe.db.sql(""" UPDATE `tabProduction` SET status=%s, last_status=%s WHERE name=%s""", ("Linked",get_qty[0].status,i.production))
                    frappe.db.commit()

    @frappe.whitelist()
    def set_available_qty(self):
        """Refresh each raw-material row's available_qty from the latest stock ledger entry."""
        time = frappe.utils.now_datetime().time()
        date = frappe.utils.now_datetime().date()
        for d in self.get('raw_material'):
            previous_sle = get_previous_sle({
                "item_code": d.item_code,
                "warehouse": d.warehouse,
                "posting_date": date,
                "posting_time": time
            })
            # get actual stock at source warehouse
            d.available_qty = previous_sle.get("qty_after_transaction") or 0

    def validate(self):
        """Pick the naming series from the production type."""
        if self.type == "Assemble":
            self.series = "SK-"
        elif self.type == "Disassemble":
            self.series = "SK-D-"
        elif self.type == "Service":
            self.series = "CS-"

    @frappe.whitelist()
    def check_raw_materials(self):
        """Return (False, item_code) for the first raw material with zero stock, else (True, "")."""
        for i in self.raw_material:
            if i.available_qty == 0:
                return False, i.item_code
        return True, ""

    @frappe.whitelist()
    def generate_se(self):
        """Create and submit the Stock Entry for this production.

        Entry type depends on production type: Manufacture (Assemble /
        Service), Material Issue (Re-Service) or Repack. Refuses when a
        raw material has no stock unless negative stock is allowed.
        """
        check,item_code = self.check_raw_materials()
        allow_negative_stock = cint(frappe.db.get_value("Stock Settings", None, "allow_negative_stock"))
        if check or (not check and allow_negative_stock):
            doc_se = {
                "doctype": "Stock Entry",
                "stock_entry_type": "Manufacture" if self.type == "Assemble" or self.type == "Service" else "Material Issue" if self.type == "Re-Service" else"Repack",
                "items": self.get_manufacture_se_items() if self.type == "Assemble" or self.type == "Service" else self.get_material_issue_se_items() if self.type == "Re-Service" else self.get_repack_se_items(),
                "production": self.name,
                "additional_costs": self.get_additional_costs(),
                "analytic_account": self.analytic_account if self.analytic_account else ""
            }
            frappe.get_doc(doc_se).insert(ignore_permissions=1).submit()
            if self.type == "Re-Service":
                frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""",
                              ("Completed", self.name))
                frappe.db.commit()
            else:
                frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""",
                              ("To Deliver and Bill", self.name))
                frappe.db.commit()
            return ""
        else:
            frappe.throw("Item " + item_code + " Has no available stock")

    @frappe.whitelist()
    def generate_finish_good_se(self):
        """Create and submit a Manufacture Stock Entry for the finished good only."""
        doc_se1 = {
            "doctype": "Stock Entry",
            "stock_entry_type": "Manufacture",
            "production": self.name,
            "additional_costs": self.get_additional_costs(),
            "items": [{
                'item_code': self.item_code_prod,
                't_warehouse': self.warehouse,
                'qty': self.qty,
                'uom': self.umo,
                'basic_rate': self.rate,
                'cost_center': self.cost_center,
                "analytic_account": self.analytic_account
            }],
        }
        frappe.get_doc(doc_se1).insert(ignore_permissions=1).submit()

    @frappe.whitelist()
    def get_additional_costs(self):
        """Map the additional-cost child rows into Stock Entry additional_costs rows."""
        costs = []
        for i in self.additional_cost:
            costs.append({
                "expense_account": i.expense_ledger,
                "description": i.description,
                "amount": i.additional_cost_amount
            })
        return costs

    @frappe.whitelist()
    def generate_dn(self):
        """Create a draft Delivery Note for input_qty units; return its name.

        NOTE(review): the limit checked is qty_for_sidn but the error
        message reports self.qty -- confirm which one is intended.
        """
        if self.input_qty > self.qty_for_sidn:
            frappe.throw("Maximum qty that can be generated is " + str(self.qty))
        doc_dn = {
            "doctype": "Delivery Note",
            "customer": self.customer,
            "items": self.get_si_items("DN", self.input_qty),
            "production": self.get_production_items(self.input_qty),
        }
        dn = frappe.get_doc(doc_dn)
        dn.insert(ignore_permissions=1)
        return dn.name

    @frappe.whitelist()
    def generate_si(self):
        """Create a draft Sales Invoice for input_qty units; return its name.

        NOTE(review): same qty_for_sidn vs self.qty mismatch as generate_dn.
        """
        if self.input_qty > self.qty_for_sidn:
            frappe.throw("Maximum qty that can be generated is " + str(self.qty))
        doc_si = {
            "doctype": "Sales Invoice",
            "customer": self.customer,
            "items": self.get_si_items("SI", self.input_qty),
            "production": self.get_production_items(self.input_qty),
        }
        si = frappe.get_doc(doc_si)
        si.insert(ignore_permissions=1)
        return si.name

    @frappe.whitelist()
    def generate_jv(self):
        """Create and submit a Journal Entry covering the advance payments."""
        doc_jv = {
            "doctype": "Journal Entry",
            "voucher_type": "Journal Entry",
            "posting_date": self.posting_date,
            "accounts": self.jv_accounts(),
            "production": self.name
        }
        jv = frappe.get_doc(doc_jv)
        jv.insert(ignore_permissions=1)
        jv.submit()

    @frappe.whitelist()
    def jv_accounts(self):
        """Build JV rows: debit each advance expense account, credit Debtors for the total."""
        accounts = []
        amount = 0
        for item in self.advance_payment:
            amount += item.amount
            accounts.append({
                'account': item.expense_account,
                'debit_in_account_currency': item.amount,
                'credit_in_account_currency': 0,
            })
        # Credit side: the first account whose name contains "Debtors".
        debit_account = frappe.db.sql(""" SELECT * FROM `tabAccount` WHERE name like %s """, "%Debtors%",as_dict=1 )
        if len(debit_account) > 0:
            accounts.append({
                'account': debit_account[0].name,
                'debit_in_account_currency': 0,
                'credit_in_account_currency': amount,
                'party_type': "Customer",
                'party': self.customer,
                'is_advance': "Yes",
            })
        # NOTE(review): leftover debug print.
        print(accounts)
        return accounts

    @frappe.whitelist()
    def get_manufacture_se_items(self):
        """Stock Entry rows for Manufacture: consume all raw materials, produce the finished item."""
        items = []
        for item in self.raw_material:
            items.append({
                'item_code': item.item_code,
                's_warehouse': item.warehouse,
                'qty': item.qty_raw_material,
                'uom': "Nos",
                'basic_rate': item.rate_raw_material,
                'cost_center': item.cost_center,
                "analytic_account": self.analytic_account
            })
        items.append({
            'item_code': self.item_code_prod,
            't_warehouse': self.warehouse,
            'qty': self.qty,
            'uom': self.umo,
            'basic_rate': self.rate,
            'cost_center': self.cost_center,
            'is_finished_item': 1,
            "analytic_account": self.analytic_account
        })
        return items

    @frappe.whitelist()
    def get_material_issue_se_items(self):
        """Stock Entry rows for Material Issue: consume the raw materials only."""
        items = []
        for item in self.raw_material:
            items.append({
                'item_code': item.item_code,
                's_warehouse': item.warehouse,
                'qty': item.qty_raw_material,
                'uom': "Nos",
                'basic_rate': item.rate_raw_material,
                'cost_center': item.cost_center,
                "analytic_account": self.analytic_account
            })
        return items

    @frappe.whitelist()
    def get_repack_se_items(self):
        """Stock Entry rows for Repack: receive raw materials (if in stock), issue the source item."""
        items = []
        for item in self.raw_material:
            if item.available_qty > 0:
                items.append({
                    'item_code': item.item_code,
                    't_warehouse': item.warehouse,
                    'qty': item.qty_raw_material,
                    'uom': "Nos",
                    'basic_rate': item.rate_raw_material,
                    'cost_center': item.cost_center,
                    "analytic_account": self.analytic_account
                })
        items.append({
            'item_code': self.item_code_prod,
            's_warehouse': self.warehouse,
            'qty': self.qty,
            'uom': self.umo,
            'basic_rate': self.rate,
            'cost_center': self.cost_center,
            "analytic_account": self.analytic_account
        })
        return items

    @frappe.whitelist()
    def get_si_items(self, type, qty):
        """Build the single SI/DN item row; DN rows additionally carry the warehouse."""
        obj = {
            'item_code': self.item_code_prod,
            'item_name': self.get_item_value("item_name"),
            'description': self.get_item_value("description"),
            'qty': qty,
            'uom': "Nos",
            'rate': self.invoice_rate,
            'cost_center': self.cost_center,
            'income_account': self.income_account
        }
        if type == "DN":
            obj["warehouse"] = self.warehouse
        return [obj]

    @frappe.whitelist()
    def get_production_items(self, qty):
        """Build the production-reference child row attached to the SI/DN."""
        return [{
            'reference': self.name,
            'qty': qty,
            'rate': self.invoice_rate,
            'amount': self.invoice_rate * qty,
        }]

    @frappe.whitelist()
    def get_sales_man(self):
        """Build the sales-man child row referencing this production."""
        return [{
            'sales_man': self.sales_man,
            'reference': self.name,
        }]

    @frappe.whitelist()
    def get_item_value(self, field):
        """Return a single field of the finished-good Item record."""
        items = frappe.db.sql(""" SELECT * FROM `tabItem` WHERE name=%s """, self.item_code_prod, as_dict=1)
        return items[0][field]
@frappe.whitelist()
def get_available_qty(production):
    """Return the production's qty minus raw material already drawn from it.

    Only raw-material rows belonging to submitted (docstatus=1)
    Productions count against the available quantity.
    """
    get_qty = frappe.db.sql(""" SELECT * FROM `tabProduction` WHERE name=%s""", production, as_dict=1)
    get_qty_total = frappe.db.sql(
        """ SELECT SUM(RM.qty_raw_material) as qty_raw_material FROM `tabProduction` AS P INNER JOIN `tabRaw Material` AS RM ON RM.parent = P.name and RM.production=%s WHERE P.docstatus=1 """,
        production, as_dict=1)
    # SUM() yields NULL (falsy) when no rows matched; fall back to full qty.
    # (Leftover debug print removed.)
    return get_qty[0].qty - get_qty_total[0].qty_raw_material if get_qty_total[0].qty_raw_material else get_qty[0].qty
@frappe.whitelist()
def get_rate(item_code, warehouse, based_on, price_list):
    """Return (rate, balance) for an item.

    balance is the latest stock-ledger qty in `warehouse` (0 when no
    warehouse is given). rate comes from the newest Item Price row on
    `price_list`, unless `based_on` selects the Item's valuation rate or
    last purchase rate instead. Missing records yield a rate of 0.
    """
    time = frappe.utils.now_datetime().time()
    date = frappe.utils.now_datetime().date()
    balance = 0
    if warehouse:
        previous_sle = get_previous_sle({
            "item_code": item_code,
            "warehouse": warehouse,
            "posting_date": date,
            "posting_time": time
        })
        # get actual stock at source warehouse
        balance = previous_sle.get("qty_after_transaction") or 0
    condition = ""
    if price_list == "Standard Buying":
        condition += " and buying = 1 "
    elif price_list == "Standard Selling":
        condition += " and selling = 1 "
    query = """ SELECT * FROM `tabItem Price` WHERE item_code=%s {0} ORDER BY valid_from DESC LIMIT 1""".format(condition)
    item_price = frappe.db.sql(query, item_code, as_dict=1)
    rate = item_price[0].price_list_rate if len(item_price) > 0 else 0
    # Leftover debug prints removed; the two near-identical branches that
    # each re-queried tabItem are merged into one lookup.
    if based_on in ("Valuation Rate", "Last Purchase Rate"):
        item_record = frappe.db.sql(
            """ SELECT * FROM `tabItem` WHERE item_code=%s""",
            item_code, as_dict=1)
        if len(item_record) > 0:
            rate = item_record[0].valuation_rate if based_on == "Valuation Rate" else item_record[0].last_purchase_rate
        else:
            rate = 0
    return rate, balance
@frappe.whitelist()
def get_uom(item_code):
    """Return (stock_uom, item_name) of the given Item.

    NOTE(review): raises IndexError when the item does not exist.
    """
    item = frappe.db.sql(
        """ SELECT * FROM `tabItem` WHERE name=%s""",
        item_code, as_dict=1)
    return item[0].stock_uom, item[0].item_name
@frappe.whitelist()
def get_address(customer):
    """Return the customer's primary Address row, or {} when none exists."""
    address = frappe.db.sql("""
    SELECT
        A.name,
        A.address_line1,
        A.city,
        A.county ,
        A.state,
        A.country,
        A.pincode
    FROM `tabAddress` AS A
        INNER JOIN `tabDynamic Link` AS DL
        ON DL.link_doctype=%s and DL.link_name=%s and DL.parent = A.name
    WHERE A.is_primary_address=1 """,("Customer", customer), as_dict=1)
    return address[0] if len(address) > 0 else {}
@frappe.whitelist()
def get_jv(production):
    """Return the name of the Journal Entry linked to `production`, or ''."""
    jv = frappe.db.sql(""" SELECT * FROM `tabJournal Entry` WHERE production=%s """, production, as_dict=1)
    return jv[0].name if len(jv) > 0 else ""
@frappe.whitelist()
def get_se(name):
    """Return True when a Stock Entry exists for production `name`."""
    se = frappe.db.sql(""" SELECT * FROM `tabStock Entry` WHERE production=%s """, name, as_dict=1)
    return len(se) > 0
@frappe.whitelist()
def get_dn_or_si(name):
    """Return (has_sales_invoice, has_delivery_note) for production `name`."""
    si = frappe.db.sql("""
    SELECT * FROM `tabSales Invoice Production` WHERE reference=%s and parenttype=%s """,
                       (name,"Sales Invoice"), as_dict=1)
    dn = frappe.db.sql("""
        SELECT * FROM `tabSales Invoice Production` WHERE reference=%s and parenttype=%s """,
                       (name, "Delivery Note"), as_dict=1)
    return len(si) > 0,len(dn) > 0
@frappe.whitelist()
def get_dn_si_qty(item_code, qty, name):
    """Return how much of `qty` is still open for production `name`.

    Sums the quantities already covered by submitted, non-cancelled
    Sales Invoices or Delivery Notes that reference this production and
    subtracts them from `qty`.

    NOTE(review): `item_code` is unused; kept for interface compatibility
    with existing callers.
    """
    si_query = """
    SELECT SIP.qty as qty, SI.status  FROM `tabSales Invoice` AS SI
    INNER JOIN `tabSales Invoice Production` AS SIP ON SI.name = SIP.parent
    WHERE SIP.reference=%s and SIP.parenttype=%s and SI.docstatus = 1 and SI.status!='Cancelled'
    """
    si = frappe.db.sql(si_query, (name, "Sales Invoice"), as_dict=1)
    dn_query = """
        SELECT SIP.qty as qty, DN.status  FROM `tabDelivery Note` AS DN
        INNER JOIN `tabSales Invoice Production` AS SIP ON DN.name = SIP.parent
        WHERE SIP.reference=%s and SIP.parenttype=%s and DN.docstatus = 1 and DN.status!='Cancelled'
    """
    dn = frappe.db.sql(dn_query, (name, "Delivery Note"), as_dict=1)
    # Count against whichever document list has more rows; on a tie the
    # Delivery Notes win (matches the original three-way branch). Leftover
    # debug prints removed.
    covered = si if len(si) > len(dn) else dn
    total_qty = sum(row.qty for row in covered)
    return float(qty) - float(total_qty)
@frappe.whitelist()
def change_status(name):
    """Force production `name` into 'Partially Delivered'; returns 1."""
    frappe.db.sql(""" UPDATE `tabProduction` SET status=%s WHERE name=%s""", ("Partially Delivered", name))
    frappe.db.commit()
    return 1
@frappe.whitelist()
def get_valuation_rate(item_code):
    """Return the Item's valuation_rate, or 0 when the item does not exist."""
    item = frappe.db.sql(""" SELECT * FROM `tabItem` WHERE item_code=%s""", (item_code),as_dict=1)
    return item[0].valuation_rate if len(item) > 0 else 0
@frappe.whitelist()
def compute_selling_price(raw_materials):
    """Return the total Standard Selling price of a raw-material list.

    `raw_materials` is a JSON-encoded list of row dicts; rows without an
    'item_code' are skipped. Uses the module-level `json` import (the
    redundant function-local import was removed).
    """
    selling_price_total = 0
    raw_material = json.loads(raw_materials)
    for i in raw_material:
        # BUG FIX: the original line ended with a trailing comma, which made
        # `warehouse` a 1-tuple instead of a string and was then passed into
        # get_rate() as the warehouse argument.
        warehouse = i['warehouse'] if 'warehouse' in i and i['warehouse'] else ""
        if 'item_code' in i:
            selling_price = get_rate(i['item_code'], warehouse, "Price List", "Standard Selling")
            selling_price_total += (selling_price[0] * i['qty_raw_material'])
    return selling_price_total
@frappe.whitelist()
def selling_price_list(raw_materials):
    """Return per-item Standard Selling price rows for a raw-material list.

    `raw_materials` is a JSON-encoded list of row dicts; rows without an
    'item_code' are skipped. Uses the module-level `json` import (the
    redundant function-local import was removed).
    """
    array_selling = []
    raw_material = json.loads(raw_materials)
    for i in raw_material:
        # BUG FIX: the original line ended with a trailing comma, which made
        # `warehouse` a 1-tuple instead of a string and was then passed into
        # get_rate() as the warehouse argument.
        warehouse = i['warehouse'] if 'warehouse' in i and i['warehouse'] else ""
        if 'item_code' in i:
            selling_price = get_rate(i['item_code'], warehouse, "Price List", "Standard Selling")
            array_selling.append({
                "item_name": i['item_name'],
                "qty_raw_material": i['qty_raw_material'],
                "rate_raw_material": selling_price[0] * i['qty_raw_material']
            })
    return array_selling
return array_selling | [
"jangeles@bai.ph"
] | jangeles@bai.ph |
4fe77fa973de04fe05f0f5b446065fe4cfd739ec | 556467d4f2fa417511b12eb65e58e5d14052b96a | /applications/connection_search/aciConSearch.py | 26dba5e00456912ff2474dae062fd803c48e67b7 | [
"Apache-2.0"
] | permissive | bischatt78/acitoolkit | 56b58ea22ec3f9bf0cd47fcbe09040b34b9cff2f | 7625844e74b0378b1f259e0afdac8500b96ecf32 | refs/heads/master | 2021-01-21T18:34:41.432886 | 2016-03-15T01:56:44 | 2016-03-15T01:56:44 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 42,418 | py | ################################################################################
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
""" Connection Search
This file contains the main routine for reading in the APIC configuration,
putting it into searchable data structures, and providing an interface
whereby a search query, in the form of a flow specification, can be made
and all the matching flow specifications are returned.
The flow specification used in the search allows any of the fields to be ignored,
i.e. match any, and the IP addresses to be masked, e.g. you can specify the source
IP address, SIP, to be 10.13/6
"""
import sys
from copy import copy
import radix
import re
from acitoolkit import Endpoint, Tenant, AppProfile, Contract, EPG, OutsideL3, OutsideEPG, ContractSubject, \
FilterEntry, Context, OutsideNetwork
from acitoolkit.aciphysobject import Session
from acitoolkit.acitoolkitlib import Credentials
class LoginError(Exception):
    """Raised when authenticating to the APIC fails."""
class IpAddress(object):
    """
    An IPv4 address with an optional prefix length (CIDR), supporting
    masking, overlap tests, and prefix combination/supernetting.

    Internally the address is stored as the dotted-quad string `addr`
    plus an integer `_prefixlen`; all arithmetic goes through the
    string<->int helpers s2n/n2s.
    """

    def __init__(self, ip):
        """Build from a string ('10.13/16', '1.2.3.4', '' -> 0.0.0.0/0) or a 32-bit int (/32)."""
        if isinstance(ip, str):
            if ip == '':
                self._prefixlen = 0
                self.addr = '0.0.0.0'
            else:
                sections = ip.split('/')
                if len(sections) == 1:
                    self._prefixlen = 32
                else:
                    self._prefixlen = int(sections[1])
                self.addr = IpAddress.parse_text(sections[0])
        if isinstance(ip, int):
            self._prefixlen = 32
            self.addr = self.n2s(ip)

    @staticmethod
    def parse_text(input_ip):
        """Pad a partial dotted address like '10.13' out to four octets ('10.13.0.0')."""
        ip_bytes = []
        fields = input_ip.split('.')
        if len(fields) > 0:
            ip_bytes.append(fields[0])
        if len(fields) > 1:
            ip_bytes.append(fields[1])
        else:
            ip_bytes.append('0')
        if len(fields) > 2:
            ip_bytes.append(fields[2])
        else:
            ip_bytes.append('0')
        if len(fields) > 3:
            ip_bytes.append(fields[3])
        else:
            ip_bytes.append('0')
        return '.'.join(ip_bytes)

    @property
    def prefix(self):
        """The masked network prefix as a 'a.b.c.d/len' string."""
        return self._get_prefix()

    @property
    def prefixlen(self):
        """The prefix length (0-32)."""
        return self._prefixlen

    @property
    def mask(self):
        """The network mask as a dotted-quad string."""
        return self.n2s(self.mask_num)

    @property
    def mask_num(self):
        """Numeric network mask.

        Python ints are unbounded, so the ~ makes this negative; n2s
        masks each byte, and bitwise &/| below still behave as intended.
        """
        return ~(0xFFFFFFFF >> self.prefixlen)

    def _get_prefix(self):
        # Apply the mask to the stored address so host bits are zeroed.
        return self.n2s(self.s2n(self.addr) & self.mask_num) + '/' + str(self.prefixlen)

    def overlap(self, other):
        """
        This will return an IpAddress that is the overlap of self and other

        Returns the more specific (longer-prefix) of the two when their
        ranges intersect, otherwise None.

        :param other:
        :return:
        """
        assert isinstance(other, IpAddress)
        max_addr = min(self.max_address(), other.max_address())
        min_addr = max(self.min_address(), other.min_address())
        if min_addr <= max_addr:
            if self.prefixlen > other.prefixlen:
                return self
            else:
                return other
        else:
            return None

    @staticmethod
    def simplify(ip_list):
        """
        Will combine and then supernet prefixes in list to
        come up with the most simple list
        :param ip_list:
        :return:
        """
        return IpAddress.supernet(IpAddress.combine(ip_list))

    @staticmethod
    def supernet(ip_list):
        """
        Will combine subnets into larger subnets

        Repeatedly merges numerically adjacent prefixes into the parent
        prefix (one bit shorter) until no merge applies.
        NOTE(review): the adjacency test shifts by each prefix's own
        length -- presumably only equal-length prefixes are expected in
        the input lists; confirm for mixed-length lists.
        :param ip_list:
        :return:
        """
        if len(ip_list) == 1:
            return ip_list
        new_list = copy(ip_list)
        for ip1 in ip_list:
            for index in reversed(range(len(new_list))):
                ip2 = new_list[index]
                if ip1 != ip2:
                    p1 = ip1.prefix_num >> (32 - ip1.prefixlen)
                    p2 = ip2.prefix_num >> (32 - ip2.prefixlen)
                    if (p1 - p2 == 1) and (p1 & 0xFFFFFFFE) == (p2 & 0xFFFFFFFE):
                        new_ip = IpAddress(str(ip1.prefix))
                        new_ip._prefixlen -= 1
                        new_list.remove(ip1)
                        new_list.remove(ip2)
                        new_list.append(new_ip)
        if len(new_list) == len(ip_list):
            return new_list
        else:
            # Something merged; try again in case new merges became possible.
            return IpAddress.supernet(new_list)

    @staticmethod
    def combine(ip_list):
        """
        Will go through list and combine any prefixes that can be combined
        and return a new list with result.

        Drops any prefix fully contained inside another, repeating until
        the list is stable.
        :param ip_list:
        :return:
        """
        new_list = copy(ip_list)
        if len(ip_list) > 1:
            for candidate in ip_list:
                for index in reversed(range(len(new_list))):
                    other = new_list[index]
                    if candidate != other:
                        if IpAddress.encompass(candidate, other):
                            new_list.remove(other)
        if len(new_list) == len(ip_list):
            return new_list
        else:
            return IpAddress.combine(new_list)

    @staticmethod
    def encompass(ip1, ip2):
        """Return True when ip2's range lies entirely within ip1's range."""
        if ip1.min_address() <= ip2.min_address() and ip1.max_address() >= ip2.max_address():
            return True
        else:
            return False

    def min_address(self):
        """
        returns minimum address in the subnet
        :return:
        """
        return IpAddress(self.n2s(self.prefix_num))

    def max_address(self):
        """
        returns the maximum address in the subnet

        Built from an int, so the result is always a /32 host address.
        :return:
        """
        return IpAddress(self.prefix_num | ~self.mask_num)

    @staticmethod
    def s2n(address):
        """
        This will convert an address string to a number and return the number
        :param address:
        :return:
        """
        fields = address.split('.')
        result = int(fields[0]) * (2 ** 24)
        result1 = int(fields[1]) * (2 ** 16)
        result2 = int(fields[2]) * (2 ** 8)
        result3 = int(fields[3])
        return result + result1 + result2 + result3

    @property
    def prefix_num(self):
        """
        Will return numeric version of the prefix
        :return:
        """
        sections = self.prefix.split('/')
        fields = sections[0].split('.')
        result = int(fields[0])
        result = (result << 8) + int(fields[1])
        result = (result << 8) + int(fields[2])
        result = (result << 8) + int(fields[3])
        return result

    @staticmethod
    def n2s(address):
        """
        will return a string in the x.y.w.z format given a number
        :param address:
        :return:
        """
        b3 = str((address & 0xFF000000) >> 24)
        b2 = str((address & 0x00FF0000) >> 16)
        b1 = str((address & 0x0000FF00) >> 8)
        b0 = str(address & 0x000000FF)
        return '.'.join([b3, b2, b1, b0])

    def equiv(self, other):
        """
        Checks to see if self is equivalent to other

        This is just like ==, except it will check the prefixes rather than the absolute address
        values.
        :param other:
        :return:
        """
        if str(self.prefix) == str(other.prefix):
            return True
        else:
            return False

    def __repr__(self):
        # Note: shows the raw stored address, not the masked prefix.
        return '{0}/{1}'.format(self.addr, self.prefixlen)

    def __eq__(self, other):
        """Equality on (prefix_num, prefixlen); a string operand is parsed first."""
        if isinstance(other, str):
            if self != IpAddress(other):
                return False
        else:
            if not isinstance(self, IpAddress) or not isinstance(other, IpAddress):
                return False
            if self.prefix_num != other.prefix_num:
                return False
            if self.prefixlen != other.prefixlen:
                return False
        return True

    def __ne__(self, other):
        if self == other:
            return False
        else:
            return True

    def __gt__(self, other):
        """
        returns True if self is greater than other

        Ordering: shorter prefixes rank higher; ties compare the
        minimum address numerically.
        :param other:
        :return:
        """
        if self.prefixlen == other.prefixlen:
            return self.min_address().prefix_num > other.min_address().prefix_num
        else:
            return self.prefixlen < other.prefixlen

    def __ge__(self, other):
        """
        returns True if self is greater than or equal to other
        :param other:
        :return:
        """
        if self.prefixlen == other.prefixlen:
            return self.prefix_num >= other.prefix_num
        else:
            return self.prefixlen < other.prefixlen

    def __lt__(self, other):
        """
        returns True if self is less than other
        :param other:
        :return:
        """
        if self.prefixlen == other.prefixlen:
            return self.prefix_num < other.prefix_num
        else:
            return self.prefixlen > other.prefixlen

    def __le__(self, other):
        """
        returns True if self is less than or equal to other
        :param other:
        :return:
        """
        if self.prefixlen == other.prefixlen:
            return self.prefix_num <= other.prefix_num
        else:
            return self.prefixlen > other.prefixlen
# noinspection PyPep8Naming
class ProtocolFilter(object):
    """
    A normalized protocol filter (the fields of an ACI FilterEntry).

    Attribute names deliberately mirror the APIC/ACI camelCase field names
    rather than PEP8 snake_case.  Every field defaults to the wildcard
    'any'; the setters normalize 'unspecified' (and, for most fields, None
    and '*') to 'any' and translate well-known layer 4 service names to
    port numbers.
    """

    # well-known layer 4 service names accepted in place of port numbers
    _WELL_KNOWN_PORTS = {
        'https': 443,
        'http': 80,
        'ftp-data': 20,
        'smtp': 25,
        'dns': 53,
        'pop3': 110,
        'rtsp': 554,
    }

    def __init__(self, aci_filter=None):
        """
        Args:
            aci_filter (optional): object carrying the ACI filter
                attributes; when given, its fields are copied through the
                property setters so that they are normalized.
        """
        self._applyToFrag = 'any'
        self._arpOpc = 'any'
        self._etherT = 'any'
        self._dFromPort = 'any'
        self._dToPort = 'any'
        self._prot = 'any'
        self._sFromPort = 'any'
        self._sToPort = 'any'
        self._tcpRules = 'any'
        if aci_filter is not None:
            self.applyToFrag = aci_filter.applyToFrag
            self.arpOpc = aci_filter.arpOpc
            self.etherT = aci_filter.etherT
            self.dFromPort = aci_filter.dFromPort
            self.dToPort = aci_filter.dToPort
            # prot is assigned after etherT on purpose: the prot setter
            # may promote a wildcard etherT to 'ip'
            self.prot = aci_filter.prot
            self.sFromPort = aci_filter.sFromPort
            self.sToPort = aci_filter.sToPort
            self.tcpRules = aci_filter.tcpRules

    @property
    def applyToFrag(self):
        """Fragment flag: True, False or 'any'."""
        return self._applyToFrag

    @applyToFrag.setter
    def applyToFrag(self, value):
        if value == 'unspecified' or value is None or value == 'any' or value == '*':
            self._applyToFrag = 'any'
        elif value == 'no':
            self._applyToFrag = False
        elif value == 'yes':
            self._applyToFrag = True
        else:
            assert isinstance(value, bool)
            self._applyToFrag = value

    @property
    def arpOpc(self):
        """ARP opcode or 'any'."""
        return self._arpOpc

    @arpOpc.setter
    def arpOpc(self, value):
        if value == 'unspecified' or value is None or value == 'any' or value == '*':
            self._arpOpc = 'any'
        else:
            self._arpOpc = value

    @staticmethod
    def _port_from_string(value):
        """
        Convert a layer 4 port string (decimal number or well-known
        service name) to an int.

        Args:
            value (str): port number or service name

        Returns:
            int

        Raises:
            ValueError: if the string is neither numeric nor a known name
        """
        if re.match(r'^\d+$', value) is not None:
            return int(value)
        try:
            return ProtocolFilter._WELL_KNOWN_PORTS[value]
        except KeyError:
            raise ValueError('Unrecognized layer 4 port value in filter: ' + value)

    @property
    def dFromPort(self):
        """Lower bound of the destination port range, or 'any'."""
        return self._dFromPort

    @dFromPort.setter
    def dFromPort(self, value):
        if value == 'unspecified' or value == 'any' or value is None:
            self._dFromPort = 'any'
        elif isinstance(value, str):
            self._dFromPort = self._port_from_string(value)
        else:
            self._dFromPort = value

    @property
    def dToPort(self):
        """Upper bound of the destination port range, or 'any'."""
        return self._dToPort

    @dToPort.setter
    def dToPort(self, value):
        if value == 'unspecified' or value == 'any' or value is None:
            self._dToPort = 'any'
        elif isinstance(value, str):
            self._dToPort = self._port_from_string(value)
        else:
            self._dToPort = value

    @property
    def dPort(self):
        """Destination port range rendered as 'from-to'."""
        return '{0}-{1}'.format(self.dFromPort, self.dToPort)

    @dPort.setter
    def dPort(self, value):
        """Set both dFromPort and dToPort from a range or single value."""
        fields = re.split(r'[\s-]+', value)
        if len(fields) > 1:
            self.dFromPort = fields[0]
            self.dToPort = fields[1]
        elif len(fields) == 1:
            self.dFromPort = fields[0]
            self.dToPort = fields[0]

    @property
    def sFromPort(self):
        """Lower bound of the source port range, or 'any'."""
        return self._sFromPort

    @sFromPort.setter
    def sFromPort(self, value):
        if value == 'unspecified' or value == 'any' or value is None:
            self._sFromPort = 'any'
        elif isinstance(value, str):
            self._sFromPort = self._port_from_string(value)
        else:
            self._sFromPort = value

    @property
    def sToPort(self):
        """Upper bound of the source port range, or 'any'."""
        return self._sToPort

    @sToPort.setter
    def sToPort(self, value):
        if value == 'unspecified' or value == 'any' or value is None:
            self._sToPort = 'any'
        elif isinstance(value, str):
            self._sToPort = self._port_from_string(value)
        else:
            self._sToPort = value

    @property
    def sPort(self):
        """Source port range rendered as 'from-to'."""
        return '{0}-{1}'.format(self.sFromPort, self.sToPort)

    @sPort.setter
    def sPort(self, value):
        """Set both sFromPort and sToPort from a range or single value."""
        fields = re.split(r'[\s-]+', value)
        if len(fields) > 1:
            self.sFromPort = fields[0]
            self.sToPort = fields[1]
        elif len(fields) == 1:
            self.sFromPort = fields[0]
            self.sToPort = fields[0]

    @property
    def etherT(self):
        """EtherType (e.g. 'ip', 'arp') or 'any'."""
        return self._etherT

    @etherT.setter
    def etherT(self, value):
        if value == 'unspecified' or value is None:
            self._etherT = 'any'
        else:
            self._etherT = value

    @property
    def prot(self):
        """IP protocol (e.g. 'tcp', 'udp') or 'any'."""
        return self._prot

    @prot.setter
    def prot(self, value):
        if value == 'unspecified' or value is None:
            self._prot = 'any'
        else:
            self._prot = value
        # a concrete IP-layer protocol implies etherT 'ip' when it is unset
        if self.etherT == 'any':
            if value in ['icmp', 'igmp', 'tcp', 'egp', 'igp', 'udp',
                         'icmpv6', 'eigrp', 'ospfigp', 'pim', 'l2tp']:
                self.etherT = 'ip'

    @property
    def tcpRules(self):
        """TCP flag rules (e.g. 'syn') or 'any'."""
        return self._tcpRules

    @tcpRules.setter
    def tcpRules(self, value):
        if value == 'unspecified':
            self._tcpRules = 'any'
        else:
            # NOTE(review): unlike the other setters this does not
            # normalize None or '*' -- confirm whether that is intended
            self._tcpRules = value
        # concrete TCP rules imply the tcp protocol when prot is unset
        if self.prot == 'any' and self.tcpRules != 'any':
            self.prot = 'tcp'

    def overlap(self, other):
        """
        Return a ProtocolFilter that is the intersection of self and other.

        Args:
            other (ProtocolFilter)

        Returns:
            ProtocolFilter or None: a filter matching exactly the traffic
            matched by both inputs, or None when the intersection is empty
        """
        result = ProtocolFilter()
        if self.applyToFrag != 'any' and other.applyToFrag != 'any':
            if self.applyToFrag != other.applyToFrag:
                return None
        result.applyToFrag = other.applyToFrag if self.applyToFrag == 'any' else self.applyToFrag
        if self.arpOpc != 'any' and other.arpOpc != 'any':
            if self.arpOpc != other.arpOpc:
                return None
        result.arpOpc = other.arpOpc if self.arpOpc == 'any' else self.arpOpc
        # destination ports: intersect the ranges, treating 'any' as open
        if self.dFromPort == 'any':
            result.dFromPort = other.dFromPort
        elif other.dFromPort == 'any':
            result.dFromPort = self.dFromPort
        else:
            result.dFromPort = max(self.dFromPort, other.dFromPort)
        if self.dToPort == 'any':
            result.dToPort = other.dToPort
        elif other.dToPort == 'any':
            result.dToPort = self.dToPort
        else:
            result.dToPort = min(self.dToPort, other.dToPort)
        # only a fully numeric range can be empty; the original compared
        # 'any' against ints here, a TypeError on Python 3
        if result.dFromPort != 'any' and result.dToPort != 'any' \
                and result.dFromPort > result.dToPort:
            return None
        # source ports: same treatment
        if self.sFromPort == 'any':
            result.sFromPort = other.sFromPort
        elif other.sFromPort == 'any':
            result.sFromPort = self.sFromPort
        else:
            result.sFromPort = max(self.sFromPort, other.sFromPort)
        if self.sToPort == 'any':
            result.sToPort = other.sToPort
        elif other.sToPort == 'any':
            result.sToPort = self.sToPort
        else:
            result.sToPort = min(self.sToPort, other.sToPort)
        if result.sFromPort != 'any' and result.sToPort != 'any' \
                and result.sFromPort > result.sToPort:
            return None
        # the original used "is"/"is not" against the 'any' literal below,
        # which only works because CPython interns the string; use equality
        if self.etherT != 'any' and other.etherT != 'any':
            if self.etherT != other.etherT:
                return None
        result.etherT = other.etherT if self.etherT == 'any' else self.etherT
        if self.prot != 'any' and other.prot != 'any':
            if self.prot != other.prot:
                return None
        result.prot = other.prot if self.prot == 'any' else self.prot
        if self.tcpRules != 'any' and other.tcpRules != 'any':
            if self.tcpRules != other.tcpRules:
                return None
        result.tcpRules = other.tcpRules if self.tcpRules == 'any' else self.tcpRules
        return result

    def __str__(self):
        """One-line summary: etherT, prot (or arpOpc for arp), dport, sport."""
        dport = '{0}-{1}'.format(self.dFromPort, self.dToPort)
        sport = '{0}-{1}'.format(self.sFromPort, self.sToPort)
        return '{0:4} {1:4} {2:11} {3:11}'.format(self.etherT,
                                                  self.arpOpc if self.etherT == 'arp' else self.prot,
                                                  dport,
                                                  sport)

    def _port_equal(self, other):
        """True when all four port bounds match."""
        return (self.dFromPort == other.dFromPort and
                self.dToPort == other.dToPort and
                self.sFromPort == other.sFromPort and
                self.sToPort == other.sToPort)

    def __eq__(self, other):
        """
        Field-wise equality.  The original implementation omitted prot, so
        filters differing only in protocol compared equal; fixed here.
        """
        if self.applyToFrag != other.applyToFrag:
            return False
        if self.arpOpc != other.arpOpc:
            return False
        if self.etherT != other.etherT:
            return False
        if self.prot != other.prot:
            return False
        if not self._port_equal(other):
            return False
        if self.tcpRules != other.tcpRules:
            return False
        return True

    def __gt__(self, other):
        """
        True when any port bound exceeds the other's corresponding bound.

        NOTE(review): assumes all port fields are numeric; comparing the
        wildcard 'any' against a number raises TypeError on Python 3.
        """
        if self.dFromPort > other.dFromPort:
            return True
        if self.sFromPort > other.sFromPort:
            return True
        if self.dToPort > other.dToPort:
            return True
        if self.sToPort > other.sToPort:
            return True
        return False

    def __ge__(self, other):
        """Greater-than or port-wise equal."""
        return self > other or self._port_equal(other)

    def __lt__(self, other):
        """Logical negation of __ge__."""
        return not self >= other

    def __le__(self, other):
        """Less-than or port-wise equal."""
        return self < other or self._port_equal(other)
class SubFlowSpec(object):
    """
    One side of a flow -- either the source or the destination -- without
    the layer 4 port numbers.
    """
    def __init__(self, tenant, context, ip):
        """
        Args:
            tenant (str): tenant name
            context (str): context (VRF) name
            ip: address or list of addresses for this side of the flow
        """
        self.ip = ip
        self.tenant_name = tenant
        self.context_name = context
class FlowSpec(object):
    """
    Specification of a flow: tenant/context scope, source and destination
    address lists (IpAddress objects), and a list of protocol filters.
    """
    def __init__(self):
        # both sides default to the universal 0/0 prefix
        self._sip = [IpAddress('0/0')]
        self._dip = [IpAddress('0/0')]
        self.tenant_name = '*'
        self.context_name = '*'
        self.protocol_filter = []

    def get_source(self):
        """Return the source side of this flow as a SubFlowSpec."""
        return SubFlowSpec(self.tenant_name, self.context_name, self.sip)

    def get_dest(self):
        """Return the destination side of this flow as a SubFlowSpec."""
        return SubFlowSpec(self.tenant_name, self.context_name, self.dip)

    @property
    def sip(self):
        """list of IpAddress: source addresses."""
        return self._sip

    @sip.setter
    def sip(self, value):
        # accepts a list, a single prefix string, or a single IpAddress
        if isinstance(value, list):
            self._sip = value
        elif isinstance(value, str):
            self._sip = [IpAddress(value)]
        else:
            assert isinstance(value, IpAddress)
            self._sip = [value]

    @property
    def dip(self):
        """list of IpAddress: destination addresses."""
        return self._dip

    @dip.setter
    def dip(self, value):
        # accepts a list, a single prefix string, or a single IpAddress
        if isinstance(value, list):
            self._dip = value
        elif isinstance(value, str):
            self._dip = [IpAddress(value)]
        else:
            assert isinstance(value, IpAddress)
            self._dip = [value]

    def __str__(self):
        """
        Multi-line table: the first line carries tenant/context plus the
        first source, dest and filter; extra list entries continue on the
        following lines.

        NOTE(review): assumes sip, dip and protocol_filter are non-empty;
        an empty list raises IndexError here.
        """
        extras = max(len(self.sip), len(self.dip), len(self.protocol_filter))
        full_sip = sorted(self.sip)
        full_dip = sorted(self.dip)
        tc = '{0}/{1}'.format(self.tenant_name, self.context_name)
        line_format = '{0:20} {1:18} {2:18} {3:28}\n'
        result = line_format.format(tc, full_sip[0], full_dip[0], self.protocol_filter[0])
        if extras > 1:
            for index in range(1, extras):
                dip = ''
                sip = ''
                prot_filter = ''
                if len(full_dip) > index:
                    dip = full_dip[index]
                if len(full_sip) > index:
                    sip = full_sip[index]
                if len(self.protocol_filter) > index:
                    prot_filter = str(self.protocol_filter[index])
                result += line_format.format('', sip, dip, prot_filter)
        return result

    def __eq__(self, other):
        """
        Equality: same tenant and context, and the same sets of source
        prefixes, destination prefixes, and filter strings (list order is
        irrelevant).
        """
        if self.tenant_name != other.tenant_name:
            return False
        if self.context_name != other.context_name:
            return False
        if {str(item.prefix) for item in self.dip} != \
                {str(item.prefix) for item in other.dip}:
            return False
        if {str(item.prefix) for item in self.sip} != \
                {str(item.prefix) for item in other.sip}:
            return False
        if {str(item) for item in self.protocol_filter} != \
                {str(item) for item in other.protocol_filter}:
            return False
        return True

    def __gt__(self, other):
        """
        Ordering: compare tenant, then context, then the source address
        lists element-wise (ties broken by length), then the destination
        lists the same way.

        Args:
            other (FlowSpec)

        Returns:
            bool
        """
        if self == other:
            return False
        if self.tenant_name > other.tenant_name:
            return True
        if self.tenant_name < other.tenant_name:
            return False
        if self.context_name > other.context_name:
            return True
        if self.context_name < other.context_name:
            return False
        num_comps = min(len(self.sip), len(other.sip))
        for index in range(num_comps):
            if self.sip[index] > other.sip[index]:
                return True
            # was self._sip[index]: use the public accessor consistently
            elif self.sip[index] < other.sip[index]:
                return False
        if len(self.sip) > len(other.sip):
            return True
        elif len(self.sip) < len(other.sip):
            return False
        num_comps = min(len(self.dip), len(other.dip))
        for index in range(num_comps):
            if self.dip[index] > other.dip[index]:
                return True
            elif self.dip[index] < other.dip[index]:
                return False
        if len(self.dip) > len(other.dip):
            return True
        elif len(self.dip) < len(other.dip):
            return False
        return False

    def __lt__(self, other):
        """Strictly less: neither equal nor greater."""
        if self == other:
            return False
        if self > other:
            return False
        return True

    def __ge__(self, other):
        """Equal or greater."""
        if self == other:
            return True
        if self > other:
            return True
        return False

    def __le__(self, other):
        """Equal or less."""
        if self == other:
            return True
        if self < other:
            return True
        return False
class SearchDb(object):
    """
    This class will build the database used by the search: per-context
    radix trees mapping IP prefixes to EPGs, a mapping from EPGs to the
    contracts they provide/consume, and a mapping from contracts to their
    filter entries.
    """
    def __init__(self, session=None):
        """
        Initially this will be built using just dictionaries. In the future, it may make sense to
        create an SQL db to hold all of the info.
        :param session: optional Session used to read the model from the APIC
        :return:
        """
        # EPG -> list of {'pro_con', 'location', 'contract'} records
        self.epg_contract = {}
        # (tenant, contract) -> list of FilterEntry objects
        self.contract_filter = {}
        self.session = session
        # Context -> radix tree whose nodes carry {'epg', 'location'}
        self.context_radix = {}
        self.tenants_by_name = {}
        # (tenant name, context name) -> Context object
        self.context_by_name = {}
        self.initialized = False
    def build(self, tenants=None):
        """
        This will read in all of the model and from there build-out the data base
        :param tenants: optional pre-fetched tenant list; read via
            Tenant.get_deep(self.session) when omitted
        :return:
        """
        if tenants is None:
            tenants = Tenant.get_deep(self.session)
        for tenant in tenants:
            self.tenants_by_name[tenant.name] = tenant
            contexts = tenant.get_children(Context)
            for context in contexts:
                self.context_by_name[(tenant.name, context.name)] = context
            app_profiles = tenant.get_children(AppProfile)
            contracts = tenant.get_children(Contract)
            outside_l3s = tenant.get_children(OutsideL3)
            # internal EPGs come from app profiles, external from OutsideL3s
            for app_profile in app_profiles:
                epgs = app_profile.get_children(EPG)
                self.build_ip_epg(epgs)
                self.build_epg_contract(epgs)
            for outside_l3 in outside_l3s:
                self.build_ip_epg_outside_l3(outside_l3)
                self.build_epg_contract_outside_l3(outside_l3)
            self.build_contract_filter(contracts)
        self.initialized = True
    def build_ip_epg(self, epgs):
        """
        This will build the ip to epg mapping for internal EPGs by adding
        each endpoint IP to the radix tree of the EPG's context.
        :param epgs:
        """
        for epg in epgs:
            eps = epg.get_children(Endpoint)
            bridge_domain = epg.get_bd()
            # the context is reached through the EPG's bridge domain
            if bridge_domain is not None:
                context = bridge_domain.get_context()
            else:
                context = None
            if context not in self.context_radix:
                self.context_radix[context] = radix.Radix()
            for ep in eps:
                ip = IpAddress(ep.ip)
                node = self.context_radix[context].add(str(ip))
                node.data['epg'] = epg
                node.data['location'] = 'internal'
    def build_ip_epg_outside_l3(self, outside_l3):
        """
        will build ip_epg db from OutsideL3: each OutsideNetwork subnet of
        each OutsideEPG is added to the context's radix tree
        :param outside_l3:
        :return:
        """
        context = outside_l3.get_context()
        if context not in self.context_radix:
            self.context_radix[context] = radix.Radix()
        outside_epgs = outside_l3.get_children(OutsideEPG)
        for outside_epg in outside_epgs:
            subnets = outside_epg.get_children(OutsideNetwork)
            for subnet in subnets:
                ip = IpAddress(subnet.get_addr())
                node = self.context_radix[context].add(str(ip))
                node.data['epg'] = outside_epg
                node.data['location'] = "external"
    def show_ip_epg(self):
        """
        Will simply print the ip_epg table.
        NOTE: uses the Python 2 print statement (this module predates
        Python 3 support).
        :return:
        """
        for vrf in self.context_radix:
            context = vrf
            tenant = context.get_parent()
            for node in self.context_radix[vrf]:
                context_str = "{0}/{1}".format(tenant, context)
                epg = node.data['epg']
                app_profile = epg.get_parent()
                print "{4:10} {0:40} {1:30} {2}/{3}".format(context_str, node.prefix, app_profile, epg,
                                                            node.data['location'])
    def build_epg_contract(self, epgs):
        """
        This will build the epg to contract mapping for internal EPGs,
        recording one {'pro_con', 'location', 'contract'} entry per
        consumed and per provided contract.
        :param epgs:
        :return:
        """
        for epg in epgs:
            consumed_contracts = epg.get_all_consumed()
            provided_contracts = epg.get_all_provided()
            full_epg = epg
            if full_epg not in self.epg_contract:
                self.epg_contract[full_epg] = []
            for contract in consumed_contracts:
                contract_tenant = contract.get_parent()
                contract_record = {'pro_con': 'consume',
                                   'location': 'internal',
                                   'contract': (contract_tenant, contract)}
                self.epg_contract[full_epg].append(contract_record)
            for contract in provided_contracts:
                contract_tenant = contract.get_parent()
                contract_record = {'pro_con': 'provide',
                                   'location': 'internal',
                                   'contract': (contract_tenant, contract)}
                self.epg_contract[full_epg].append(contract_record)
    def build_epg_contract_outside_l3(self, outside_l3):
        # same as build_epg_contract but for the external EPGs of an
        # OutsideL3, tagged with location 'external'
        outside_epgs = outside_l3.get_children(OutsideEPG)
        for outside_epg in outside_epgs:
            consumed_contracts = outside_epg.get_all_consumed()
            provided_contracts = outside_epg.get_all_provided()
            full_epg = outside_epg
            if full_epg not in self.epg_contract:
                self.epg_contract[full_epg] = []
            for contract in consumed_contracts:
                contract_tenant = contract.get_parent()
                contract_record = {'pro_con': 'consume',
                                   'location': 'external',
                                   'contract': (contract_tenant, contract)}
                self.epg_contract[full_epg].append(contract_record)
            for contract in provided_contracts:
                contract_tenant = contract.get_parent()
                contract_record = {'pro_con': 'provide',
                                   'location': 'external',
                                   'contract': (contract_tenant, contract)}
                self.epg_contract[full_epg].append(contract_record)
    def show_epg_contract(self):
        """
        Will simply print the epg_contract table.
        NOTE(review): the keys of epg_contract are EPG objects (see
        build_epg_contract), but this unpacks each key as a 3-tuple --
        looks like stale code; confirm before relying on it.
        :return:
        """
        for entry in self.epg_contract:
            (epg_tenant, app_profile, epg) = entry
            for contract_entry in self.epg_contract[entry]:
                (contract_tenant, contract) = contract_entry['contract']
                pro_con = contract_entry['pro_con']
                int_ext = contract_entry['location']
                print "{6:9} {0:20} {1:20} {2:20} {3:20} {4:20} {5:20}" \
                    .format(epg_tenant, app_profile, epg, pro_con, contract_tenant, contract, int_ext)
    def build_contract_filter(self, contracts):
        """
        This will build the contract to filter mapping: every FilterEntry
        of every filter of every subject of the contract is recorded under
        the (tenant, contract) key.
        :param contracts:
        :return:
        """
        for contract in contracts:
            tenant = contract.get_parent()
            subjects = contract.get_children(ContractSubject)
            if (tenant, contract) not in self.contract_filter:
                self.contract_filter[(tenant, contract)] = []
            for subject in subjects:
                filters = subject.get_filters()
                for aci_filter in filters:
                    filter_entries = aci_filter.get_children(FilterEntry)
                    for filter_entry in filter_entries:
                        self.contract_filter[(tenant, contract)].append(filter_entry)
    def show_contract_filter(self):
        # print the contract_filter table (Python 2 print statement)
        for (tenant, contract) in self.contract_filter:
            filters = self.contract_filter[(tenant, contract)]
            for filter_entry in filters:
                print "{0:20} {1:20} {2:20}".format(tenant, contract, filter_entry)
    def search(self, flow_spec):
        """
        Given a flow_spec, this will return a set of flow specs from the db that match
        Match is defined as having a non-empty intersection.
        The returned flow_specs will all be within the intersection.
        The steps are:
        find matching IP addresses for the source
        find corresponding EPGs
        find corresponding consumed contracts
        find matching IP addresses for the destination
        find corresponding EPGs
        find corresponding provided contracts
        find intersection of contracts
        build flow specs.
        :param flow_spec:
        :return:
        """
        result = []
        # first convert name of tenant and context to tenant and context objects
        consumed_contracts = self.find_contracts(flow_spec.get_source(), 'consume')
        provided_contracts = self.find_contracts(flow_spec.get_dest(), 'provide')
        # a connection exists where a contract consumed on the source side
        # is also provided on the destination side
        connections = []
        for c_contract in consumed_contracts:
            for p_contract in provided_contracts:
                if c_contract['contract'] == p_contract['contract']:
                    connections.append({'source': c_contract['prefix'],
                                        'source_epg': c_contract['epg'],
                                        'dest': p_contract['prefix'],
                                        'dest_epg': p_contract['epg'],
                                        'contract': c_contract['contract']})
        for connection in connections:
            filters = self.contract_filter[connection['contract']]
            matching_filters = []
            # keep only filters whose intersection with the requested
            # protocol filters is non-empty
            for aci_filter in filters:
                for fs_p_filter in flow_spec.protocol_filter:
                    overlap_filter = fs_p_filter.overlap(ProtocolFilter(aci_filter))
                    if overlap_filter is not None:
                        matching_filters.append(overlap_filter)
            if len(matching_filters) > 0:
                result.append(self._build_result_flow_spec(connection, matching_filters))
        # for flow_spec in result:
        #     print flow_spec
        return result
    @staticmethod
    def _build_result_flow_spec(connection, matching_filters):
        # assemble one result FlowSpec from a matched connection record
        result = FlowSpec()
        result.tenant_name = connection['source_epg'].get_parent().get_parent().name
        source_epg = connection['source_epg']
        # external EPGs reach their context through the OutsideL3 parent,
        # internal EPGs through their bridge domain
        if isinstance(source_epg, OutsideEPG):
            result.context_name = source_epg.get_parent().get_context().name
        else:
            result.context_name = source_epg.get_bd().get_context().name
        result.sip = connection['source']
        result.dip = connection['dest']
        result.protocol_filter = matching_filters
        return result
    def find_contracts(self, subflow_spec, pro_con):
        """
        This will find all the contracts that are either provided or consumed by the
        subflow_spec
        :param subflow_spec:
        :param pro_con: either 'provide' or 'consume'
        :return: list of {'contract', 'prefix', 'epg'} dicts
        """
        # resolve wildcard tenant/context names against the cached objects
        tenants = []
        tenant_search = '^' + subflow_spec.tenant_name.replace('*', '.*') + '$'
        for tenant_name in self.tenants_by_name:
            match_result = re.match(tenant_search, tenant_name)
            if match_result is not None:
                tenants.append(self.tenants_by_name[tenant_name])
        contexts = []
        context_search = '^' + subflow_spec.context_name.replace('*', '.*') + '$'
        for tenant in tenants:
            for (tenant_name, context_name) in self.context_by_name:
                match_result = re.match(context_search, context_name)
                if match_result is not None and tenant_name == tenant.name:
                    contexts.append(self.context_by_name[(tenant_name, context_name)])
        epgs_prefix = {}
        nodes = []
        for context in contexts:
            if context in self.context_radix:
                # cover both the case where what we are looking for is covered by a prefix
                # and where it covers more than one address.
                for ip in subflow_spec.ip:
                    node = self.context_radix[context].search_best(str(ip.prefix))
                    if node is not None:
                        if node not in nodes:
                            nodes.append(node)
                    temp_nodes = self.context_radix[context].search_covered(str(ip.prefix))
                    for node in temp_nodes:
                        if node not in nodes:
                            nodes.append(node)
        # now have all the nodes; group the overlapping prefixes by EPG
        if nodes is not None:
            for node in nodes:
                for ip in subflow_spec.ip:
                    ovlp = ip.overlap(IpAddress(node.prefix))
                    if ovlp is not None:
                        if node.data['epg'] not in epgs_prefix:
                            epgs_prefix[node.data['epg']] = []
                        if ovlp not in epgs_prefix[node.data['epg']]:
                            epgs_prefix[node.data['epg']].append(ovlp)
        result = []
        for epg in epgs_prefix:
            if epg in self.epg_contract:
                for entry in self.epg_contract[epg]:
                    if entry['pro_con'] == pro_con:
                        result.append({'contract': entry['contract'],
                                       'prefix': IpAddress.simplify(epgs_prefix[epg]),
                                       'epg': epg})
        return result
def parse_port_range(text):
    """
    Parse a layer 4 port range or single value.

    Accepts 'a-b' (whitespace around the dash allowed), a single decimal
    number, or the wildcard 'any'.

    Args:
        text (str): port range specification

    Returns:
        tuple of str: (from_port, to_port); ('any', 'any') for 'any'

    Raises:
        ValueError: if the text is not a number, a range, or 'any'
    """
    if text == 'any':
        return 'any', 'any'
    # raw strings fix the invalid '\d' escape in the original patterns
    match_result = re.match(r'(\d+)\W*-\W*(\d+)', text)
    if match_result is not None:
        return match_result.group(1), match_result.group(2)
    if re.match(r'^(\d+)$', text) is None:
        raise ValueError('Value error in port range. Must be either single number or "#-#". Value given:' + text)
    return text, text
def build_flow_spec_from_args(args):
    """
    Construct a FlowSpec from the parsed command line arguments.

    Args:
        args: namespace carrying tenant, context, sip, dip, dport, sport
            and the protocol filter fields

    Returns:
        FlowSpec
    """
    spec = FlowSpec()
    spec.tenant_name = args.tenant
    spec.context_name = args.context
    spec.sip = [IpAddress(args.sip)]
    spec.dip = [IpAddress(args.dip)]
    proto_filter = ProtocolFilter()
    spec.protocol_filter.append(proto_filter)
    # assignment order matters: the prot setter may adjust etherT
    proto_filter.applyToFrag = args.applyToFrag
    proto_filter.arpOpc = args.arpOpc
    proto_filter.etherT = args.etherT
    proto_filter.prot = args.prot
    proto_filter.tcpRules = args.tcpRules
    proto_filter.dFromPort, proto_filter.dToPort = parse_port_range(args.dport)
    proto_filter.sFromPort, proto_filter.sToPort = parse_port_range(args.sport)
    return spec
def main():
    """
    Main execution path when run from the command line: parse arguments,
    log in to the APIC, build the search database and print the sorted
    search results.
    """
    # Get all the arguments
    description = 'Connection Search tool for APIC.'
    creds = Credentials('apic', description)
    creds.add_argument('-tenant', type=str, default='*', help='Tenant name (wildcards, "*", accepted), default "*"')
    # NOTE(review): the -context help text below says "Tenant name" -- looks copy-pasted; confirm.
    creds.add_argument('-context', type=str, default='*', help='Tenant name (wildcards, "*", accepted), default "*"')
    creds.add_argument('-sip', type=str, default='0/0', help='Source IP or subnet - e.g. 1.2.3.4/24, default: "0/0"')
    creds.add_argument('-dip', type=str, default='0/0',
                       help='Destination IP or subnet - e.g. 1.2.3.4/24, default: "0/0"')
    creds.add_argument('-dport', type=str, default='any',
                       help='Destination L4 Port value or range, e.g. 20-25 or 80. Default: "any"')
    creds.add_argument('-sport', type=str, default='any',
                       help='Source L4 Port value or range, e.g. 20-25 or 80. Default: "any"')
    creds.add_argument('-etherT', type=str, default='any', help='EtherType, e.g. "ip", "arp", "icmp". Default: "any"')
    creds.add_argument('-prot', type=str, default='any', help='Protocol, e.g. "tcp", "udp". Default: "any"')
    creds.add_argument('-arpOpc', type=str, default='any', help='ARP Opcode, e.g. "req", "ack". Default: "any"')
    creds.add_argument('-applyToFrag', type=str, default='any',
                       help='Apply to fragment, e.g. "yes", "no". Default: "any"')
    creds.add_argument('-tcpRules', type=str, default='any', help='TCP rules, e.g. "syn", "fin". Default: "any"')
    args = creds.get()
    flow_spec = build_flow_spec_from_args(args)
    # todo: verify that a dash can be used in port range.
    # Login to APIC
    session = Session(args.url, args.login, args.password)
    resp = session.login()
    if not resp.ok:
        # Python 2 print statement (this module predates Python 3 support)
        print '%% Could not login to APIC'
        sys.exit(0)
    # build the full search database, run the query and print the results
    sdb = SearchDb(session)
    sdb.build()
    results = sorted(sdb.search(flow_spec))
    for result in results:
        print result
# Script entry point; swallow Ctrl-C so interactive use exits cleanly.
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        pass
| [
"edsall@cisco.com"
] | edsall@cisco.com |
5924cabf2086eb1004403a08e7cae90926f54713 | 19a69a23bb7e9482eee476fdc9255a0480a268e7 | /paysage/metrics/generator_metrics.py | 360211f1eaab69ce5aa955dc34ff9751814dd141 | [
"MIT"
] | permissive | shevisj/paysage | a6f3fb6f0ccdb041bbbd68fa86e8be1e683390f4 | 5275d56c3caac7ddb1b1baa87060f0b7c3a54aa5 | refs/heads/master | 2023-04-26T01:13:33.482572 | 2021-05-23T08:08:38 | 2021-05-23T08:08:38 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,830 | py | from math import sqrt, log
from .. import math_utils
from .. import backends as be
# ----- CLASSES ----- #
class ReconstructionError(object):
    """
    Root-mean-squared error between observations and their reconstructions,
    accumulated over minibatches and rescaled by each minibatch's variance.
    """
    def __init__(self, name='ReconstructionError'):
        """
        Create a ReconstructionError object.

        Args:
            name (str; optional): metric name

        Returns:
            ReconstructionError
        """
        self.name = name
        self.calc = math_utils.MeanCalculator()

    def reset(self) -> None:
        """
        Reset the metric to its initial state.

        Args:
            None

        Returns:
            None
        """
        self.calc.reset()

    def update(self, assessment) -> None:
        """
        Fold a batch of observations and reconstructions into the running
        mean of the variance-normalized squared error.

        Args:
            assessment (ModelAssessment): uses data_state and reconstructions

        Returns:
            None
        """
        observed = assessment.data_state.get_visible()
        reconstructed = assessment.reconstructions.get_visible()
        # EPSILON keeps the normalization finite for constant batches
        batch_var = be.EPSILON + be.var(observed)
        normalized_mse = be.mean(
            be.square(be.subtract(reconstructed, observed)), axis=1) / batch_var
        self.calc.update(normalized_mse)

    def value(self) -> float:
        """
        Get the value of the reconstruction error.

        Args:
            None

        Returns:
            reconstruction error (float), or None before any update
        """
        if self.calc.num is None:
            return None
        return sqrt(self.calc.mean)
class EnergyCoefficient(object):
    """
    Normalized energy distance between two distributions, estimated from
    minibatches of sampled configurations.

    Szekely, G.J. (2002)
    E-statistics: The Energy of Statistical Samples.
    Technical Report BGSU No 02-16.
    """
    def __init__(self, name='EnergyCoefficient'):
        """
        Create EnergyCoefficient object.

        Args:
            name (str; optional): metric name

        Returns:
            EnergyCoefficient object
        """
        self.name = name
        self.calc = math_utils.MeanCalculator()

    def reset(self) -> None:
        """
        Reset the metric to its initial state.

        Args:
            None

        Returns:
            None
        """
        self.calc.reset()

    def _energy_coefficient(self, x, y):
        """
        Compute the energy coefficient from pairwise distances.

        Args:
            x (tensor ~ (num_samples_x, num_units))
            y (tensor ~ (num_samples_y, num_units))

        Returns:
            float
        """
        cross = be.mean(math_utils.pdist(x, y))
        within_x = be.mean(math_utils.pdist(x, x))
        within_y = be.mean(math_utils.pdist(y, y))
        # clamp at zero before the square root; EPSILON guards division
        numerator = 2*cross - within_x - within_y
        return sqrt(max(0, numerator / max(2*cross, be.EPSILON)))

    def update(self, assessment) -> None:
        """
        Fold a batch of observations and fantasy particles into the
        running estimate of the energy coefficient.

        Args:
            assessment (ModelAssessment): uses data_state and model_state

        Returns:
            None
        """
        coeff = self._energy_coefficient(assessment.data_state[0],
                                         assessment.model_state[0])
        self.calc.update(be.float_tensor([coeff]))

    def value(self) -> float:
        """
        Get the value of the energy coefficient.

        Args:
            None

        Returns:
            energy coefficient (float), or None before any update
        """
        if self.calc.num is None:
            return None
        return self.calc.mean
class KLDivergence(object):
    r"""
    Forward KL divergence between two samples, estimated with the
    k-nearest-neighbor method of:

    "Divergence Estimation for Multidimensional Densities Via k-Nearest Neighbor
    Distances"
    by Qing Wang, Sanjeev R. Kulkarni and Sergio Verdu

    KL(P || Q) = \int dx p(x) log(p(x)/q(x))

    p ~ data samples
    q ~ model samples

    We provide the option to remove dependence on dimension, true by default.
    """
    def __init__(self, k=5, name='KLDivergence', divide_dimension=True):
        """
        Create KLDivergence object.

        Args:
            k (int; optional): which nearest neighbor to use
            name (str; optional): metric name
            divide_dimension (bool; optional): whether to divide the divergence
                by the number of dimensions

        Returns:
            KLDivergence object
        """
        self.name = name
        self.k = k
        self.divide_dim = divide_dimension
        self.calc = math_utils.MeanCalculator()

    def reset(self) -> None:
        """
        Reset the metric to its initial state.

        Args:
            None

        Returns:
            None
        """
        self.calc.reset()

    @classmethod
    def klpq(cls, x, y, k, divide_dim):
        """
        Estimate the forward KL divergence from samples x ~ P and y ~ Q.

        Args:
            x (tensor ~ (num_samples_x, num_units))
            y (tensor ~ (num_samples_y, num_units))
            k (int): which nearest neighbor to use
            divide_dim (bool): divide out the dimension of the space

        Returns:
            float
        """
        num_x = len(x)
        num_y = len(y)
        # k+1 within-sample neighbors: the nearest point is the point itself
        _, within_dist = math_utils.find_k_nearest_neighbors(x, x, k+1)
        _, cross_dist = math_utils.find_k_nearest_neighbors(x, y, k)
        be.clip_(within_dist, a_min=be.EPSILON)
        be.clip_(cross_dist, a_min=be.EPSILON)
        dim = 1.0 if divide_dim else be.shape(x)[1]
        return dim * be.tsum(be.log(cross_dist / within_dist)) / num_x \
            + log(num_y / (num_x - 1))

    def update(self, assessment) -> None:
        """
        Fold a batch of observations and fantasy particles into the
        running estimate of the KL divergence.

        Args:
            assessment (ModelAssessment): uses data_state and model_state

        Returns:
            None
        """
        estimate = self.klpq(assessment.data_state[0], assessment.model_state[0],
                             self.k, self.divide_dim)
        self.calc.update(be.float_tensor([estimate]))

    def value(self) -> float:
        """
        Get the value of the KL divergence estimation.

        Args:
            None

        Returns:
            KL divergence estimation (float), or None before any update
        """
        if self.calc.num is None:
            return None
        return self.calc.mean
class ReverseKLDivergence(object):
    r"""
    Reverse KL divergence between two samples, estimated with the
    k-nearest-neighbor method of:

    "Divergence Estimation for Multidimensional Densities Via k-Nearest Neighbor
    Distances"
    by Qing Wang, Sanjeev R. Kulkarni and Sergio Verdu

    KL(P || Q) = \int dx p(x) log(p(x)/q(x))

    p ~ model samples
    q ~ data samples

    We provide the option to divide out the dimension.
    """
    def __init__(self, k=5, name='ReverseKLDivergence', divide_dimension=True):
        """
        Create ReverseKLDivergence object.

        Args:
            k (int; optional): which nearest neighbor to use
            name (str; optional): metric name
            divide_dimension (bool; optional): whether to divide the divergence
                by the number of dimensions

        Returns:
            ReverseKLDivergence object
        """
        self.name = name
        self.k = k
        self.divide_dim = divide_dimension
        self.calc = math_utils.MeanCalculator()

    def reset(self) -> None:
        """
        Reset the metric to its initial state.

        Args:
            None

        Returns:
            None
        """
        self.calc.reset()

    @classmethod
    def klqp(cls, y, x, k, divide_dim):
        """
        Estimate the reverse KL divergence; neighbor distances are
        measured from the x sample.

        Args:
            y (tensor ~ (num_samples_y, num_units))
            x (tensor ~ (num_samples_x, num_units))
            k (int): which nearest neighbor to use
            divide_dim (bool): divide out the dimension of the space

        Returns:
            float
        """
        num_x = len(x)
        num_y = len(y)
        # k+1 within-sample neighbors: the nearest point is the point itself
        _, within_dist = math_utils.find_k_nearest_neighbors(x, x, k+1)
        _, cross_dist = math_utils.find_k_nearest_neighbors(x, y, k)
        be.clip_(within_dist, a_min=be.EPSILON)
        be.clip_(cross_dist, a_min=be.EPSILON)
        dim = 1.0 if divide_dim else be.shape(x)[1]
        return dim * be.tsum(be.log(cross_dist / within_dist)) / num_x \
            + log(num_y / (num_x - 1))

    def update(self, assessment) -> None:
        """
        Fold a batch of observations and fantasy particles into the
        running estimate of the reverse KL divergence.

        Args:
            assessment (ModelAssessment): uses data_state and model_state

        Returns:
            None
        """
        estimate = self.klqp(assessment.data_state[0], assessment.model_state[0],
                             self.k, self.divide_dim)
        self.calc.update(be.float_tensor([estimate]))

    def value(self) -> float:
        """
        Get the value of the reverse KL divergence estimate.

        Args:
            None

        Returns:
            reverse KL divergence estimate (float), or None before any update
        """
        if self.calc.num is None:
            return None
        return self.calc.mean
class JensenShannonDivergence(object):
    """
    Compute the JS divergence between two samples using the method of:
    "Divergence Estimation for Multidimensional Densities Via k-Nearest Neighbor
    Distances"
    by Qing Wang, Sanjeev R. Kulkarni and Sergio Verdu
    JS(P || Q) = 1/2*KL(P || 1/2(P + Q)) + 1/2*KL(Q || 1/2(P + Q))
    p ~ model samples
    q ~ data samples
    We provide the option to divide out by the dimension of the dataset.
    """
    def __init__(self, k=5, name='JensenShannonDivergence', divide_dimension=True):
        """
        Create JensenShannonDivergence object.
        Args:
            k (int; optional): which nearest neighbor to use
            name (str; optional): metric name
            divide_dimension (bool; optional): whether to divide the divergence
                by the number of dimensions
        Returns:
            JensenShannonDivergence object
        """
        # running mean of the per-batch JS estimates
        self.calc = math_utils.MeanCalculator()
        self.k = k
        self.name = name
        self.divide_dim = divide_dimension
    def reset(self) -> None:
        """
        Reset the metric to its initial state.
        Args:
            None
        Returns:
            None
        """
        self.calc.reset()
    def _js(self, x, y):
        """
        Compute the Jensen-Shannon divergence from k-nearest-neighbor
        distances; symmetric sum of two one-sided estimates.
        Args:
            x (tensor ~ (num_samples_x, num_units))
            y (tensor ~ (num_samples_y, num_units))
        Returns:
            float
        """
        js = 0
        n = len(x)
        m = len(y)
        if self.divide_dim:
            d = 1.0
        else:
            d = be.shape(x)[1] # the dimension of the space
        # First half: x is the reference sample.
        # k+1 self-neighbors -- presumably to skip each point's zero-distance
        # match with itself; TODO confirm against find_k_nearest_neighbors.
        _, x_dist = math_utils.find_k_nearest_neighbors(x, x, self.k+1)
        _, y_dist = math_utils.find_k_nearest_neighbors(x, y, self.k)
        # guard against zero distances before taking logs
        be.clip_(x_dist, a_min = be.EPSILON)
        be.clip_(y_dist, a_min = be.EPSILON)
        r = x_dist / y_dist
        js += log(2) - \
             be.tsum(be.logaddexp(be.zeros_like(r), log((n-1)/m) + d*be.log(r)))/n
        # Second half: same computation with the roles of x and y swapped.
        n = len(y)
        m = len(x)
        _, x_dist = math_utils.find_k_nearest_neighbors(y, y, self.k+1)
        _, y_dist = math_utils.find_k_nearest_neighbors(y, x, self.k)
        be.clip_(x_dist, a_min = be.EPSILON)
        be.clip_(y_dist, a_min = be.EPSILON)
        r = x_dist / y_dist
        js += log(2) - \
             be.tsum(be.logaddexp(be.zeros_like(r), log((n-1)/m) + d*be.log(r)))/n
        # average of the two one-sided terms
        return 0.5*js
    def update(self, assessment) -> None:
        """
        Update the estimate for the JS divergence using a batch
        of observations and a batch of fantasy particles.
        Args:
            assessment (ModelAssessment): uses data_state and model_state
        Returns:
            None
        """
        js = self._js(assessment.data_state[0], assessment.model_state[0])
        self.calc.update(be.float_tensor([js]))
    def value(self) -> float:
        """
        Get the value of the JS divergence estimate.
        Args:
            None
        Returns:
            JS divergence estimate (float), or None if never updated
        """
        if self.calc.num is not None:
            return self.calc.mean
        return None
class FrechetScore(object):
    """
    Frechet Score between two samples. Based on an idea from:
    "GANs Trained by a Two Time-Scale Update Rule Converge to a
    Local Nash Equilibrium"
    by Martin Heusel, Hubert Ramsauer, Thomas Unterthiner, Bernhard Nessler
    Sepp Hochreiter
    but without the inception network.
    """
    def __init__(self, name='FrechetScore'):
        """
        Construct a FrechetScore metric.
        Args:
            name (str; optional): metric name
        Returns:
            FrechetScore object
        """
        self.name = name
        # running mean of the per-batch scores
        self.calc = math_utils.MeanCalculator()
    def reset(self) -> None:
        """
        Return the metric to its freshly-constructed (empty) state.
        Args:
            None
        Returns:
            None
        """
        self.calc.reset()
    def _fid(self, x, y):
        """
        Frechet distance between Gaussian fits of the two samples:
        ||m1 - m2||^2 + Tr(C1) + Tr(C2) - 2 Tr(sqrt(C1 C2)).
        Args:
            x (tensor ~ (num_samples_x, num_units)): data
            y (tensor ~ (num_samples_y, num_units)): fantasy
        Returns:
            float
        """
        mean_x = be.mean(x, axis=0)
        mean_y = be.mean(y, axis=0)
        cov_x = be.cov(x, x)
        cov_y = be.cov(y, y)
        cross = be.matrix_sqrt(be.dot(cov_x, cov_y))
        return (be.tsum(be.square(mean_x - mean_y))
                + be.tsum(be.diag(cov_x))
                + be.tsum(be.diag(cov_y))
                - 2 * be.tsum(be.diag(cross)))
    def update(self, assessment) -> None:
        """
        Fold one batch of observations and fantasy particles into the
        running Frechet Score estimate.
        Args:
            assessment (ModelAssessment): uses data_state and model_state
        Returns:
            None
        """
        score = self._fid(assessment.data_state[0], assessment.model_state[0])
        # NOTE(review): sibling metrics wrap the scalar in a list before
        # be.float_tensor; reproduced as-is here -- confirm intended.
        self.calc.update(be.float_tensor(score))
    def value(self) -> float:
        """
        Current estimate of the Frechet Score.
        Args:
            None
        Returns:
            Frechet Score estimate (float), or None if never updated
        """
        if self.calc.num is None:
            return None
        return self.calc.mean
class HeatCapacity(object):
    """
    Heat capacity of the model per parameter.
    The heat capacity is taken to be the second cumulant of the energy
    (equivalently, the negative second derivative of the Gibbs free energy
    with respect to inverse temperature). It is estimated via Gibbs sampling
    started from random samples drawn from the visible layer's distribution,
    and rescaled by the number of parameters in the model.
    """
    def __init__(self, name='HeatCapacity'):
        """
        Construct a HeatCapacity metric.
        Args:
            name (str; optional): metric name
        Returns:
            None
        """
        self.name = name
        # tracks mean and variance of the rescaled energies
        self.calc = math_utils.MeanVarianceCalculator()
    def reset(self) -> None:
        """
        Return the metric to its freshly-constructed (empty) state.
        Args:
            None
        Returns:
            None
        """
        self.calc.reset()
    def update(self, assessment) -> None:
        """
        Fold one batch of model-state energies into the estimate.
        Args:
            assessment (ModelAssessment): uses model and model_state
        Returns:
            None
        """
        batch_energy = assessment.model.joint_energy(assessment.model_state)
        self.num_params = assessment.model.num_parameters()
        # Divide by the fourth root of the parameter count so the variance
        # reported by value() is scaled by its square root.
        self.calc.update(batch_energy / sqrt(sqrt(assessment.model.num_parameters())))
    def value(self) -> float:
        """
        Current heat capacity estimate (variance of rescaled energies).
        Args:
            None
        Returns:
            heat capacity (float), or None if never updated
        """
        return self.calc.var if self.calc.num else None
class WeightSparsity(object):
    """
    Weight sparsity metric:
        p = \sum_j(\sum_i w_ij^2)^2/\sum_i w_ij^4
    Reference:
        Tubiana, J., Monasson, R. (2017)
        Emergence of Compositional Representations in Restricted Boltzmann
        Machines, PRL 118, 138301 (2017)
    """
    def __init__(self, name='WeightSparsity'):
        """
        Construct a WeightSparsity metric.
        Args:
            name (str; optional): metric name
        Returns:
            WeightSparsity object
        """
        self.name = name
        # cached sparsity value; None until update() is called
        self.p = None
    def reset(self) -> None:
        """
        Discard any cached sparsity value.
        Args:
            None
        Returns:
            None
        """
        self.p = None
    def update(self, assessment) -> None:
        """
        Compute and cache the weight sparsity of the model.
        Notes:
            If a value is already cached it is kept; call reset() between
            model updates.
        Args:
            assessment (ModelAssessment): uses model
        Returns:
            None
        """
        if self.p is None:
            # TODO: should this use the weights of all of the layers?
            w = assessment.model.connections[0].weights.W()
            (n,m) = be.shape(w)
            w2 = be.square(w)
            w4 = be.square(w2)
            self.p = 1.0/float(n*m) * be.tsum(be.square(be.tsum(w2, axis=0))/ be.tsum(w4, axis=0))
    def value(self) -> float:
        """
        Cached weight sparsity.
        Args:
            None
        Returns:
            weight sparsity (float), or None if never updated
        """
        return self.p
class WeightSquare(object):
    """
    Mean squared weights of the model per hidden unit:
        w2 = 1/(#hidden units)*\sum_ij w_ij^2
    Reference:
        Tubiana, J., Monasson, R. (2017)
        Emergence of Compositional Representations in Restricted Boltzmann
        Machines, PRL 118, 138301 (2017)
    """
    def __init__(self, name='WeightSquare'):
        """
        Construct a WeightSquare metric.
        Args:
            name (str; optional): metric name
        Returns:
            WeightSquare object
        """
        self.name = name
        # cached value; None until update() is called
        self.mw2 = None
    def reset(self) -> None:
        """
        Discard any cached value.
        Args:
            None
        Returns:
            None
        """
        self.mw2 = None
    def update(self, assessment) -> None:
        """
        Compute and cache the mean squared weight of the model.
        Notes:
            If a value is already cached it is kept; call reset() between
            model updates.
        Args:
            assessment (ModelAssessment): uses model
        Returns:
            None
        """
        if self.mw2 is None:
            # TODO: should this use the weights of all of the layers?
            w = assessment.model.connections[0].weights.W()
            (_,m) = be.shape(w)
            w2 = be.square(w)
            self.mw2 = 1.0/float(m) * be.tsum(w2)
    def value(self) -> float:
        """
        Cached mean squared weight.
        Args:
            None
        Returns:
            mean squared weight (float), or None if never updated
        """
        return self.mw2
class TAPFreeEnergy(object):
    """
    TAP2 free energy of the model, seeded from a number of random
    magnetizations. This value approximates -lnZ_model.
    """
    def __init__(self, num_samples=2, name='TAPFreeEnergy'):
        """
        Construct a TAPFreeEnergy metric.
        Args:
            num_samples (int): number of samples to average over
            name (str; optional): metric name
        Returns:
            None
        """
        self.name = name
        self.num_samples = num_samples
        # tracks mean/variance of the sampled free energies
        self.calc = math_utils.MeanVarianceCalculator()
    def reset(self) -> None:
        """
        Return the metric to its freshly-constructed (empty) state.
        Args:
            None
        Returns:
            None
        """
        self.calc.reset()
    def update(self, assessment) -> None:
        """
        Draw num_samples TAP states and fold their free energies into the
        running average.
        Args:
            assessment (ModelAssessment): uses model
        Returns:
            None
        """
        for _ in range(self.num_samples):
            _, fe = assessment.model.compute_StateTAP()
            self.calc.update(be.float_tensor([fe]))
    def value(self) -> float:
        """
        Average TAP free energy.
        Args:
            None
        Returns:
            the average TAP free energy (float), or None if never updated
        """
        return self.calc.mean if self.calc.num else None
class TAPLogLikelihood(object):
    """
    Compute the log likelihood of the data using the TAP2 approximation of -lnZ_model
    """
    def __init__(self, num_samples=2, name='TAPLogLikelihood'):
        """
        Create TAPLogLikelihood object.
        Args:
            num_samples (int): number of TAP samples to average over
            name (str; optional): metric name
        Returns:
            None
        """
        # tracks mean/variance of the per-batch log-likelihood estimates
        self.calc = math_utils.MeanVarianceCalculator()
        self.num_samples = num_samples
        self.name = name
    def reset(self) -> None:
        """
        Reset the metric to its initial state.
        Args:
            None
        Returns:
            None
        """
        self.calc.reset()
    def update(self, assessment) -> None:
        """
        Update the estimate for the TAP free energy and the marginal free energy
        (actually the average per sample)
        Args:
            assessment (ModelAssessment): uses model and data_state
        Returns:
            None
        """
        state = assessment.data_state
        rbm = assessment.model
        # batch size -- used to convert the summed marginal free energy
        # into a per-sample average
        stepsize = be.shape(state[0])[0]
        for _ in range(self.num_samples):
            # TAP2 approximation of -ln Z_model
            _, TAP_fe = rbm.compute_StateTAP()
            # visible-layer energy of the observed batch
            vis = -be.tsum(rbm.layers[0].energy(state[0]))
            # hidden-layer conditional parameters given the (rescaled)
            # connected visible units -- relies on RBM internals; see model
            c_params = rbm.layers[1].conditional_params(
                rbm._connected_rescaled_units(1, state),
                rbm._connected_weights(1))
            marginal_fe = vis + be.tsum(
                rbm.layers[1].log_partition_function(c_params, be.zeros_like(c_params)))
            self.calc.update(be.float_tensor([TAP_fe + marginal_fe/stepsize]))
    def value(self) -> float:
        """
        Get the average TAP log likelihood.
        Args:
            None
        Returns:
            the average TAP log likelihood (float), or None if never updated
        """
        if self.calc.num:
            return self.calc.mean
        return None
| [
"drams@unlearn.ai"
] | drams@unlearn.ai |
c6457469c53f03e15ae67afb29640f320a9abd3d | d0fcb7c72118e966181cd21c5ac20c732e386d5f | /__init__.py | e454393c8f6ec93f4294affaf3787293b62e67a7 | [] | no_license | gitter-badger/ckey_ref | 0b7940d2672511a2afe2ef0f21c1e5536276f781 | e60d38f2d975e330787bbb00f5983d56fb70802c | refs/heads/ckey | 2021-01-19T11:57:41.972264 | 2017-02-07T11:42:55 | 2017-02-07T11:42:55 | 82,273,478 | 0 | 0 | null | 2017-02-17T08:12:35 | 2017-02-17T08:12:35 | null | UTF-8 | Python | false | false | 76,502 | py | # -*- coding: utf-8 -*-
# Filename : jcf.py
# Date & Author : 2013-11-20, Chris Nelson
# Platform: CentOS 6.2, Python 2.6.6
# Requirement: none
# Revision: 1
# (c) 2013 Hewlett-Packard Company. All Rights Reserved
''' jcf class
implements core
JCF represents a Job Configuration File -- the raw data without any special
decoding of objects. It is responsible for processing a JCF to merge includes,
replace system variables, and ckeys.
Usage:
jcf = JCF(file)
or
jcf = JCF(job_data)
Second form is to support the Job subclass which may feed in pre-parsed json
data.
To process includes, variables, etc.
jcf.process()
And to save results to same file or a new file
jcf.save()
or
jcf.save(new_file)
'''
import json
from os.path import isfile, dirname, basename, join, abspath
from pprint import pprint, pformat
from copy import copy, deepcopy
import re
import random
import time
import logging
from .. import util
# Custom exceptions used internally for this class
class FlowError(Exception):
    """Internal exception signalling a stage flow-control problem."""
    def __init__(self, message):
        # Stored so callers can report e.message (Python 2 style).
        self.message = message
class JCF(object):
# Sections - this list contains all sections (class members) that should be
# exported to JSON, it also serves to check if any of these keys are not
# recognized (this will cause a ValueError)
section_members = [
"info",
"init_stage",
"tags",
"include",
"included",
"local",
"suts",
"stages",
"ckey",
"local_ckey",
"ckey_template",
"origination",
"module_info",
"dynamic",
"job_timeout",
"include_options",
"is_global",
"configure",
"optionTextMap",
"id",
"job_group"
]
# Flow Controls - this is a list of all valid flow controls
flow_controls = [
"next_default",
"next_pass",
"next_fail",
"next_timeout"
]
# Flow Control Values - this is a list of all valid special flow control values
flow_control_values = [
"_next",
"_quit"
]
def __init__(self, json_src={}, max_depth=100, serial=None, default_owner=None):
# Internal
self._raw = None
self._serial = serial
self.auto_init_stage = None
# Members that represent a section within a JCF
self.info = None
self.init_stage = None
self.tags = None
self.include = None
self.included = None
self.local = None
self.suts = None
self.stages = None
self.ckey = None
self.local_ckey = None
self.ckey_template = None
self.origination = None
self.module_info = None
self.dynamic = None
self.job_timeout = None
self.include_options = None
self.is_global = None
self.configure = None
self.optionTextMap = None
self.id = None
self.job_group = None
# Members that represent other data
self.path = None
self.templates = None
self.max_depth = max_depth
self.order = 1
self.interpolation_errors = list()
self.default_name = "cirrus_job"
if isinstance(json_src, dict):
# Preparsed JSON
raw = deepcopy(json_src)
elif isinstance(json_src, list):
# Lines from a file
# Combine into single string and then make into JSON
json_src = "\n".join(json_src)
raw = util.read_json_str(json_src)
else:
# A file name
# This will search for the file and add the .json suffix if needed
self.path = util.retrieve_file(json_src, exts=[".json"])
raw = util.read_json(self.path)
# Create a default name
if self.path:
default_name = basename(self.path)
if default_name.endswith(".json"):
default_name = default_name[:-5]
else:
default_name = self.default_name
# Set defaults for all members
self.info = raw.get("info", dict())
self.init_stage = raw.get("init_stage", None)
self.tags = raw.get("tags", [])
self.include = raw.get("include", {})
self.included = raw.get("included", [])
self.local = raw.get("local", dict())
self.tags = raw.get("tags", [])
self.suts = raw.get("suts", dict())
self.stages = raw.get("stages", {})
self.ckey = raw.get("ckey", dict())
self.local_ckey = raw.get("local_ckey", dict())
self.ckey_template = raw.get("ckey_template", dict())
self.module_info = raw.get("module_info", dict())
self.dynamic = raw.get("dynamic", dict())
self.origination = raw.get("origination", None)
self.job_timeout = raw.get("job_timeout", None)
self.configure = raw.get("configure", None)
self.optionTextMap = raw.get("optionTextMap", None)
self.include_options = raw.get("include_options", None)
self.id = raw.get("id", None)
self.job_group = raw.get("job_group", [])
self._raw = raw
self.is_global = raw.get("is_global", dict())
# Import or create serial
if "info" in raw and "_serial" in raw["info"]:
self._serial = raw["info"]["_serial"]
self.info["_serial"] = self._serial
elif self._serial:
raw["info"]["_serial"] = self._serial
self.info["_serial"] = self._serial
else:
self._serial = default_name + "_" + \
str(random.randint(1000000000, 4000000000))
if "info" not in raw:
raw["info"] = dict()
raw["info"]["_serial"] = self._serial
self.info["_serial"] = self._serial
# Store local information
if self._serial not in self.local:
self.local[self._serial] = dict()
# Move local ckeys to local area and delete originals
self.local[self._serial].update(self.local_ckey)
self.local_ckey = dict()
# Backward compatibility check for name, desc
if "name" in raw:
self.info["name"] = raw["name"]
del raw["name"]
if "desc" in raw:
self.info["desc"] = raw["desc"]
del raw["desc"]
# Set default info section content if not present
if "name" not in self.info:
self.info["name"] = default_name
if "desc" not in self.info:
self.info["desc"] = ''
if "login_name" not in self.info:
self.info["login_name"] = default_owner if default_owner is not None else "anonymous"
# Sanity checks
# Are there unrecognized sections?
unrecognized = []
for k in raw.keys():
if k not in self.section_members:
unrecognized.append(k)
if unrecognized:
raise ValueError("JCF " + str(self.path) +
" contains unrecognized sections: " + ",".join(unrecognized))
# Extract template names from includes. Do not do this recursively
# because at this time we just want to know if the immediate descendants
# are templates or not.
if self.include:
if self.path:
include_paths = [dirname(self.path), "."]
else:
include_paths = ["."]
self.templates = []
if self.max_depth:
for n in range(len(self.include)):
# Handle simple include format
if isinstance(self.include[n], str) or isinstance(self.include[n], unicode):
include_file = self.include[n]
self.include[n] = {
"id": include_file
}
else:
if "id" in self.include[n]:
include_file = self.include[n]["id"]
else:
raise ValueError("include structure is missing 'id' field")
# Create temporary JCF object, depth to 0 to avoid
# recursion
try:
local_file = util.retrieve_file(include_file,
include_paths,
[".json"],
remote=False)
included_jcf = JCF(local_file, max_depth=0)
if "template" in included_jcf.tags:
self.templates.append(included_jcf.path)
except ValueError, IOError:
# Skip any that we cannot read
pass
# Set origination for this JCF--the local IP
# TODO: this will work for current activities which mainly validate that
# the current system is the one we think it is for stage execution
# (code in agent.py) but this needs to be refined to identify primary
# IPs in multi-homed hosts.
# See also: sys_cirrus setting
if not self.origination:
self.origination = unicode(util.get_preferred_local_ip())
# Propagate serial numbers to stages
stage_list = list()
if isinstance(self.stages, dict):
stage_list = self.stages.values()
elif isinstance(self.stages, list):
for s in self.stages:
if isinstance(s, dict):
stage_list.extend(s.values())
else:
raise ValueError("JCF " + str(self.path) +
" stages section is corrupt, each list " +
" element must contain exactly one stage")
for s in stage_list:
if "_serial" not in s or not s["_serial"]:
s["_serial"] = self._serial
def __str__(self):
jcfstr = "<JCF> " + str(self.path) + "\n"
return jcfstr
    def print_jcf(self, indent=0):
        # Human-readable dump of this JCF to stdout; 'indent' prepends that
        # many spaces to the summary lines.
        # NOTE(review): the section labels below ("info:", "suts:", ...) do
        # not apply the 's' indent prefix -- confirm whether intentional.
        s = " " * indent
        print s + "JCF: " + str(self.path)
        print s + " name: " + self.info["name"]
        print s + " desc: " + self.info["desc"]
        print s + " serial id: " + self._serial
        print s + " origination: " + self.origination
        print s + " tags: " + pformat(self.tags)
        print s + " include: " + pformat(self.include)
        print s + " included: " + pformat(self.included)
        if self.info:
            print " info:"
            pprint(self.info, indent=8 + indent)
        if self.job_timeout:
            print " job_timeout:", self.job_timeout
        if self.configure:
            print " configure:"
            pprint(self.configure, indent=8 + indent)
        if self.suts:
            print " suts:"
            pprint(self.suts, indent=8 + indent)
        if self.stages:
            print " stages:"
            pprint(self.stages, indent=8 + indent)
        if self.ckey:
            print " ckey:"
            pprint(self.ckey, indent=8 + indent)
        if self.local:
            print " local:"
            pprint(self.local, indent=8 + indent)
        if self.ckey_template:
            print " ckey_template:"
            pprint(self.ckey_template, indent=8 + indent)
        if self.module_info:
            print " module_info:"
            pprint(self.module_info, indent=8 + indent)
        if self.dynamic:
            print " dynamic:"
            pprint(self.dynamic, indent=8 + indent)
        if self.job_group:
            print " job_group:"
            pprint(self.job_group, indent=8 + indent)
        print
    def print_raw(self):
        # Pretty-print the raw JSON form of this JCF (via get_raw, defined
        # later in the class) to stdout.
        print util.write_json_pretty_str(self.get_raw())
    def print_sequence(self):
        # Print the stage flow: each stage id with its pass (P->) and
        # fail (F->) successors, plus the default successor in parentheses.
        print "File serial:", self._serial
        # NOTE(review): the local name 'os' shadows the stdlib 'os' module
        # within this method (only os.path names are imported at module top).
        os = self.get_ordered_stages()
        for id, data in os:
            print "{0:20} P->{1:20} F->{2:20} ({3})".format(id,
                                                            data.get("next_pass", "-"),
                                                            data.get("next_fail", "-"),
                                                            data.get("next_default", "-"))
    def process(self, recursive=True):
        '''
        Process all elements of a JCF so that it is in its final form:
        merge includes and interpolate system and file-scope variables.

        Args:
            recursive: currently unused; retained for backward
                compatibility with existing callers
        '''
        # Normalize stages before merging so includes see dict-format
        # stages, then re-normalize after new stages have been merged in.
        self.process_stages()
        self.process_includes()
        self.process_stages()
        self.process_system_vars()
        self.process_file_vars()
    def create_stage_specific_jcf_file(self, stage=None, filename="job.json"):
        # Interpolate stage-scoped then system variables and write the result
        # as a new JCF file; self.path is updated to point at the new file.
        #
        # Args:
        #     stage: stage identifier passed through to process_stage_vars()
        #     filename: absolute path (Linux, UNC, or drive-letter style), or
        #         a name placed next to the current self.path
        # Raises:
        #     ValueError: relative filename given but self.path is not set
        self.process_stage_vars(stage)
        # Process JCF system data
        self.process_system_vars()
        # Create modified job control file in working area
        # Accepts linux or windows or "c:\" type filenames
        if filename and (filename.startswith("/") or
                         filename.startswith("\\") or
                         (len(filename) > 1 and filename[1] == ":")):
            new_jcf = filename
        elif self.path:
            new_jcf = join(dirname(self.path), filename)
        else:
            raise ValueError("filename must be a full path or JCF object " +
                             "must be created from a file originally so " +
                             "path member is set")
        self.write(new_jcf)
        # Update path
        self.path = new_jcf
def process_includes(self, depth=0):
'''
!!!!!!!!!!!!!!!!!!!!!!!!!!!!
IF YOU CHANGE THIS METHOD BE
SURE TO UPDATE THE PARALLEL
METHOD IN JOB.PY IF NEEDED
!!!!!!!!!!!!!!!!!!!!!!!!!!!!
Merges included JCFs
max_depth - indicates how deep to read the tree. Default is a high
number to catch circular includes.
-1 = process to infinite depth
0 = do not process includes (makes this method a noop)
>0 = process to the indicated depth
'''
# TODO: detect circular includes or just rely on the max_depth setting?
# Check if depth limit has been reached
if self.max_depth != -1 and depth >= self.max_depth:
return
# Nothing to include
if self.include is None:
return
all_serials = []
if self.stages and type(self.stages) is dict:
all_serials = [v["_serial"] for v in self.stages.values() if self.stages\
and "_serial" in v]
all_serials.append(self.info["_serial"])
if len(self.include) is 0 and self.ckey_template:
contained = set(self.ckey_template.keys()) - set(all_serials)
if len(contained) > 0:
if self.stages is None or len(self.stages) is 0:
serial_name = "_nostage_" + self.info["_serial"]
else:
serial_name = self.info["_serial"]
x = deepcopy(self.ckey_template)
self.ckey_template = { serial_name: x }
# Stages must be in dict format
if self.stages and isinstance(self.stages, list):
self.process_stages()
if self.path:
include_paths = [dirname(self.path), "."]
else:
include_paths = ["."]
for n in range(len(self.include)):
serial = None
status = None
# Handle simple include format
if isinstance(self.include[n], str) or isinstance(self.include[n], unicode):
include_file = self.include[n]
self.include[n] = {
"id": include_file
}
else:
if "id" in self.include[n]:
include_file = self.include[n]["id"]
else:
raise ValueError("include structure is missing 'id' field")
if "_serial" in self.include[n]:
serial = self.include[n]["_serial"]
if "status" in self.include[n]:
status = self.include[n]["status"]
local_file = util.retrieve_file(include_file, include_paths, [".json"])
merge_from = JCF(local_file,
serial=serial)
merge_from.process_stages()
# Add to included with full path for reference purposes
self.included.append(local_file)
# Call recursive merge
merge_from.process_includes(depth=depth + 1)
# Set flow control for this set of stages
if self.include_options and \
merge_from._serial in self.include_options:
io = self.include_options[merge_from._serial]
last_stage_of_current = self.get_last_stage()
first_stage_of_merge = merge_from.get_first_stage()
# Tie the last stage of the current file to the first stage
# of the next file
if last_stage_of_current and first_stage_of_merge:
last_stage_of_current[1]["next_default"] = \
first_stage_of_merge[0]
# Merge all sections
self.merge(merge_from)
# Add to included line for reference
self.included = self.included + merge_from.included
# Uniquify included line
self.included = list(set(self.included))
# Empty include line to prevent future merging
self.include = []
    def merge(self, merge_from):
        '''
        Merges the merge_from JCF object into this one. Merge is done
        on each individual section separately depending on the structure for
        that particular section.

        Args:
            merge_from (JCF): the JCF whose sections are folded into self;
                it is modified in place (duplicate suts/stages get renamed)
        '''
        # Merge Sections
        #  tags
        #  include (implicitly done via process_includes())
        #  info
        #  suts
        #  stages
        #  ckey
        #  local
        #  ckey_template
        #  job_timeout
        #  job_group
        # Create return data structure
        # NOTE(review): change_data is never populated or returned below --
        # confirm whether it is vestigial.
        change_data = dict()
        # Path
        # Inherit path from next JCF if not set. This is to ensure that pathless
        # JCFs return semi-significant error strings.
        if not self.path and merge_from.path:
            self.path = merge_from.path
        # Tags
        # Uniquely merge the tag lists
        self.tags = sorted(list(set(self.tags + merge_from.tags)))
        # Info
        # This probably needs better handling but for now just take the info section
        # that contains a cirrus_job_id, else merge key-by-key
        if "cirrus_job_id" not in self.info and "cirrus_job_id" in merge_from.info:
            # merge_from merges into self
            self.info.update(merge_from.info)
        else:
            # self merges info merge_from
            # merge_from becomes new info section
            x = deepcopy(merge_from.info)
            x.update(self.info)
            self.info = x
        # special case for "name" -- take first one that is not the default
        # and pull in "desc" from that as well
        if self.info["name"] == self.default_name and \
           merge_from.info["name"] != self.default_name:
            for k in ("name", "desc"):
                if k in merge_from.info and merge_from.info[k]:
                    self.info[k] = merge_from.info[k]
        # Suts
        # suts that have the same ID are renamed and references updated much
        # the same as stages
        # x = deepcopy(merge_from.suts)
        # x.update(self.suts)
        # self.suts = x
        my_sut_names = self.suts.keys()
        merge_from_sut_names = merge_from.suts.keys()
        for n in merge_from_sut_names:
            new_name = n
            if n in my_sut_names:
                # Begin rename process
                # Create a new name (first free "<name>_<i>" suffix)
                i = 2
                new_name = n + "_" + str(i)
                while new_name in my_sut_names or new_name in merge_from_sut_names:
                    i += 1
                    new_name = n + "_" + str(i)
                # Move sut to new name
                merge_from.suts[new_name] = merge_from.suts[n]
                del merge_from.suts[n]
                # Find and replace all variable references to this sut,
                # i.e. "${suts.<name>...}" occurrences anywhere in merge_from
                var_re = r'\$\{suts\.(' + n + r')[-\w\s\. \[\]]+\}'
                util.find_replace_re(merge_from.get_dict(), var_re, new_name)
                merge_from.update_attributes()
                # Find and replace all target controls referencing this sut
                for stage_data in merge_from.stages.values():
                    if "target" in stage_data and stage_data["target"] == n:
                        stage_data["target"] = new_name
            # Merge in new sut
            self.suts[new_name] = merge_from.suts[new_name]
        # Stages
        # TODO: need to determine how to best handle conflicts
        # Options:
        # 1) replace with current version (consistent with other operations)
        # 2) replace with merge version
        # 3) rename duplicates with a new identifier
        # -- I like #3 because stages like "reboot" could be called multiple
        # times but it has a downside with flow control
        # in that the flow logic won't know which stage to call
        # next
        # Convert stages into consistent format
        self.process_stages()
        merge_from.process_stages()
        # Copy init stage if not already defined
        if not self.init_stage and merge_from.init_stage:
            self.init_stage = merge_from.init_stage
        # Delete auto_init stage because it is no longer valid
        self.auto_init_stage = None
        # For now all stages are preserved even if they have duplicate
        # identifiers.
        # Stage names in merge_from must be tracked to update any
        # references to them within merge_from; only merge_from may have
        # stages renamed here.
        my_stage_names = self.get_stage_names()
        merge_from_stage_names = merge_from.get_stage_names()
        for n in merge_from_stage_names:
            new_name = n
            if n in my_stage_names:
                # Begin rename process
                # Create a new name (first free "<name>_<i>" suffix)
                i = 2
                new_name = n + "_" + str(i)
                while new_name in my_stage_names or new_name in merge_from_stage_names:
                    i += 1
                    new_name = n + "_" + str(i)
                # Move stage to new name
                merge_from.stages[new_name] = merge_from.stages[n]
                merge_from.stages[new_name]["id"] = new_name
                merge_from.stages[new_name]["instance"] = i
                del merge_from.stages[n]
                # Find and replace all variable references to this stage
                var_re = r'\$\{stages\.(' + n + r')[-\w\s\. \[\]]+\}'
                util.find_replace_re(merge_from.get_dict(), var_re, new_name)
                merge_from.update_attributes()
                # Find and replace all flow controls referencing this stage
                for v in merge_from.stages.values():
                    for fc in self.flow_controls:
                        if fc in v and v[fc] == n:
                            v[fc] = new_name
            # Merge in new stage
            self.stages[new_name] = merge_from.stages[new_name]
        # Ckey
        # ckeys in current JCF take precedence
        x = deepcopy(merge_from.ckey)
        x.update(self.ckey)
        self.ckey = x
        # local
        x = deepcopy(merge_from.local)
        x.update(self.local)
        self.local = x
        # ckey_template
        # XXX templates in current JCF take precedence XXX
        # Note: Changed rules in bb3e9ae: now templates in merge JCF take
        # precedence
        x = deepcopy(merge_from.ckey_template)
        self.ckey_template.update(x)
        # job_timeout
        # TODO: take the higher of the two? Add them?
        if merge_from.job_timeout:
            self.job_timeout = merge_from.job_timeout
        # job_group
        if merge_from.job_group:
            self.job_group = merge_from.job_group
        # configure
        if merge_from.configure:
            x = deepcopy(merge_from.configure)
            if self.configure:
                x.update(self.configure)
            self.configure = x
        # Re-process stages
        self.process_stages()
def process_stages(self):
'''
Converts stages section into something usable by Agent. This involves
changing stages from a ordered array to a dict with "order" keys,
and "next_default" keys set appropriately.
It also adds defaults for any required keys.
The only check that made is for duplicate stage names. If encountered
the dup stage is renamed by appending "_2" then "_3" etc.
'''
if not self.stages:
# No stages to process
return
next_order_stages = dict()
explicit_order_stages = list()
implicit_order_stages = list()
if isinstance(self.stages, list):
# Make all names unique
for stage_index in range(len(self.stages)):
s = self.stages[stage_index]
id = s.keys()
if len(id) != 1:
raise ValueError("JCF " + str(self.path) +
" stages section is corrupt, each" +
" list element must contain exactly" +
" one stage")
id = base_id = id[0]
v = s[id]
# Create unique stage ID
i = 2
stage_names = [x.keys()[0] for x in self.stages]
try:
# Remove my own ID from the list
stage_names.remove(id)
except:
pass
while id in stage_names:
id = base_id + "_" + str(i)
i += 1
# Store new stage
self.stages[stage_index] = { id: v }
# Set self-reference
v["id"] = id
# Set initial instance if not present
v.setdefault("instance", 1)
# Find highest order
if "order" in v:
try:
o = int(v["order"])
except:
raise ValueError("order field in stage {0} is not " +
"an integer".format(base_id))
if o >= self.order:
self.order = o + 1
# Separate stages according to these rules:
# - Stages that have next* settings (highest sort precedence)
# - Stages that have order field (lower sort precedence)
# - Stages with neither (order is literal)
for s in self.stages:
if len(s.keys()) != 1:
raise ValueError("JCF " + str(self.path) +
" stages section is corrupt, each list " +
" element mustcontain exactly one stage")
id = s.keys()[0]
v = s.values()[0]
if "next_pass" in v:
v["next_default"] = v["next_pass"]
if "order" in v:
del v["order"]
next_order_stages[id] = v
elif "next_default" in v:
if "order" in v:
del v["order"]
next_order_stages[id] = v
elif "order" in s:
if int(v["order"]) >= self.order:
# Find highest order number
self.order = int(v["order"]) + 1
explicit_order_stages.append(s)
else:
implicit_order_stages.append(s)
else:
# Dict format
# Find highest order number
for s in self.stages.values():
# Figure out highest order
if "order" in s and \
int(s["order"]) >= self.order:
self.order = int(s["order"]) + 1
# Separate stages according to these rules:
# - Stages that have next* settings (highest sort precedence)
# - Stages that have order field (lower sort precedence)
# - Stages with neither (order is literal)
for id, v in self.stages.items():
s = {id: v}
# Set self-reference
v["id"] = id
# Set initial instance if not present
v.setdefault("instance", 1)
if "next_default" in v:
if "order" in v:
del v["order"]
next_order_stages[id] = v
elif "next_pass" in v:
v["next_default"] = v["next_pass"]
if "order" in v:
del v["order"]
next_order_stages[id] = v
elif "order" in s:
if int(v["order"]) >= self.order:
# Find highest order number
self.order = int(v["order"]) + 1
explicit_order_stages.append(s)
else:
implicit_order_stages.append(s)
# COMMON PROCESSING for list format and dict format
# Apply order to implicit stages
for s in implicit_order_stages:
v = s.values()[0]
v["order"] = self.order
self.order += 1
explicit_order_stages.append(s)
# Build new dict with correct order
# - Stages that have next* settings (highest sort precedence)
prev_stage = None
prev_id = None
auto_init_stage_set = False
stage_paths = self._get_stage_path(path="next_default",
stage_dict=next_order_stages)
default_path_stages = stage_paths["next_default"]
outside_path_stages = stage_paths["other"]
for id, v in default_path_stages:
# First id will be the first stage id
if not auto_init_stage_set:
self.auto_init_stage = id
auto_init_stage_set = True
if prev_stage:
prev_stage["next_default"] = id
prev_id = id
prev_stage = next_order_stages[id]
# - Stages that have order field (all the rest at this point)
prev_stage = None
prev_id = None
for s in sorted(explicit_order_stages,
key=lambda(k): k.values()[0]["order"]):
id = s.keys()[0]
v = s.values()[0]
# First id will be the first stage id
if not auto_init_stage_set:
self.auto_init_stage = id
auto_init_stage_set = True
next_order_stages[id] = v
if prev_stage:
prev_stage["next_default"] = id
prev_stage = v
prev_id = id
del prev_stage["order"]
# Stages that don't fit in the default path
# TODO: this is just tacked on right now -- need to do more?
for id, v in default_path_stages:
next_order_stages[id] = v
# Assign new stage structure to JCF
self.stages = next_order_stages
# Run through list again to gather any disable (skipped) stage names
# Also add required keys if not present:
# - target
disabled_stages = list()
for id, s in self.stages.items():
if s.get("disable", False):
disabled_stages.append(id)
if "target" not in s:
s["target"] = "${sut}"
try:
self.skip_stages(disabled_stages)
except FlowError as e:
raise ValueError("A disabled (skipped) stage may have " +
"created an infinite loop or you " +
"have skipped all stages, check JCF " +
"or enable a stage; " +
e.message)
def process_system_vars(self):
'''
Interpolates Cirrus system variables--these are special variables
set by the system automatically for convenience.
'''
# Get system variables
# Primary SUT:
# - If one sut defined, that becomes the default
# - SUT labeled "sut"
# - SUT with "default" set to some true value
sys_sut = None
sut_ids = self.suts.keys()
mapping = {}
if len(sut_ids) == 1:
mapping[sut_ids[0]] = sut_ids[0]
elif len(sut_ids) > 1:
if "sut" in sut_ids:
for sut_id in sut_ids:
mapping[sut_id] = sut_id
else:
# no sut found in suts section, try to find the one with default enable
for sut_id in sut_ids:
if "default" in self.suts[sut_id] and self.suts[sut_id]["default"]:
mapping[sut_id] = sut_id
break
else:
mapping["sut"] = "(no suts defined)"
# Convert SUT ID, if it is not an error, to ipaddress
# TODO: develop a better mechanism for identifying SUTs to Agent and in
# GUI and JCF--ILO IP may not work for everything
for k in mapping.keys():
if k in self.suts:
# sys_sut = self.suts[sys_sut].get("ipaddress", "(no IP address found)")
# change "ipaddress" to "sys_ip"
mapping[k] = self.suts[k].get("sys_ip", "(no IP address found)")
# Cirrus Server (this system)
# TODO: this will work for current activities which mainly validate that
# the current system is the one we think it is for stage execution
# (code in agent.py) but this needs to be refined to identify primary
# IPs in multi-homed hosts.
# See also: origination setting
if "cirrus_ip" in self.info and self.info["cirrus_ip"]:
cirrus_ip = self.info["cirrus_ip"]
else:
cirrus_ip = util.get_preferred_local_ip()
# Now replace all variable instances
mapping["cirrus"] = cirrus_ip
for k, v in mapping.items():
if not k.startswith("$"):
k = "${" + k + "}"
util.find_replace(self.info, k, v)
util.find_replace(self.suts, k, v)
util.find_replace(self.stages, k, v)
def process_file_vars(self):
'''
This walks the entire JCF structure looking for variables and
interpolates them base in local file scope
'''
if "_serial" in self.info:
self.interpolate_variables(scope=self.info["_serial"])
def process_stage_vars(self, stage=None):
'''
This walks the entire JCF structure looking for variables and
interpolates them based on stage scope
'''
if stage:
s = self.get_stage_by_name(stage)
if s and "_serial" in s:
self.interpolate_variables(scope=s["_serial"])
else:
self.interpolate_variables()
else:
self.interpolate_variables()
def process_ckey_defaults(self):
'''
This scans ckey_template section and inserts any missing settings into
the ckey section if they are not present.
It also checks that expected lists appear in list format.
'''
if not self.ckey_template:
return
for serial in self.ckey_template:
ckeyList = self.ckey_template[serial]
for ct in ckeyList.keys():
if ct not in self.ckey:
if "default" in ckeyList[ct]:
self.ckey[ct] = ckeyList[ct]["default"]
elif "hidden" in ckeyList[ct]:
raise ValueError("JCF " + str(self.path) +
" hidden ckey_template setting " +
"'{0}' must have a default field".format(ct))
# Auto-create lists if expected
def_type = ckeyList[ct].get("data_type", "")
if (ct in self.ckey and
def_type.lower() == "list" and
not isinstance(self.ckey[ct], list)):
self.ckey[ct] = [self.ckey[ct]]
def process_singletons(self):
# Gather all singletons by their ID
singletons = dict()
singleton_chosen = dict()
for stage_id, stage_data in self.stages.items():
singleton_id = stage_data.get("singleton_group", False)
if singleton_id:
instance = stage_data["instance"]
chosen = stage_data.get("singleton_choice", False)
singleton_id = singleton_id.lower()
if singleton_id not in singletons:
singletons[singleton_id] = dict()
singleton_chosen[singleton_id] = False
singletons[singleton_id][stage_id] = stage_data
if not singleton_chosen[singleton_id] and chosen:
singleton_chosen[singleton_id] = chosen
# Now we have a dictionary of singleton IDs containing all stages
# and we need to remove all but one of those stages. Choose either
# the first or the last singleton depending on preference. The default
# is the first.
stages_to_remove = list()
for singleton_id in singletons.keys():
keep_stage = None
chosen = singleton_chosen[singleton_id]
if not chosen:
chosen = "first"
else:
chosen = chosen.lower()
# Find either the first or last instance
for stage_id in singletons[singleton_id].keys():
instance = int(singletons[singleton_id][stage_id]["instance"])
if keep_stage is None:
keep_stage = stage_id
elif chosen == "first" and \
instance < singletons[singleton_id][keep_stage]["instance"]:
keep_stage = stage_id
elif chosen == "last" and \
instance > singletons[singleton_id][keep_stage]["instance"]:
keep_stage = stage_id
# Remove all but chosen instance
for stage_id in singletons[singleton_id].keys():
if stage_id != keep_stage:
stages_to_remove.append(stage_id)
# Remove duplicate singletons
try:
self.remove_stages(stages_to_remove)
except FlowError as e:
raise ValueError("Infinite loop detected while removing " +
"duplicate stages: " +
",".join(stages_to_remove) +
"; this JCF cannot be processed until that " +
"is resolved; " +
e.message)
    def import_system_config(self):
        # Placeholder: importing system-level configuration is not yet
        # implemented; intentionally a no-op.
        pass
    def copy(self):
        '''Return a fully independent deep copy of this object.'''
        return deepcopy(self)
def get_dict(self):
'''
Refreshes the raw member which is the raw JSON content (in a Python
dict) and returns the data.
'''
self._raw = dict()
for s in self.section_members:
data = getattr(self, s)
if data:
self._raw[s] = data
return self._raw
def update_attributes(self):
# Reload data for all members
self.info = self._raw.get("info", dict())
self.init_stage = self._raw.get("init_stage", None)
self.tags = self._raw.get("tags", [])
self.include = self._raw.get("include", [])
self.included = self._raw.get("included", [])
self.local = self._raw.get("local", dict())
self.tags = self._raw.get("tags", [])
self.suts = self._raw.get("suts", dict())
self.stages = self._raw.get("stages", [])
self.ckey = self._raw.get("ckey", dict())
self.local_ckey = self._raw.get("local_ckey", dict())
self.ckey_template = self._raw.get("ckey_template", dict())
self.module_info = self._raw.get("module_info", dict())
self.dynamic = self._raw.get("dynamic", dict())
self.origination = self._raw.get("origination", None)
self.job_timeout = self._raw.get("job_timeout", None)
self.configure = self._raw.get("configure", None)
self.optionTextMap = self._raw.get("optionTextMap", None)
self.id = self._raw.get("id", None)
self.job_group = self._raw.get("job_group", [])
    def get_raw(self):
        '''Deprecated alias for get_dict(); kept for backward compatibility.'''
        # Function renamed to get_dict()
        return self.get_dict()
    def get_json(self):
        '''Return this object serialized as a JSON string.'''
        # Returns stringified JSON syntax of this object
        return json.dumps(self.get_dict())
    def write(self, filename=None):
        '''
        Serialize this object to disk as JSON.
        filename = destination path; defaults to the path this object
                   was loaded from (self.path).
        '''
        if filename is None:
            filename = self.path
        # Write JSON
        util.write_json(filename, self.get_raw())
    def get_stage_names(self):
        '''Return the IDs of all stages (the keys of the stages dict).'''
        return self.stages.keys()
    def get_stage_by_name(self, stage_name):
        '''Return the stage dict for stage_name, or None if not found.'''
        return self.stages.get(stage_name, None)
def get_substage_by_name(self, stage_name, substage_name="action"):
s = self.stages.get(stage_name, None)
if s and substage_name in s:
return s[substage_name]
else:
return None
def get_stage_by_serial(self, serial):
for s, v in self.stages.items():
if "_serial" in v and v["_serial"] == serial:
return s, v
return None, None
def get_substage_by_serial(self, serial, substage_name="action"):
for id, s, ss in self.get_substages(substage=substage_name):
if "_serial" in ss and ss["_serial"] == serial:
return id, s, ss
return None, None, None
def get_substage_module_name(self, stage_name, substage_name="action"):
s = self.stages.get(stage_name, None)
if s and substage_name in s:
return s[substage_name].get("cirrus_module", None)
else:
return None
def _get_stage_path(self, path="next_default", init_stage=None, stage_dict=None):
'''
Returns a dict containing one or more lists of tuples representing the
stage names and their data. Each set of tuples represents various
categories of stages:
- <path>: stages that follow a sequence and will be executed within
a job assuming the "path" is used as the path. The default path
is "next_default", or the default stage path. Therefore the key in
the dict is also called "next_default".
- other: stages that fall outside of the path or "path"
Notes:
- Calls process_stages if it has not already been done
- ordered + standalone will represent all stages within a job
path = the path by which to establish the order. Any "next*"
setting will work assuming it is present. Defaults to the default
path (next_default).
init_stage = the name of the stage to start the path from. The default
is the first stage of the JCF as determined by process_stages()
(the value of this is stored in auto_init_stage member).
'''
ordered_stages = list()
unordered_stages = list()
visited_stages = dict()
if stage_dict is None:
if not isinstance(self.stages, dict):
self.process_stages()
stage_dict = self.stages
if not stage_dict:
return {
path: [],
"other": []
}
# Determine initial stage in this order of precedence:
# - override passed into this function
# - user-supplied init_stage
# - auto_init_stage as calculated by process_stages
s = init_stage or self.init_stage or self.auto_init_stage
if not s:
# No init_stage so figure it out
all_stages = stage_dict.keys()
for id in copy(all_stages):
# The first stage is detected by looking for the one stage
# that won't have anything else pointing to it. This approach
# will work with most cases. Anything else is a configuration
# error.
if "order" in stage_dict[id] and stage_dict[id]["order"] is 1:
all_stages = [id]
break
if "next_pass" in stage_dict[id]:
if stage_dict[id]["next_pass"] in all_stages:
all_stages.remove(stage_dict[id]["next_pass"])
elif "next_default" in stage_dict[id]:
if stage_dict[id]["next_default"] in all_stages:
all_stages.remove(stage_dict[id]["next_default"])
if len(all_stages) == 1:
# Found the most likely initial stage
s = all_stages[0]
else:
err = "Cannot figure out initial stage from subset" + \
" " + str(sorted(stage_dict.keys())) + "."
if len(all_stages) > 1:
err += " Ambiguous candidates: " + str(sorted(all_stages))
else:
err += " No candidates. Possible illegal stage loop."
err += " You could use 'init_stage' setting to resolve this."
raise ValueError(err)
while(1):
if s in visited_stages:
# If stage is encountered twice we are in a loop, quit now
s = "_quit"
elif s in stage_dict:
# Stage is in JCF, add to list and use 'path' path to find
# the next
ordered_stages.append((s, stage_dict[s]))
visited_stages[s] = True
next_stage = stage_dict[s].get(path, "_next")
if not next_stage or next_stage == "_next":
next_stage = stage_dict[s].get("next_default", "_quit")
if not next_stage:
next_stage = "_quit"
s = next_stage
else:
s = "_quit"
if s == "_quit":
break
# Calculate standalone stages
for id in sorted(list(set(stage_dict.keys()) - set(visited_stages.keys()))):
unordered_stages.append((id, self.stages[id]))
return {
path: ordered_stages,
"other": unordered_stages
}
def get_ordered_stages(self, path="next_default", init_stage=None, stage_dict=None):
'''
Returns a list of tuples representing the stage names and data in the
order they are executed.
Notes:
- Calls process_stages if it has not already been done
- It is possible that some stages will not be returned if they are
not part of the path. e.g. if a stage is meant to handle a failure
and is only reached via "next_fail".
(You can use get_standalone_stages() to retrieve those.)
path = the path by which to establish the order. Any "next*"
setting will work assuming it is present. Defaults to the default
path (next_default).
init_stage = the name of the stage to start the path from. The default
is the first stage of the JCF as determined by process_stages()
(the value of this is stored in auto_init_stage member).
'''
return self._get_stage_path(path, init_stage, stage_dict)[path]
def get_unordered_stages(self, path="next_default", init_stage=None, stage_dict=None):
'''
Returns a list of tuples representing the stage names and data that
fall outside of the perceived order. Essentially these are stages
that will not ever be called under the given path.
Notes:
- Calls process_stages if it has not already been done
path = the path by which to establish the order. Any "next*"
setting will work assuming it is present. Defaults to the default
path (next_default).
init_stage = the name of the stage to start the path from. The default
is the first stage of the JCF as determined by process_stages()
(the value of this is stored in auto_init_stage member).
'''
return self._get_stage_path(path, init_stage, stage_dict)["other"]
def get_first_stage(self, stage_dict=None):
'''
Returns the first tuple that comes out of get_ordered_stages()
which is (stage name, stage data)
'''
stages = self.get_ordered_stages(stage_dict=stage_dict)
if not stages:
return None
return stages[0]
def get_last_stage(self, stage_dict=None):
'''
Returns the last tuple that comes out of get_ordered_stages()
which is (stage name, stage data)
'''
stages = self.get_ordered_stages(stage_dict=stage_dict)
if not stages:
return None
return stages[-1]
def get_job_group(self):
return [item for item in self.job_group if item.get('placeholder') is None]
    def get_sut_names(self):
        '''Return the IDs of all SUTs (the keys of the suts dict).'''
        return self.suts.keys()
    def get_sut_by_name(self, name):
        '''Return the SUT dict for the given name, or None if not found.'''
        return self.suts.get(name, None)
def get_substages(self, substage="all"):
'''
Generator for finding all substages (action, validate, report) within
this JCF. The generator returns a tuple:
- stage ID (str)
- substage ID(str) - one of: action, validate, report
- substage data (dict) - e.g. self.stages[id]["action"] where id is the
first tuple element
'''
for id, s in self.stages.items():
if substage == "all" or substage == "action":
if "action" in s:
yield (id, "action", s["action"])
if substage == "all" or substage == "validate":
if "validate" in s:
yield (id, "validate", s["validate"])
if substage == "all" or substage == "report":
if "report" in s:
yield (id, "report", s["report"])
def get_ckey(self, key=None, scope=None, default=None):
r = self.get_local_ckey(key, scope, default)
g = self.get_global_ckey(key, default)
if key:
return r or g
else:
if r is None:
return g
elif g:
g = deepcopy(g)
g.update(r)
return g
else:
return r
def get_global_ckey(self, key=None, default=None):
if key is not None:
if key in self.ckey:
return self.ckey[key]
else:
return default
else:
return self.ckey
def get_local_ckey(self, key=None, scope=None, default=None):
if not scope:
scope = self._serial
if scope == "*":
# Search everything
if not key:
raise ValueError("Search of all scopes (*) requires a key")
scope = self.get_scope(key)
if not scope:
return default
elif scope not in self.local:
return default
if key is not None:
if key in self.local[scope]:
return self.local[scope][key]
else:
return default
else:
return self.local[scope]
def get_scope(self, key):
'''
Returns the first scope that contains a given key
'''
for s in self.local.keys():
if key and key in self.local[s]:
return s
return None
    def _interpolate_string(self, string, scope=None, filter=""):
        '''
        This is the atomic function at the heart of all variable interpolation.
        It is the code that does the actual substitution.
        string = the string to interpolate
        scope = the specific scope to look at (serial number of the object).
            If set to None (default) then use global scope.
            Special scopes:
                "*" means search every scope that is available and returns
                the first occurrence of a variable. If multiple occurrences
                exist then one returned is unpredictable under "*" scope.
        filter = Limits interpolation to variables that start with filter.
            For example if "suts" is the filter then only ${suts...}
            variables will be interpolated.
        '''
        # Group 1 is the full "${...}" token, group 2 the inner key path
        var_re = r'(\$\{(' + filter + r'[- \w\s\.\:\[\]]+)\})'
        obj_data = self.get_dict()
        # Outer while loop will ensure we get all nested variables like
        # string = "The ${var1} jumped over the fence"
        # ${var1} = "${var2}"
        # ${var2} = "dog"
        loops = 0
        while True:
            matches = re.findall(var_re, string)
            if not matches:
                break
            loops += 1
            if loops >= 100:
                # NOTE(review): message is missing a space after the path
                # and a closing quote -- cosmetic only
                raise ValueError("JCF " + str(self.path)
                                 + "Infinite loop detected on string '"
                                 + string)
            dups = dict()
            for replaceme, key in matches:
                # Skip duplicate matches as they have already been processed
                if key in dups:
                    continue
                else:
                    dups[key] = True
                # Change path1.path2.key into ['path1']['path2']['key']
                key_parts = key.split(".")
                key_string = str()
                # Handle local variables (ckeys only) by redirecting
                # interpolation to local section if the given ckey is
                # found there
                if scope and len(key_parts) > 1 and key_parts[0] == "ckey":
                    k = key_parts[1]
                    if scope == "*":
                        scope = self.get_scope(k)
                    if self.get_local_ckey(k, scope) != None:
                        # Rewrite "ckey.<k>..." into "local.<scope>.<k>..."
                        key_parts = list(key_parts)
                        key_parts[0] = "local"
                        key_parts.insert(1, scope)
                # Look at each key part and identify any array refs
                for kp in key_parts:
                    m = re.search(r'(.+?)(\[\d+\])$', kp)
                    if m:
                        # array form: ['key'][#]
                        key_string += "['" + m.group(1) + "']" + m.group(2)
                    else:
                        # non-array form: ['key']
                        key_string += "['" + kp + "']"
                # See if key exists in this object
                # NOTE(review): eval() on the composed subscript string
                # assumes JCF keys never contain quotes; the parts are not
                # escaped here.
                try:
                    value = eval("obj_data" + key_string)
                    # Return String or Structure?
                    #
                    # If variable is by itself like "${var}" as opposed to
                    # something like "My name is ${var}" then we allow this
                    # to morph into a structure reference.
                    # First we detect this situation by comparing the
                    # seeing if the variable string is the entire "replaceme."
                    # In this case we need to check if the value we looked
                    # up is a string or a structure. If it is not a str/unicode
                    # then it must be a structure and so we will return
                    # the structure instead.
                    # However, if it turns out to be a string we treat it
                    # normally and keep processing it for multiple
                    # interpolation.
                    if (replaceme == string and
                        not isinstance(value, str) and
                        not isinstance(value, unicode)):
                        return value
                except:
                    # There is no corresponding value for this key so flag the
                    # object as not fully interpolated, change the key so it
                    # appears empty which will prevent it from being processed
                    # again. ("${" becomes "${}{", undone after the loop.)
                    self.interpolation_errors.append(replaceme)
                    value = replaceme.replace("${", "${}{")
                # Replace all instances
                string = string.replace(replaceme, value)
        # Revert any missing values to their original state
        string = string.replace("${}", "$")
        return string
def get_stage_ckey(self, stage_id, ckey=None, default=None):
s = self.get_stage_by_name(stage_id)
if not s:
return None
return self.get_local_ckey(ckey, s.get("_serial", None), default)
def _interpolate_structure(self, structure, scope=None, filter=""):
'''
Recursive function to descend JCF structure and interpolate every
string
structure = the structure to interpolate
scope = the specific scope to look at (serial number of the object).
If set to None (default) then use global scope.
There is a special filter called "*" which means search
every scope that is available and returns the first
occurrence of a variable. If multiple occurrences exist
then one returned is unpredictable under "*" scope.
filter = Limits interpolation to variables that start with filter.
For example if "suts" is the filter then only ${suts...}
variables will be interpolated.
'''
if isinstance(structure, str) or isinstance(structure, unicode):
return self._interpolate_string(structure, scope, filter)
elif isinstance(structure, dict):
if "_serial" in structure:
scope = structure["_serial"]
for k in structure.keys():
orig = structure[k]
structure[k] = self._interpolate_structure(
structure[k], scope, filter)
elif isinstance(structure, list):
for i in range(len(structure)):
structure[i] = self._interpolate_structure(
structure[i], scope, filter)
# This will return as-is data types that cannot be interpolated
# (numbers, booleans, etc.) or return the recursively modified
# structure.
return structure
    def interpolate_variables(self, section=None, scope=None, filter=""):
        '''
        Scan entire object for strings that look like variables and replace
        them with actual values.
        The internal variable format is specific to Cirrus, here is an
        example:
            ${stages.My Stage.action.cirrus_module}
        This corresponds to the dict representation of a JCF:
            self.get_dict()['stages']['My Stage']['action'][cirrus_module']
        Nested variables (an interpolated variable that contains another
        variable to interpolate) are permitted as long as they don't create
        a loop.
        section = the section to interpolate (e.g. suts or stages). If
            set to None (default) then all sections that support
            variables are processed
        scope = the specific scope to look at (serial number of the object).
            If set to None (default) then use global scope.
            There is a special filter called "*" which means search
            every scope that is available and returns the first
            occurrence of a variable. If multiple occurrences exist
            then one returned is unpredictable under "*" scope.
        filter = Limits interpolation to variables that start with filter.
            For example if "suts" is the filter then only ${suts...}
            variables will be interpolated.
        Sections that can contain variables:
            suts
            stages
            ckey
            local_ckey
            info
        '''
        # Clear interpolation errors, if there is something missing it will
        # be recorded.
        self.interpolation_errors = list()
        # Normalize a single section name to a one-element list
        if section is not None and not isinstance(section, list):
            section = [section]
        # Interpolate all supported sections
        if not section or "suts" in section:
            self.suts = self._interpolate_structure(self.suts, scope, filter)
            # Convert list to string for SUT name and ID
            # (an interpolated variable may have expanded into a list;
            # keep only the first element for these scalar fields)
            for sut_name in self.suts:
                sut_data = self.suts[sut_name]
                for f in "id", "name":
                    if f in sut_data and isinstance(sut_data[f], list) and sut_data[f]:
                        sut_data[f] = sut_data[f][0]
        if not section or "stages" in section:
            self.stages = self._interpolate_structure(self.stages, scope, filter)
        if not section or "ckey" in section:
            self.ckey = self._interpolate_structure(self.ckey, scope, filter)
        if not section or "info" in section:
            self.info = self._interpolate_structure(self.info, scope, filter)
def resolve_sut(self, id):
# See if ID is literal
if id in self.suts:
return self.suts[id]
# ID is an IP address
for s in self.suts.values():
if "sys_ip" in s and s["sys_ip"] == id:
return s
return None
def get_sut(self, id):
# See if ID is literal
if id in self.suts:
return id, self.suts[id]
# ID is an IP address
for sut_real_id, s in self.suts.items():
if "sys_ip" in s and s["sys_ip"] == id:
return sut_real_id, s
# ID is an internal ID
for sut_real_id, s in self.suts.items():
if "id" in s and int(s["id"]) == int(id):
return sut_real_id, s
return None, None
def get_configuration(self, setting, stage=None):
# get setting from configure
if stage:
if stage in self.stages.keys():
if 'configure' in self.stages[stage].keys() and \
setting in self.stages[stage]['configure'].keys():
return self.stages[stage]['configure'][setting]
else:
raise ValueError("Invalid stage '" + stage)
if self.configure:
if setting in self.configure.keys():
return self.configure[setting]
return None
    def skip_stages(self, stage_list):
        '''
        Re-route around a set of stages by changing all references to those
        stages to point to the next stage in the flow.
        stage_list is a list of stage IDs to skip
        Exceptions:
            FlowError if an infinite loop is detected
        '''
        # NOTE: relies on self.flow_controls (the list of "next*" keys) and
        # self.max_depth, both set elsewhere on the class (not visible here).
        if not stage_list:
            return
        reroutes = True
        depth = 0
        # Keep sweeping until a full pass makes no changes; a skipped stage
        # may itself point to another skipped stage, which gets rerouted on
        # the next pass.
        while reroutes:
            reroutes = False
            depth += 1
            # Possible looping stage, give up
            if self.max_depth != -1 and depth > self.max_depth:
                raise FlowError("Infinite loop detected in flow; depth=" +
                                str(depth - 1))
            # Route init stage
            if self.init_stage in stage_list:
                d_stage = self.stages[self.init_stage]
                # Prefer the skipped stage's pass route, else its default
                if "next_pass" in d_stage:
                    new_fc = d_stage["next_pass"]
                else:
                    new_fc = d_stage["next_default"]
                self.init_stage = new_fc
                reroutes = True
            # Route any other stages so they point to the next stage
            # If the next stage is also disabled it will be rerouted
            # again on the next loop
            for id, s in self.stages.items():
                for fc in self.flow_controls:
                    if s.get(fc, None) in stage_list:
                        d_stage = self.stages[s[fc]]
                        if "next_pass" in d_stage:
                            new_fc = d_stage["next_pass"]
                        else:
                            # Follow the same flow-control key if the
                            # skipped stage has it, else its default
                            new_fc = d_stage.get(fc, d_stage["next_default"])
                        s[fc] = new_fc
                        reroutes = True
def remove_stages(self, stage_list):
'''
Similar to skip_stage except the stage is not only routed around but
removed entirely
stage_list is a list of stage IDs to remove
Exceptions:
FlowError if an infinite loop is detected (raised by skip_stages)
'''
self.skip_stages(stage_list)
for s in stage_list:
if s in self.stages:
del self.stages[s]
# TODO: Rename Module to something else -- the term is overloaded and not
# accurate in this case.
#
# It was added with commit 9e28ba19801dd90f2d29d999487c64b64929.
# It is used in :path:`module/*.py` but is not imported directly;
# see :cmd:`grep -r '\.Module'` for call sites.
class Module(JCF):
    """
    Utility class functions for use with modules.

    Loads the "leader" JCF placed next to a running substage and derives
    all working-area paths from the leader file's location. Also wires up
    job status access (Status / LiveStatus / LiveJCF).
    """
    def __init__(self, leader_file):
        # leader_file = path to the leader JCF for the current substage
        leader_file = abspath(leader_file)
        if not isfile(leader_file):
            raise ValueError("Leader file " + leader_file + " not found")
        JCF.__init__(self, leader_file)
        # NOTE(review): process_stages runs both before and after
        # process_includes -- presumably so included stages are processed
        # too; confirm before changing.
        self.process_stages()
        self.process_includes()
        self.process_stages()
        self.process_file_vars()
        # Set various areas based on the location of leader_file
        # (substage dir -> its parent stage dir -> job dir three levels up)
        self.module_info["substage_working_area"] = dirname(leader_file)
        self.module_info["stage_working_area"] = dirname(dirname(leader_file))
        self.module_info["job_working_area"] = dirname(dirname(dirname(dirname(leader_file))))
        self.module_info["job_status_file"] = join(self.module_info["job_working_area"], "status.json")
        module_name = self.get_substage_module_name(self.module_info["stage_id"], self.module_info["substage_id"])
        if module_name:
            self.module_info["module_working_area"] = join(self.module_info["job_working_area"], "module", module_name)
        else:
            self.module_info["module_working_area"] = None
        # Built-in job status access
        self.status = Status(
            self.module_info["job_status_file"],
            autoupdate=True)
        self.live_status = LiveStatus(
            self.module_info["job_working_area"],
            self.module_info["job_status_file"],
            autoupdate=True)
        self.live_jcf = LiveJCF(self.module_info["job_working_area"])
    def interpolate_string(self, string, scope=None, filter=""):
        '''
        A public version of _interpolate_string(). It is slightly different
        in that the scope default is that of the active stage, and it will
        also interpolate system variables.
        This is the atomic function at the heart of all variable interpolation.
        This public version is meant to be used to extend Cirrus variables
        to external files or data. (e.g. read file in as string, pass it into
        this function, then write it out again.)
        string = the string to interpolate
        scope = the specific scope to look at (serial number of the object).
            If set to None (default) then use current stage scope.
            Special scopes:
                "*" means search every scope that is available and returns
                the first occurrence of a variable. If multiple occurrences
                exist then one returned is unpredictable under "*" scope.
                "^" force global scope only
        filter = Limits interpolation to variables that start with filter.
            For example if "suts" is the filter then only ${suts...}
            variables will be interpolated.
        '''
        s = str()
        if scope == "^":
            s = self._interpolate_string(string, None, filter)
        elif scope is None:
            stage = self.get_stage_id()
            # NOTE(review): 's' is reused here for the stage dict; if the
            # stage exists but lacks "_serial", the (truthy) dict skips the
            # fallback below and reaches .replace() -- likely a bug.
            s = self.get_stage_by_name(stage)
            if s and "_serial" in s:
                s = self._interpolate_string(string, s["_serial"], filter)
        # Covers all other cases
        if not s:
            s = self._interpolate_string(string, scope, filter)
        # Write out system variable for job ID
        for sv in (
            "substage_working_area",
            "stage_working_area",
            "job_working_area"
        ):
            s = s.replace("${" + sv + "}", self.module_info[sv])
        s = s.replace("${cirrus_stage_id}", self.module_info["stage_id"])
        s = s.replace("${cirrus_substage_id}", self.module_info["substage_id"])
        s = s.replace("${cirrus_stage_sut}", self.get_stage_sut())
        s = s.replace("${cirrus_job_id}", self.info.get("cirrus_job_id", "UNKNOWN"))
        return s
    def get_module_settings(self):
        '''
        Return the settings dict of the currently-running stage/substage.
        Raises:
            LookupError when the stage/substage recorded in module_info is
            missing from the stage structure.
        '''
        stage_id = self.module_info["stage_id"]
        substage_id = self.module_info["substage_id"]
        if stage_id in self.stages and substage_id in self.stages[stage_id]:
            return self.stages[stage_id][substage_id]
        else:
            raise LookupError("{0}/{1}".format(stage_id, substage_id) +
                              " not in stage structure, JCF corrupted!")
    def get_job_working_area(self):
        '''Return the job-level working directory.'''
        return self.module_info["job_working_area"]
    def get_stage_working_area(self):
        '''Return the stage-level working directory.'''
        return self.module_info["stage_working_area"]
    def get_substage_working_area(self):
        '''Return the substage-level working directory.'''
        return self.module_info["substage_working_area"]
    def get_module_working_area(self):
        '''Return the module working directory, or None if no module.'''
        return self.module_info["module_working_area"]
    def get_job_status_file(self):
        '''Return the path to the job's status.json file.'''
        return self.module_info["job_status_file"]
    def get_stage_id(self):
        '''Return the ID of the currently-running stage.'''
        return self.module_info["stage_id"]
    def get_substage_id(self):
        '''Return the ID of the currently-running substage.'''
        return self.module_info["substage_id"]
    def get_stage_data(self):
        '''Return the stage dict of the currently-running stage.'''
        return self.get_stage_by_name(self.get_stage_id())
    def get_stage_sut(self):
        '''Return the current stage's SUT name, defaulting to "sut".'''
        data = self.get_stage_data()
        if "stage_sut" in data:
            return data["stage_sut"]
        else:
            return "sut"
    def get_substage_data(self):
        '''Return the substage dict of the currently-running substage.'''
        return self.get_substage_by_name(self.get_stage_id(), self.get_substage_id())
    def get_stage_target(self):
        '''Return the current stage's "target" setting.'''
        return self.get_stage_data()["target"]
    def get_stage_sut_data(self):
        '''Return the SUT data dict for the current stage's SUT.'''
        sut_id, sut_data = self.get_sut(self.get_stage_sut())
        return sut_data
    def get_ckey(self, key=None, scope=None, default=None):
        # Same as JCF.get_ckey but the scope defaults to the current stage
        if scope is None:
            scope = self.get_stage_data().get("_serial", None)
        return super(Module, self).get_ckey(key, scope, default)
    def get_local_ckey(self, key=None, scope=None, default=None):
        # Same as JCF.get_local_ckey but the scope defaults to the current stage
        if scope is None:
            scope = self.get_stage_data().get("_serial", None)
        return super(Module, self).get_local_ckey(key, scope, default)
class Status(object):
    '''
    Read/write access to a job status file (JSON). With autoupdate
    enabled (the default) the file is re-read before every query and
    written back after every mutation.
    '''
    def __init__(self, status_file, autoupdate=True):
        # Path to the JSON status file
        self.status_file = status_file
        # Last-loaded status dict (None until update() runs)
        self.status = None
        # Re-read the file before each access when True
        self.autoupdate = autoupdate
    def update(self):
        '''Reload the status dict from disk.'''
        self.status = util.read_json(self.status_file)
    def write(self):
        '''Write the current status dict to disk.'''
        util.write_json(self.status_file, self.status)
    def _get_job_data(self, key):
        '''Return a top-level (job) status value, or None if absent.'''
        if self.autoupdate:
            self.update()
        if self.status and key in self.status:
            return self.status[key]
        else:
            return None
    def _get_stage_data(self, name, key):
        '''Return a per-stage status value, or None if absent.'''
        if self.autoupdate:
            self.update()
        if self.status and "stages" in self.status and name in self.status["stages"]:
            return self.status["stages"][name].get(key, None)
        return None
    def _set_stage_data(self, name, key, value):
        '''Set a per-stage value, creating structure as needed, and persist.'''
        if self.autoupdate:
            self.update()
        if not self.status:
            self.status = dict()
        if "stages" not in self.status:
            self.status["stages"] = dict()
        if name in self.status["stages"]:
            self.status["stages"][name][key] = value
        else:
            self.status["stages"][name] = {key: value}
        self.write()
    def stage_ran(self, name):
        '''Return True when the stage has any recorded status.'''
        return bool(self.get_stage_status(name))
    def get_stage_status(self, name):
        return self._get_stage_data(name, "status")
    def get_stage_code(self, name):
        return self._get_stage_data(name, "code")
    def get_stage_signal(self, name):
        return self._get_stage_data(name, "signal")
    def get_stage_message(self, name):
        return self._get_stage_data(name, "message")
    def set_stage_message(self, name, message):
        self._set_stage_data(name, "message", message)
    def append_stage_message(self, name, message):
        '''Append to a stage's message, separating entries with "; ".'''
        try:
            current_message = self._get_stage_data(name, "message")
        except Exception:
            # Fixed: narrowed from a bare "except:", which also swallowed
            # SystemExit/KeyboardInterrupt
            current_message = ""
        if current_message:
            self._set_stage_data(name, "message", current_message + "; " + message)
        else:
            self._set_stage_data(name, "message", message)
    def get_stage_time_start(self, name):
        return self._get_stage_data(name, "time_start")
    def get_stage_time_end(self, name):
        return self._get_stage_data(name, "time_end")
    def get_stage_duration(self, name):
        '''Return end - start for a stage, or None if either is missing.'''
        end = self.get_stage_time_end(name)
        start = self.get_stage_time_start(name)
        if not start or not end:
            return None
        return end - start
    def get_job_status(self):
        return self._get_job_data("status")
    def get_job_code(self):
        return self._get_job_data("code")
    def get_job_signal(self):
        return self._get_job_data("signal")
    def get_job_message(self):
        return self._get_job_data("message")
    def get_job_time_start(self):
        return self._get_job_data("time_start")
    def get_job_time_end(self):
        return self._get_job_data("time_end")
    def get_job_duration(self):
        '''Return end - start for the job, or None if either is missing.'''
        end = self.get_job_time_end()
        start = self.get_job_time_start()
        if not start or not end:
            return None
        return end - start
class LiveStatus(Status):
    '''
    Same as Status but uses the Agent for certain I/O operations.

    Instead of mutating self.status and writing the status file locally,
    stage updates are serialized as events and sent through an EventQueue,
    blocking until the receiving side has processed them.
    '''
    def __init__(self, agent_working_dir, status_file, autoupdate=True):
        super(LiveStatus, self).__init__(status_file, autoupdate)
        # Sender-side queue; the receiving agent applies the updates.
        self.status_queue = util.EventQueue("status", queue_root_dir=agent_working_dir, queue_type="sender")
    def _wait_for_receiver(self, event_id):
        # Block (polling every 3 seconds) until the receiver reports that
        # all events up to event_id have been processed.
        while not self.status_queue.wait_all_events_processed(event_id):
            time.sleep(3)
    def _set_stage_data(self, name, key, value):
        # Overrides Status._set_stage_data: the single-key update is wrapped
        # in an 'update' event and shipped through the queue rather than
        # written to the local status file.
        if self.autoupdate:
            self.update()
        d = {
            "stages": {
                name: {
                    key: value
                }
            }
        }
        msg = util.Event(d)
        msg.type = "update"
        self.status_queue.put(msg)
        self.status_queue.send_events()
        # Synchronous: don't return until the receiver has applied the update.
        self._wait_for_receiver(msg.event_id)
class LiveJCF():
    """Sends JCF 'update' events through the agent's event queue."""

    def __init__(self, agent_working_dir):
        self.jcf_queue = util.EventQueue("jcf", queue_root_dir=agent_working_dir, queue_type="sender")

    def _wait_for_receiver(self, event_id):
        # Poll (every 3s) until everything up to event_id has been consumed.
        while not self.jcf_queue.wait_all_events_processed(event_id):
            time.sleep(3)

    def update(self, data):
        """Send `data` as an 'update' event and block until it is processed."""
        event = util.Event(data)
        event.type = "update"
        self.jcf_queue.put(event)
        self.jcf_queue.send_events()
        self._wait_for_receiver(event.event_id)
| [
"apuaj@hpe.com"
] | apuaj@hpe.com |
f05d2c71ce52bff9656cf0194c7cf3ab35c12a64 | 630bf979e99b1b0e14f7ffdc65c18ba470ce2fe0 | /neuclease/dvid/annotation.py | 7422c0b6646b86f1ad03bcd07db19fd449660caa | [
"BSD-3-Clause"
] | permissive | y2mk1ng/neuclease | 0384294259aa592b4e58de2df959f3a3d9ca1338 | 02e36d7d76859d391c080e2a8690d1f80247f308 | refs/heads/master | 2022-12-14T02:15:44.251677 | 2020-09-16T20:57:11 | 2020-09-16T20:57:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,848 | py | import os
import sys
import logging
import warnings
from itertools import chain
from functools import partial
from collections import namedtuple
import ujson
import numpy as np
import pandas as pd
from dvidutils import LabelMapper
from . import dvid_api_wrapper, fetch_generic_json
from .common import post_tags
from .node import fetch_instance_info
from .voxels import fetch_volume_box
from ..util import Timer, Grid, boxes_from_grid, round_box, tqdm_proxy, compute_parallel, gen_json_objects, encode_coords_to_uint64, decode_coords_from_uint64
logger = logging.getLogger(__name__)
@dvid_api_wrapper
def post_sync(server, uuid, instance, sync_instances, replace=False, *, session=None):
    """
    Append to (or replace) the list of data instances with which
    this annotation instance is synced.

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        sync_instances: list of instance names to sync with, e.g. ['segmentation']
        replace: If True, replace the existing sync list instead of appending.
    """
    params = {'replace': 'true'} if replace else {}
    body = {"sync": ",".join(sync_instances)}
    r = session.post(f'{server}/api/node/{uuid}/{instance}/sync', json=body, params=params)
    r.raise_for_status()
# Synonym for post_sync(); disambiguates from other instance types' sync helpers.
post_annotation_sync = post_sync
# The common post_tags() function (imported above from .common) already works
# for annotation instances, so it is simply re-exported by this module.
#post_tags = post_tags
@dvid_api_wrapper
def post_reload(server, uuid, instance, *, check=False, inmemory=True, session=None): # Note: See wrapper_proxies.post_reload()
    """
    Trigger asynchronous recreation of the instance's tag- and
    label-indexed denormalizations (e.g. after a bulk /blocks load).

    Notes:
        - This call returns immediately; the server-side reload can take
          hours for large volumes.  Completion is only visible in the dvid
          log ("Finished denormalization").
        - POST requests to the instance error out while the reload runs.

    Args:
        server: dvid server, e.g. 'emdata4:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        check:
            If True, verify the existing denormalizations and only rebuild
            the ones found to be incorrect (issues are logged).
        inmemory:
            If True, perform the reload in RAM (assumes the server can hold
            all annotations in memory).
    """
    params = {}
    if check:
        params['check'] = "true"
    if not inmemory:
        params['inmemory'] = "false"
    url = f'{server}/api/node/{uuid}/{instance}/reload'
    r = session.post(url, params=params)
    r.raise_for_status()
# Synonym for post_reload(); disambiguates from other instance types' reload helpers.
post_annotation_reload = post_reload
@dvid_api_wrapper
def fetch_label(server, uuid, instance, label, relationships=False, *, format='json', session=None):
    """
    Fetch all point annotations belonging to the given body (label).

    Requires the annotation instance to be synced with a voxel label
    instance (labelblk, labelarray, labelmap).

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        label: Body ID
        relationships: If True, include each annotation's relationships.
        format: 'json' for the raw element list, 'pandas' for a DataFrame.

    Returns:
        JSON list or pandas DataFrame
    """
    assert format in ('json', 'pandas')
    params = {'relationships': str(bool(relationships)).lower()}
    r = session.get(f'{server}/api/node/{uuid}/{instance}/label/{label}', params=params)
    r.raise_for_status()
    elements = r.json()
    if format == 'pandas':
        return load_elements_as_dataframe(elements)
    return elements
# Synonym for fetch_label(). See wrapper_proxies.py
fetch_annotation_label = fetch_label
@dvid_api_wrapper
def fetch_tag(server, uuid, instance, tag, relationships=False, *, session=None):
    """
    Fetch all point annotations carrying the given tag.

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        tag: The tag to search for
        relationships: If True, include each annotation's relationships.

    Returns:
        JSON list
    """
    rel_flag = 'true' if relationships else 'false'
    url = f'{server}/api/node/{uuid}/{instance}/tag/{tag}'
    r = session.get(url, params={'relationships': rel_flag})
    r.raise_for_status()
    return r.json()
@dvid_api_wrapper
def fetch_roi(server, uuid, instance, roi, roi_uuid=None, *, session=None):
    """
    Fetch all point annotations that fall within the given ROI.

    Currently this only works for ROIs whose block size matches the
    annotation instance's block size, so most 32px-block ROIs are
    incompatible with 64px-block labelmap instances.

    Warning:
        The name 'fetch_roi()' clashes with a function in dvid.roi, so you
        may need to explicitly import dvid.annotations to access this function:

        from dvid.annotations import fetch_roi

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        roi: The name of a roi instance, e.g. 'AL-lm'
        roi_uuid:
            If provided, fetch the ROI at this version instead of the
            version of the annotation instance.

    Returns:
        JSON list
    """
    if roi_uuid:
        roi = f'{roi},{roi_uuid}'
    r = session.get(f'{server}/api/node/{uuid}/{instance}/roi/{roi}')
    r.raise_for_status()
    return r.json()
# Synonym for the function above, to avoid conflicts with roi.fetch_roi()
fetch_annotation_roi = fetch_roi
@dvid_api_wrapper
def fetch_elements(server, uuid, instance, box_zyx, *, format='json', session=None): #@ReservedAssignment
    """
    Fetch all point annotations within the given box.

    Note:
        Relationships are included when format='json' and
        discarded when format='pandas'.

    Note:
        Best suited to relatively sparse annotations (e.g. to-dos).
        For synapses, see ``fetch_synapses_in_batches()``.

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        box_zyx:
            Subvolume bounds as (start, stop) in Z,Y,X order,
            e.g. [(0,0,0), (10,20,30)].  Need not be block-aligned.
        format:
            'json' for the raw element list, or 'pandas' for a DataFrame
            with separate columns for X,Y,Z and each property.

    Returns:
        JSON list or pandas DataFrame
    """
    assert format in ('json', 'pandas')
    box_zyx = np.asarray(box_zyx)

    # The endpoint takes X,Y,Z-ordered shape and offset strings.
    shape_xyz = (box_zyx[1] - box_zyx[0])[::-1]
    offset_xyz = box_zyx[0, ::-1]
    shape_str = '_'.join(map(str, shape_xyz))
    offset_str = '_'.join(map(str, offset_xyz))

    url = f'{server}/api/node/{uuid}/{instance}/elements/{shape_str}/{offset_str}'

    # Old servers return 'null' instead of an empty list; normalize to [].
    elements = fetch_generic_json(url, session=session) or []

    if format == 'pandas':
        return load_elements_as_dataframe(elements)
    return elements
def load_elements_as_dataframe(elements):
    """
    Convert a JSON list of annotation elements into a DataFrame,
    with one column per coordinate axis ('z', 'y', 'x') and one
    column per property encountered in any element.

    Note:
        For synapse annotations in particular,
        see ``load_synapses_as_dataframes()``
    """
    n = len(elements)
    coords_xyz = np.zeros((n, 3), dtype=np.int32)
    kind_col = []
    tag_col = []
    prop_cols = {}

    for row, element in enumerate(elements):
        coords_xyz[row] = element['Pos']
        kind_col.append(element['Kind'])
        tag_col.append(element['Tags'])

        # Properties are sparse: allocate an object column lazily the
        # first time each property name appears.  (Rows without the
        # property are left as None.)
        props = element.get('Prop') or {}
        for prop_name, prop_val in props.items():
            col = prop_cols.setdefault(prop_name, np.empty(n, dtype=object))
            col[row] = prop_val

    return pd.DataFrame({'z': coords_xyz[:, 2],
                         'y': coords_xyz[:, 1],
                         'x': coords_xyz[:, 0],
                         'kind': kind_col,
                         'tags': tag_col,
                         **prop_cols})
@dvid_api_wrapper
def fetch_all_elements(server, uuid, instance, format='json', *, session=None):
    """
    Returns all point annotations in the entire data instance, which could exceed data
    response sizes (set by server) if too many elements are present. This should be
    equivalent to the /blocks endpoint but without the need to determine extents.

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        format:
            'json' for the raw /all-elements response (the same
            { block_id : element-list } structure as the /blocks endpoint),
            or 'pandas' for a single DataFrame of all elements, as produced
            by ``load_elements_as_dataframe()``.

    Returns:
        dict or DataFrame, depending on ``format``.
    """
    # Fix: 'format' was previously accepted but silently ignored;
    # now it behaves like the other fetch_* functions in this module.
    assert format in ('json', 'pandas')
    url = f'{server}/api/node/{uuid}/{instance}/all-elements'
    blocks = fetch_generic_json(url, session=session)
    if format == 'pandas':
        # Flatten the per-block element lists before converting.
        return load_elements_as_dataframe(list(chain(*(blocks or {}).values())))
    return blocks
@dvid_api_wrapper
def post_elements(server, uuid, instance, elements, kafkalog=True, *, session=None):
    """
    Add or modify point annotations.

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        elements:
            Elements as JSON data (a python list-of-dicts), in the same
            format returned by fetch_elements().  NOT the dict format
            returned by fetch_blocks() -- if your data came from
            fetch_blocks(), concatenate the dict's values first.
        kafkalog:
            If True, log kafka events for each posted element.

    Example:

        from itertools import chain
        blocks = fetch_blocks(server, uuid, instance_1, box)
        elements = list(chain(*blocks.values()))
        post_elements(server, uuid, instance_2, elements)
    """
    params = {}
    if not kafkalog or kafkalog == 'off':
        params['kafkalog'] = 'off'

    url = f'{server}/api/node/{uuid}/{instance}/elements'
    r = session.post(url, json=elements, params=params)
    r.raise_for_status()
@dvid_api_wrapper
def fetch_blocks(server, uuid, instance, box_zyx, *, session=None):
    """
    Fetch all point annotations within every block that intersects the given box.

    Differences from fetch_elements():
      - Annotations outside the box (but inside an intersecting block)
        are included in the response.
      - The return value is a dict keyed by block id, not a flat list.

    Note: Relationships are automatically included.

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        box_zyx:
            Subvolume bounds as (start, stop) in Z,Y,X order,
            e.g. [(0,0,0), (10,20,30)].  Need not be block-aligned.

    Returns:
        JSON dict { block_id : element-list }
    """
    box_zyx = np.asarray(box_zyx)

    # The endpoint takes X,Y,Z-ordered shape and offset strings.
    shape_xyz = (box_zyx[1] - box_zyx[0])[::-1]
    offset_xyz = box_zyx[0, ::-1]
    shape_str = '_'.join(map(str, shape_xyz))
    offset_str = '_'.join(map(str, offset_xyz))

    url = f'{server}/api/node/{uuid}/{instance}/blocks/{shape_str}/{offset_str}'
    return fetch_generic_json(url, session=session)
@dvid_api_wrapper
def post_blocks(server, uuid, instance, blocks_json, kafkalog=False, *, session=None):
    """
    Low-level bulk ingestion of point annotations via the POST /blocks endpoint.

    Unlike POST /elements, this is the fastest way to store all point
    annotations, but it assumes the caller has (1) already partitioned the
    elements into the correct blocks for the instance's block size
    (default 64), and (2) will POST /reload afterward to rebuild the
    denormalized Label and Tag indexes.

    This low-level ingestion also does not transmit subscriber events to
    associated synced data (e.g., labelsz).

    The POSTed JSON should be similar to the GET version, keyed by block
    coordinate:

        {
          "10,381,28": [ array of point annotation elements ],
          "11,381,28": [ array of point annotation elements ],
          ...
        }
    """
    params = {} if kafkalog else {'kafkalog': 'off'}
    url = f'{server}/api/node/{uuid}/{instance}/blocks'
    payload = ujson.dumps(blocks_json).encode('utf-8')
    r = session.post(url, data=payload, params=params)
    r.raise_for_status()
@dvid_api_wrapper
def delete_element(server, uuid, instance, coord_zyx, kafkalog=True, *, session=None):
    """
    Delete a point annotation at the given location.

    Args:
        server: dvid server, e.g. 'emdata3:8900'
        uuid: dvid uuid, e.g. 'abc9'
        instance: dvid annotations instance name, e.g. 'synapses'
        coord_zyx: coordinate (Z,Y,X)
        kafkalog: If True, log this deletion in kafka.  Otherwise, don't.
    """
    assert len(coord_zyx) == 3

    # The endpoint expects X_Y_Z order.
    coord_str = '_'.join(str(c) for c in coord_zyx[::-1])

    params = {} if kafkalog else {'kafkalog': 'off'}
    r = session.delete(f'{server}/api/node/{uuid}/{instance}/element/{coord_str}', params=params)
    r.raise_for_status()
class SynapseWarning(UserWarning):
    """Warning category for inconsistencies detected in fetched synapse data."""
    pass
def load_synapses_as_dataframes(elements, return_both_partner_tables=False):
    """
    Load the given JSON elements as synapses a DataFrame.

    Args:
        elements:
            JSON list of synapse annotation elements as returned by
            fetch_elements(), etc.

        return_both_partner_tables:
            Debugging feature.
            Helps detect DVID data inconsistencies, if used correctly.
            If True, return two separate partner tables, computed
            from the PreSyn and PostSyn relationship data, respectively.
            That is, pre_partner_df contains the pre->post pairs found
            in the 'PreSynTo' relationships, and post_partner_df contains
            the pre->post found in the 'PostSynTo' relationships.

            Note that the two tables will likely NOT be identical,
            unless the given elements include every synapse in your volume.
            By default, combine (and de-duplicate) the two tables.

    Returns:
        point_df:
            One row for every t-bar and psd in the file, indicating its
            location, confidence, and synapse type (PostSyn or PreSyn)
            Columns: ['z', 'y', 'x', 'conf', 'kind', 'user']
            Index: np.uint64, an encoded version of [z,y,x]

        [post_]partner_df:
            Indicates which T-bar each PSD is associated with.
            One row for every psd in the file.
            Columns: ['pre_id', 'post_id']
            where the values correspond to the index of point_df.
            Note:
                It can generally be assumed that for the synapses we
                load into dvid, every PSD (PostSyn) is
                associated with exactly one T-bar (PreSyn).

        [pre_partner_df]:
            Only returned if return_both_partner_tables=True
    """
    #with warnings.catch_warnings():
    #    warnings.simplefilter("once", category=SynapseWarning)
    return _load_synapses_as_dataframes(elements, return_both_partner_tables)


def _load_synapses_as_dataframes(elements, return_both_partner_tables):
    """
    Implementation of load_synapses_as_dataframes(), above.
    """
    if not elements:
        # Fix: return empty tables with the same column names and ORDER as
        # the non-empty case below (and as documented above).  Previously
        # this branch returned ['x','y','z',...] and ['post_id','pre_id'].
        point_df = pd.DataFrame([], columns=['z', 'y', 'x', 'kind', 'conf', 'user'])
        partner_df = pd.DataFrame([], columns=['pre_id', 'post_id'], dtype=np.uint64)
        if return_both_partner_tables:
            return point_df, partner_df, partner_df
        else:
            return point_df, partner_df

    # Accumulating separate lists for each column ought to be
    # faster than building a list-of-tuples, I think.

    # Primary columns
    xs = []
    ys = []
    zs = []
    kinds = []
    confs = []
    users = []

    # Relationship coordinates
    # [(pre_z, pre_y, pre_x, post_z, post_y, post_x), ...]
    pre_rel_points = []
    post_rel_points = []

    need_fake_point = False

    for e in elements:
        x,y,z = e['Pos']
        xs.append(x)
        ys.append(y)
        zs.append(z)
        kinds.append( e['Kind'] )
        confs.append( float(e.get('Prop', {}).get('conf', 0.0)) )
        users.append( e.get('Prop', {}).get('user', '') )

        if 'Rels' not in e or len(e['Rels']) == 0:
            # In general, there should never be
            # a tbar or psd with no relationships at all.
            # That indicates an inconsistency in the database.
            # To keep track of such cases, we add a special connection to point (0,0,0).
            #warnings.warn("At least one synapse had no relationships! "
            #              "Adding artificial partner(s) to (0,0,0).",
            #              SynapseWarning)
            need_fake_point = True
            if e['Kind'] == 'PreSyn':
                pre_rel_points.append( (z,y,x, 0,0,0) )
            else:
                post_rel_points.append( (0,0,0, z,y,x) )
        else:
            for rel in e['Rels']:
                rx, ry, rz = rel['To']
                if rx == ry == rz == 0:
                    # We usually assume (0,0,0) is not a real synapse, so it can be used in the case of "orphan" synapses.
                    # But in this case, apparently a real synapse was found at (0,0,0), obfuscating the warning above.
                    warnings.warn("Huh? The fetched synapse data actually contains a relationship to point (0,0,0)!")

                if e['Kind'] == 'PreSyn':
                    pre_rel_points.append( (z,y,x, rz,ry,rx) )
                else:
                    post_rel_points.append( (rz,ry,rx, z,y,x) )

    # See warning above.
    if need_fake_point:
        xs.append(0)
        ys.append(0)
        zs.append(0)
        kinds.append('Fake')
        confs.append(0.0)
        users.append('neuclease.dvid.annotation.load_synapses_as_dataframes')

    point_df = pd.DataFrame( {'z': zs, 'y': ys, 'x': xs}, dtype=np.int32 )

    kind_dtype = pd.CategoricalDtype(categories=["PreSyn", "PostSyn", "Fake"], ordered=False)
    point_df['kind'] = pd.Series(kinds, dtype=kind_dtype)
    point_df['conf'] = pd.Series(confs, dtype=np.float32)
    point_df['user'] = pd.Series(users, dtype='category')
    point_df.index = encode_coords_to_uint64(point_df[['z', 'y', 'x']].values)
    point_df.index.name = 'point_id'

    def construct_partner_df(rel_points):
        # Encode both endpoints of each relationship into uint64 point ids.
        if rel_points:
            rel_points = np.array(rel_points, np.int32)
            pre_partner_ids = encode_coords_to_uint64(rel_points[:, :3])
            post_partner_ids = encode_coords_to_uint64(rel_points[:, 3:])
        else:
            pre_partner_ids = np.zeros((0,), dtype=np.uint64)
            post_partner_ids = np.zeros((0,), dtype=np.uint64)
        partner_df = pd.DataFrame({'pre_id': pre_partner_ids, 'post_id': post_partner_ids})
        return partner_df

    pre_partner_df = construct_partner_df(pre_rel_points)
    post_partner_df = construct_partner_df(post_rel_points)

    if return_both_partner_tables:
        return point_df, pre_partner_df, post_partner_df

    # For synapses near block borders, maybe only the PreSyn or
    # only the PostSyn happens to be in the given elements.
    # But in most cases, both PreSyn and PostSyn are present,
    # and therefore the relationship is probably listed twice.
    # Drop duplicates.
    partner_df = pd.concat((pre_partner_df, post_partner_df), ignore_index=True)
    partner_df.drop_duplicates(inplace=True)
    return point_df, partner_df
def fetch_bodies_for_synapses(server, uuid, seg_instance, point_df=None, partner_df=None, batch_size=10_000, threads=0, processes=0):
    """
    Look up the segmentation body under each synapse point and append the
    results as new columns, modifying the given dataframes IN-PLACE.

    Args:
        server, uuid, seg_instance:
            The labelmap instance to read labels from.
        point_df:
            Optional synapse point table with ['z', 'y', 'x'] columns.
            If provided, a 'body' column is added.
        partner_df:
            Optional partner table with 'pre_id'/'post_id' columns
            (uint64-encoded coordinates).  If provided, 'pre_body' and
            'post_body' columns are added.
        batch_size, threads, processes:
            Passed through to fetch_labels_batched().

    Returns:
        None.  The given dataframes are modified in-place.
    """
    # Imported here (not at module level), presumably to avoid a circular
    # import between this module and .labelmap -- TODO confirm.
    from .labelmap import fetch_labels_batched
    if point_df is not None:
        bodies = fetch_labels_batched(server, uuid, seg_instance, point_df[['z', 'y', 'x']].values,
                                      batch_size=batch_size, threads=threads, processes=processes)
        point_df['body'] = bodies
    if partner_df is not None:
        # The partner table stores encoded coordinates, not explicit z/y/x.
        pre_coords = decode_coords_from_uint64(partner_df['pre_id'].values)
        post_coords = decode_coords_from_uint64(partner_df['post_id'].values)
        partner_df['pre_body'] = fetch_labels_batched(server, uuid, seg_instance, pre_coords,
                                                      batch_size=batch_size, threads=threads, processes=processes)
        partner_df['post_body'] = fetch_labels_batched(server, uuid, seg_instance, post_coords,
                                                       batch_size=batch_size, threads=threads, processes=processes)
def fetch_synapses_in_batches(server, uuid, synapses_instance, bounding_box_zyx=None, batch_shape_zyx=(256,256,64000),
                              format='pandas', endpoint='blocks', processes=8, #@ReservedAssignment
                              check_consistency=False, return_both_partner_tables=False):
    """
    Fetch all synapse annotations for the given labelmap volume (or subvolume) and synapse instance.
    Box-shaped regions are queried in batches according to the given batch shape.
    Returns either the raw JSON or a pandas DataFrame.

    Note:
        Every synapse should have at least one partner (relationship).
        If a synapse is found without a partner, that indicates a problem with the database.
        In that case, a warning is emitted and the synapse is given an artificial partner to point (0,0,0).

    Note:
        On the hemibrain dataset (~70 million points),
        this function takes ~4 minutes if you use 32 processes.

    Warning:
        For large volumes with many synapses, the 'json' format requires a lot of RAM,
        and is not particularly convenient to save/load.

    See also:
        ``save_synapses_npy()``, ``load_synapses_npy()``

    Args:
        server:
            dvid server, e.g. 'emdata3:8900'

        uuid:
            dvid uuid, e.g. 'abc9'

        synapses_instance:
            dvid annotations instance name, e.g. 'synapses'

        bounding_box_zyx:
            The bounds of the subvolume from which to fetch synapse annotations.
            Given as a pair of coordinates (start, stop), e.g. [(0,0,0), (256,1024,1024)],
            in Z,Y,X order.  It must be block-aligned.

            If not provided, the entire bounding box of the sync'd
            labelmap instance (e.g. 'segmentation') is used.

        batch_shape_zyx:
            What box shape to use for each /elements request.
            Must be block-aligned (i.e. multiple of 64px in all dimensions).

        format:
            Either 'json' or 'pandas'. If 'pandas, return a DataFrame.

        endpoint:
            Either 'blocks' (faster) or 'elements' (supported on older servers).

        check_consistency:
            DVID debug feature. Checks for consistency in the response to the /blocks endpoint.

        return_both_partner_tables:
            Debugging feature.
            Helps detect DVID data inconsistencies, if used correctly.
            If True, return two separate partner tables, computed
            from the PreSyn and PostSyn relationship data, respectively.
            That is, pre_partner_df contains the pre->post pairs found
            in the 'PreSynTo' relationships, and post_partner_df contains
            the pre->post found in the 'PostSynTo' relationships.

            Note that the two tables will likely NOT be identical,
            unless the given elements include every synapse in your volume.
            When return_both_partner_tables=False, then automatically combine
            (and de-duplicate) the two tables.

    Returns:
        If format == 'json', a list of JSON elements.
        If format == 'pandas', returns two or three dataframes,
        depending on return_both_partner_tables:

        point_df:
            One row for every t-bar and psd in the file, indicating its
            location, confidence, and synapse type (PostSyn or PreSyn)
            Columns: ['z', 'y', 'x', 'conf', 'kind', 'user']
            Index: np.uint64, an encoded version of [z,y,x]

        [pre_]partner_df:
            Indicates which T-bar each PSD is associated with.
            One row for every psd in the file.
            Columns: ['pre_id', 'post_id']
            where the values correspond to the index of point_df.
            Note:
                It can generally be assumed that for the synapses we
                load into dvid, every PSD (PostSyn) is
                associated with exactly one T-bar (PreSyn).

        [post_partner_df]:
            Only returned if return_both_partner_tables=True
    """
    assert format in ('pandas', 'json')
    assert endpoint in ('blocks', 'elements')
    assert not return_both_partner_tables or format == 'pandas', \
        "return_both_partner_tables does not apply unless you're asking for pandas format"

    if bounding_box_zyx is None or isinstance(bounding_box_zyx, str):
        # Determine name of the segmentation instance that's
        # associated with the given synapses instance.
        syn_info = fetch_instance_info(server, uuid, synapses_instance)
        seg_instance = syn_info["Base"]["Syncs"][0]
        if isinstance(bounding_box_zyx, str):
            assert bounding_box_zyx == seg_instance, \
                ("The segmentation instance name you provided doesn't match the name of the sync'd instance.\n"
                 "Please provide an explicit bounding-box.")
        bounding_box_zyx = fetch_volume_box(server, uuid, seg_instance)
    else:
        bounding_box_zyx = np.asarray(bounding_box_zyx)
        assert (bounding_box_zyx % 64 == 0).all(), "box must be block-aligned"

    batch_shape_zyx = np.asarray(batch_shape_zyx)
    assert (batch_shape_zyx % 64 == 0).all(), "batch shape must be block-aligned"

    # One task per batch box, fanned out over a process pool.
    boxes = [*boxes_from_grid(bounding_box_zyx, Grid(batch_shape_zyx))]
    fn = partial(_fetch_synapse_batch, server, uuid, synapses_instance,
                 format=format, endpoint=endpoint, check_consistency=check_consistency,
                 return_both_partner_tables=return_both_partner_tables)
    initializer = None
    #initializer = lambda: warnings.simplefilter("once", category=SynapseWarning)
    results = compute_parallel(fn, boxes, processes=processes, ordered=False, leave_progress=True, initializer=initializer)

    if format == 'json':
        return list(chain(*results))
    elif format == 'pandas':
        if return_both_partner_tables:
            point_dfs, pre_partner_dfs, post_partner_dfs = zip(*results)
            pre_partner_dfs = [*filter(len, pre_partner_dfs)]
            post_partner_dfs = [*filter(len, post_partner_dfs)]
        else:
            point_dfs, partner_dfs = zip(*results)
            partner_dfs = [*filter(len, partner_dfs)]

        # Any zero-length dataframes might have the wrong dtypes,
        # which would screw up the concat step.  Remove them.
        point_dfs = [*filter(len, point_dfs)]
        if len(point_dfs) == 0:
            # Return empty dataframe
            return load_synapses_as_dataframes([], return_both_partner_tables)

        point_df = pd.concat(point_dfs)

        # Make sure user and kind are Categorical
        # (concat of per-batch categoricals may have decayed to object).
        point_df['kind'] = point_df['kind'].astype("category")
        point_df['user'] = point_df['user'].astype("category")

        # If any 'fake' synapses were added due to inconsistent data,
        # Drop duplicates among them.
        if (point_df['kind'] == "Fake").any():
            # All fake rows are the same.  Drop all but the first.
            fake_df = point_df.query('kind == "Fake"').iloc[0:1]
            point_df = pd.concat((fake_df, point_df.query('kind != "Fake"')))

        # Sort, mostly to ensure that the Fake point (if any) is at the top.
        point_df.sort_values(['z', 'y', 'x'], inplace=True)

        if return_both_partner_tables:
            pre_partner_df = pd.concat(pre_partner_dfs, ignore_index=True)
            post_partner_df = pd.concat(post_partner_dfs, ignore_index=True)
            return point_df, pre_partner_df, post_partner_df
        else:
            # Relationships near batch borders can appear in two batches;
            # de-duplicate across batches here.
            partner_df = pd.concat(partner_dfs, ignore_index=True)
            partner_df.drop_duplicates(inplace=True)
            return point_df, partner_df
def _fetch_synapse_batch(server, uuid, synapses_instance, batch_box, format, endpoint, # @ReservedAssignment
                         check_consistency, return_both_partner_tables):
    """
    Helper for fetch_synapses_in_batches(), above.
    Fetch all synapse annotations within a single batch box via the chosen
    endpoint, and return them in the requested format.

    With check_consistency=True ('blocks' endpoint only), also verify that
    every element DVID returned for a block actually lies within that
    block's bounds, raising RuntimeError otherwise.
    """
    assert not check_consistency or endpoint == 'blocks', \
        "check_consistency can only be used with the blocks endpoint."
    if endpoint == 'blocks':
        blocks = fetch_blocks(server, uuid, synapses_instance, batch_box)
        elements = list(chain(*blocks.values()))

        if check_consistency:
            for key, els in blocks.items():
                if len(els) == 0:
                    continue
                # Keys are comma-delimited block coordinates (in block units);
                # scale by the 64px block width to get a voxel bounding-box.
                block = [int(c) for c in key.split(',')]
                block_box = 64*np.array((block, block))
                block_box[1] += 64
                pos = np.array([e['Pos'] for e in els])
                if (pos < block_box[0]).any() or (pos >= block_box[1]).any():
                    msg = ("Detected a DVID inconsistency: Some elements fetched from block "
                           f"at {block_box[0, ::-1].tolist()} (XYZ) fall outside the block!")
                    raise RuntimeError(msg)
    elif endpoint == 'elements':
        elements = fetch_elements(server, uuid, synapses_instance, batch_box)
    else:
        raise AssertionError("Invalid endpoint choice")

    if format == 'json':
        return elements

    if return_both_partner_tables:
        point_df, pre_partner_df, post_partner_df = load_synapses_as_dataframes(elements, True)
        return point_df, pre_partner_df, post_partner_df
    else:
        point_df, partner_df = load_synapses_as_dataframes(elements, False)
        return point_df, partner_df
def save_synapses_npy(synapse_point_df, npy_path, save_index=None):
    """
    Save the given synapse point DataFrame to a .npy file,
    with careful handling of strings to avoid creating any
    pickled objects (which are annoying to load).

    Args:
        synapse_point_df:
            DataFrame with at least a 'kind' column (and optionally 'user').
        npy_path:
            Destination path for the .npy file.
        save_index:
            Whether to store the index as a field of the saved records.
            By default, save it only if the index has a name.
    """
    assert save_index in (True, False, None)
    if save_index is None:
        save_index = (synapse_point_df.index.name is not None)

    dtypes = {}

    # Avoid 'pickle' objects (harder to load) by converting
    # categories/strings to fixed-width strings.
    # Guard against an empty table: .max() of an empty Series is NaN,
    # which would previously produce an invalid dtype string ('Unan').
    def _width(col):
        if len(synapse_point_df) == 0:
            return 1
        # At least 1, so an all-empty-string column still gets a valid dtype.
        return max(1, int(synapse_point_df[col].map(len).max()))

    dtypes['kind'] = f'U{_width("kind")}'

    if 'user' in synapse_point_df:
        dtypes['user'] = f'U{_width("user")}'

    np.save(npy_path, synapse_point_df.to_records(index=save_index, column_dtypes=dtypes))
def load_synapses_npy(npy_path):
    """
    Load the given .npy file (as written by ``save_synapses_npy()``) as a
    synapse point DataFrame, with special handling of the string columns
    to use categorical dtypes (saves RAM).
    """
    records = np.load(npy_path, allow_pickle=True)

    numeric_cols = ['z', 'y', 'x', 'conf', 'label', 'body', 'sv']
    numeric_cols = [*filter(lambda c: c in records.dtype.names, numeric_cols)]

    df = pd.DataFrame(records[numeric_cols])
    if 'point_id' in records.dtype.names:
        df.index = records['point_id']

    # Fix: assign plain arrays (pd.Categorical), not pd.Series.
    # A Series constructed here carries a default RangeIndex, which fails
    # to align with the point_id index assigned above, silently producing
    # all-NaN 'kind'/'user' columns.  Arrays are assigned positionally.
    df['kind'] = pd.Categorical(records['kind'])
    if 'user' in records.dtype.names:
        df['user'] = pd.Categorical(records['user'])
    return df
def save_synapses_csv(synapse_point_df, csv_path, index=False):
    """
    Save the given synapse points table to CSV.

    Args:
        synapse_point_df: The synapse point table to write.
        csv_path: Destination path.
        index: Whether to include the index as a CSV column.

    Note:
        Usually it's more efficient to read/write .npy files.
        See ``save_synapses_npy()``.
    """
    synapse_point_df.to_csv(csv_path, header=True, index=index)
def load_synapses_csv(csv_path):
    """
    Read a saved synapse table from CSV with the proper dtypes.

    Note:
        Usually it's more efficient to read/write .npy files.
        See ``load_synapses_npy()``.
    """
    dtype = {
        'x': np.int32,
        'y': np.int32,
        'z': np.int32,
        'kind': 'category',
        'conf': np.float32,
        'user': 'category',
        'label': np.uint64,
        'body': np.uint64,
        'sv': np.uint64,
    }
    return pd.read_csv(csv_path, header=0, dtype=dtype)
def load_synapses(path):
    """
    Load synapse points from the given file path (.csv, .npy, or .json),
    or pass a DataFrame through unchanged.

    Returns:
        DataFrame of synapse points.
        (For .json inputs, the partner table is discarded.)
    """
    if isinstance(path, pd.DataFrame):
        return path

    assert isinstance(path, str)
    _, ext = os.path.splitext(path)
    assert ext in ('.csv', '.npy', '.json')

    if ext == '.csv':
        points_df = load_synapses_csv(path)
    elif ext == '.npy':
        points_df = load_synapses_npy(path)
    elif ext == '.json':
        # Fix: the path argument was previously not forwarded,
        # which raised a TypeError for every .json input.
        points_df, _partner_df = load_synapses_from_json(path)

    return points_df
def load_synapses_from_json(json_path, batch_size=1000):
    """
    Load the synapses to a dataframe from a JSON file
    (which must have the same structure as the elements response from DVID).

    The JSON file is consumed in batches, avoiding the need
    to load the entire JSON document in RAM at once.

    Returns:
        (point_df, partner_df)
    """
    point_dfs = []
    partner_dfs = []
    try:
        with open(json_path, 'r') as f:
            for elements in tqdm_proxy( gen_json_objects(f, batch_size) ):
                point_df, partner_df = load_synapses_as_dataframes(elements)
                point_dfs.append(point_df)
                partner_dfs.append(partner_df)

    except KeyboardInterrupt:
        msg = f"Stopping early due to KeyboardInterrupt. ({len(point_dfs)} batches completed)\n"
        sys.stderr.write(msg)

    if not point_dfs:
        # Empty file (or interrupted before the first batch completed):
        # pd.concat() raises ValueError on an empty list, so return
        # empty tables with the standard schema instead.
        return load_synapses_as_dataframes([])

    point_df = pd.concat(point_dfs)
    partner_df = pd.concat(partner_dfs)
    return point_df, partner_df
def load_relationships(elements, kind=None):
    """
    Flatten the relationships from a list of JSON annotation elements
    into a table of (from_x..from_z, to_x..to_z, rel) rows.

    Args:
        elements: JSON list of annotation elements (each with 'Rels').
        kind: If given, only include elements whose 'Kind' matches.
    """
    coord_rows = []
    rel_names = []

    for element in tqdm_proxy(elements):
        if kind and (kind != element['Kind']):
            continue
        fx, fy, fz = element['Pos']
        for rel in element['Rels']:
            tx, ty, tz = rel['To']
            coord_rows.append((fx, fy, fz, tx, ty, tz))
            rel_names.append(rel['Rel'])

    cols = ['from_x', 'from_y', 'from_z', 'to_x', 'to_y', 'to_z']
    df = pd.DataFrame(coord_rows, columns=cols, dtype=np.int32)
    df['rel'] = pd.Series(rel_names, dtype='category')
    return df
def compute_weighted_edge_table(relationships_df, synapses_df):
    """
    Given a synapse 'relationship table' with columns
    [from_x, from_y, from_z, to_x, to_y, to_z],
    and a synapse table with columns [x, y, z, body],
    perform the necessary merge operations to determine from_body and to_body
    for each relationship, then aggregate those relationships to yield a
    table of weights for each unique body pair.
    """
    bodies = synapses_df[['z', 'y', 'x', 'body']]

    def _bodies_at(prefix):
        # Map each relationship endpoint coordinate to the body label there.
        coord_cols = [f'{prefix}_z', f'{prefix}_y', f'{prefix}_x']
        return relationships_df.merge(bodies, how='left',
                                      left_on=coord_cols,
                                      right_on=['z', 'y', 'x'])['body']

    edges = pd.DataFrame({'from_body': _bodies_at('from'),
                          'to_body': _bodies_at('to')})

    weights = edges.groupby(['from_body', 'to_body']).size()
    weights = weights.sort_values(ascending=False)
    weights.name = 'weight'
    return weights.reset_index()
def load_gary_synapse_json(path, processes=8, batch_size=100_000):
    """
    Load a synapse json file from Gary's format into two tables.
    Args:
        path:
            A path to a .json file.
            See ``neuclease/tests/test_annotation.py`` for an example.
        processes:
            How many processes to use in parallel to load the data.
            Pass 0 to convert everything in the main process (not parallel).
        batch_size:
            The size (number of t-bars) to process per batch during multiprocessing.
    Returns:
        point_df:
            One row for every t-bar and psd in the file.
            Columns: ['z', 'y', 'x', 'conf', 'kind']
            Index: np.uint64, an encoded version of [z,y,x]
        partner_df:
            Indicates which T-bar each PSD is associated with.
            One row for every psd in the file.
            Columns: ['post_id', 'pre_id']
            where the values correspond to the index of point_df.
    Note:
        Gary guarantees that every PSD (PostSyn) is
        associated with exactly 1 T-bar (PreSyn).
    """
    logger.info(f"Loading JSON data from {path}")
    with open(path, 'r') as f:
        data = ujson.load(f)["data"]
    if processes == 0:
        logger.info("Generating tables in the main process (not parallel).")
        return _load_gary_synapse_data(data)
    # Slice the t-bar list into batches; each batch is converted
    # independently by a worker process.
    batches = []
    for batch_start in range(0, len(data), batch_size):
        batches.append(data[batch_start:batch_start+batch_size])
    logger.info(f"Converting via {len(batches)} batches (using {processes} processes).")
    results = compute_parallel(_load_gary_synapse_data, batches, processes=processes)
    point_dfs, partner_dfs = zip(*results)
    logger.info("Combining results")
    # Point indexes are coordinate-encoded ids (globally unique), so a plain
    # concat is safe; partner rows get a fresh RangeIndex instead.
    point_df = pd.concat(point_dfs)
    partner_df = pd.concat(partner_dfs, ignore_index=True)
    return point_df, partner_df
def _load_gary_synapse_data(data):
    """
    Helper for load_gary_synapse_json().

    Convert a list of T-bar entries (each with nested 'partners')
    into a point table and a partner table.
    """
    coords = []
    confs = []
    kinds = []
    pairs = []

    for syn in data:
        tx, ty, tz = syn["T-bar"]["location"]
        coords.append((tz, ty, tx))
        confs.append(float(syn["T-bar"]["confidence"]))
        kinds.append('PreSyn')

        for partner in syn["partners"]:
            px, py, pz = partner["location"]
            coords.append((pz, py, px))
            confs.append(float(partner["confidence"]))
            kinds.append('PostSyn')
            # Remember which t-bar this PSD belongs to (coordinates of both).
            pairs.append((tz, ty, tx, pz, py, px))

    points = np.array(coords, np.int32)
    point_df = pd.DataFrame(points, columns=['z', 'y', 'x'], dtype=np.int32)
    point_df['conf'] = np.array(confs, np.float32)
    point_df['kind'] = pd.Series(kinds, dtype='category')
    point_df.index = encode_coords_to_uint64(points)
    point_df.index.name = 'point_id'

    pair_points = np.array(pairs, dtype=np.int32)
    pre_ids = encode_coords_to_uint64(pair_points[:, :3])
    post_ids = encode_coords_to_uint64(pair_points[:, 3:])
    partner_df = pd.DataFrame({'post_id': post_ids, 'pre_id': pre_ids})
    return point_df, partner_df
def body_synapse_counts(synapse_samples):
    """
    Given a DataFrame of sampled synapses (or a path to a CSV file),
    tally synapse totals (by kind) for each body.

    Args:
        synapse_samples:
            DataFrame with at least columns ['body', 'kind'],
            or a path to a CSV file with those columns.

    Returns:
        DataFrame with columns: ['PreSyn', 'PostSyn'] (int32 counts), indexed by 'body'.
    """
    if isinstance(synapse_samples, str):
        synapse_samples = pd.read_csv(synapse_samples)

    assert 'body' in synapse_samples.columns, "Samples must have a 'body' col."
    assert 'kind' in synapse_samples.columns, "Samples must have a 'kind' col"

    synapse_samples = synapse_samples[['body', 'kind']]
    synapse_counts = synapse_samples.pivot_table(index='body', columns='kind', aggfunc='size')
    synapse_counts.fillna(0.0, inplace=True)

    if 0 in synapse_counts.index:
        msg = ("*** Synapse table includes body 0 and was therefore probably generated "
               "from out-of-date data OR some synapses in your data fall on voxels with "
               "no label (label 0). ***")
        logger.warning(msg)

    # Convert columns from categorical index to normal index,
    # so the caller can easily append their own columns if they want.
    synapse_counts.columns = synapse_counts.columns.tolist()

    # ROBUSTNESS FIX: if the input contained only one kind, the pivot produces
    # only one column; previously the missing column raised a KeyError below.
    # Reindexing also fixes the output column order in one step.
    synapse_counts = synapse_counts.reindex(columns=['PreSyn', 'PostSyn'], fill_value=0.0)

    synapse_counts['PostSyn'] = synapse_counts['PostSyn'].astype(np.int32)
    synapse_counts['PreSyn'] = synapse_counts['PreSyn'].astype(np.int32)
    return synapse_counts
def fetch_roi_synapses(server, uuid, synapses_instance, rois, fetch_labels=False, return_partners=False, processes=16):
    """
    Fetch the coordinates and (optionally) body labels for
    all synapses that fall within the given ROIs.
    Args:
        server:
            DVID server, e.g. 'emdata4:8900'
        uuid:
            DVID uuid, e.g. 'abc9'
        synapses_instance:
            DVID synapses instance name, e.g. 'synapses'
        rois:
            A single DVID ROI instance name or a list of them, e.g. 'EB' or ['EB', 'FB']
        fetch_labels:
            If True, also fetch the supervoxel and body label underneath each synapse,
            returned in columns 'sv' and 'body'.
        return_partners:
            If True, also return the partners table.
        processes:
            How many parallel processes to use when fetching synapses and supervoxel labels.
    Returns:
        pandas DataFrame with columns:
        ``['z', 'y', 'x', 'kind', 'conf', 'roi_label', 'roi']``
        and ``['body', 'sv']`` (if ``fetch_labels=True``)
        If return_partners is True, also return the partners table.
    Example:
        df = fetch_roi_synapses('emdata4:8900', '3c281', 'synapses', ['PB(L5)', 'PB(L7)'], True, processes=8)
    """
    # Late imports to avoid circular imports in dvid/__init__
    from neuclease.dvid import fetch_combined_roi_volume, determine_point_rois, fetch_labels_batched, fetch_mapping, fetch_mappings
    assert rois, "No rois provided, result would be empty. Is that what you meant?"
    if isinstance(rois, str):
        rois = [rois]
    # Determine name of the segmentation instance that's
    # associated with the given synapses instance.
    syn_info = fetch_instance_info(server, uuid, synapses_instance)
    seg_instance = syn_info["Base"]["Syncs"][0]
    logger.info(f"Fetching mask for ROIs: {rois}")
    # Fetch the ROI as a low-res array (scale 5, i.e. 32-px resolution)
    roi_vol_s5, roi_box_s5, overlapping_pairs = fetch_combined_roi_volume(server, uuid, rois)
    if len(overlapping_pairs) > 0:
        logger.warning("Some ROIs overlapped and are thus not completely represented in the output:\n"
                       f"{overlapping_pairs}")
    # Convert to full-res box
    roi_box = (2**5) * roi_box_s5
    # fetch_synapses_in_batches() requires a box that is 64-px-aligned
    roi_box = round_box(roi_box, 64, 'out')
    logger.info("Fetching synapse points")
    # points_df is a DataFrame with columns for [z,y,x]
    points_df, partners_df = fetch_synapses_in_batches(server, uuid, synapses_instance, roi_box, processes=processes)
    # Append a 'roi_name' column to points_df
    logger.info("Labeling ROI for each point")
    determine_point_rois(server, uuid, rois, points_df, roi_vol_s5, roi_box_s5)
    logger.info("Discarding points that don't overlap with the roi")
    # Convert to a set so the pandas query below is a fast membership test.
    rois = {*rois}
    points_df = points_df.query('roi in @rois').copy()
    columns = ['z', 'y', 'x', 'kind', 'conf', 'roi_label', 'roi']
    if fetch_labels:
        logger.info("Fetching supervoxel under each point")
        svs = fetch_labels_batched(server, uuid, seg_instance,
                                   points_df[['z', 'y', 'x']].values,
                                   supervoxels=True,
                                   processes=processes)
        with Timer("Mapping supervoxels to bodies", logger):
            # Arbitrary heuristic for whether to do the
            # body-lookups on DVID or on the client.
            if len(svs) < 100_000:
                bodies = fetch_mapping(server, uuid, seg_instance, svs)
            else:
                mapping = fetch_mappings(server, uuid, seg_instance)
                mapper = LabelMapper(mapping.index.values, mapping.values)
                bodies = mapper.apply(svs, True)
        points_df['sv'] = svs
        points_df['body'] = bodies
        columns += ['body', 'sv']
    if return_partners:
        # Filter
        #partners_df = partners_df.query('post_id in @points_df.index and pre_id in @points_df.index').copy()
        # Faster filter (via merge)
        # Keep only partner rows whose BOTH endpoints survived the ROI filter.
        partners_df = partners_df.merge(points_df[[]], 'inner', left_on='pre_id', right_index=True)
        partners_df = partners_df.merge(points_df[[]], 'inner', left_on='post_id', right_index=True)
        return points_df[columns], partners_df
    else:
        return points_df[columns]
def determine_bodies_of_interest(server, uuid, synapses_instance, rois=None, min_tbars=2, min_psds=10, processes=16, *, synapse_table=None, seg_instance=None):
    """
    Determine which bodies fit the given criteria
    for minimum synapse counts WITHIN the given ROIs.
    Note that the min_tbars and min_psds criteria are OR'd together.
    A body need only match at least one of the criteria to be considered "of interest".
    This function is just a convenience wrapper around calling
    fetch_roi_synapses(), fetch_labels_batched(), and body_synapse_counts().
    Note:
        If your synapse table is already loaded and already has a 'body' column,
        and you aren't providing any rois to filter with, then this function is
        merely equivalent to calling body_synapse_counts() and filtering it
        for tbar/psd requirements.
    Args:
        server:
            dvid server
        uuid:
            dvid uuid
        synapses_instance:
            synapses annotation instance name, e.g. 'synapses'
            If you are providing a pre-loaded synapse_table and overriding seg_instance,
            you can set synapses_instance=None.
        rois:
            A list of ROI instance names. If provided, ONLY synapses
            within these ROIs will be counted when determining bodies of interest.
            If not provided, all synapses in the volume will be counted.
        min_tbars:
            All bodies with at least this many t-bars (PreSyn annotations) will be "of interest".
        min_psds:
            All bodies with at least this many PSDs (PostSyn annotations) will be "of interest".
        processes:
            How many parallel processes to use when fetching synapses and body labels.
        synapse_table:
            If you have a pre-loaded synapse table (or a path to one stored as .npy or .csv),
            you may provide it here, in which case the synapse points won't be fetched from DVID.
            Furthermore, if the table already contains a 'body' column, then it is presumed to be
            accurate and body labels will not be fetched from DVID.
        seg_instance:
            If you want to override the segmentation instance name to use
            (rather than inspecting the synapse instance syncs), provide it here.
    Returns:
        pandas DataFrame, as returned by body_synapse_counts().
        That is, DataFrame with columns: ['PreSyn', 'PostSyn'], indexed by 'body',
        where only bodies of interest are included in the table.
    """
    from neuclease.dvid import fetch_labels_batched, fetch_combined_roi_volume, determine_point_rois
    # Download synapses if necessary
    if synapse_table is None:
        with Timer("Fetching synapse points", logger):
            if rois is None:
                # Fetch all synapses in the volume
                points_df, _partners_df = fetch_synapses_in_batches(server, uuid, synapses_instance, processes=processes)
            else:
                # Fetch only the synapses within the given ROIs
                points_df = fetch_roi_synapses(server, uuid, synapses_instance, rois, False, processes=processes)
    else:
        # User provided a pre-loaded synapse table (or a path to one)
        if isinstance(synapse_table, str):
            with Timer(f"Loading synapse table {synapse_table}", logger):
                _, ext = os.path.splitext(synapse_table)
                assert ext in ('.csv', '.npy')
                if ext == '.csv':
                    synapse_table = load_synapses_csv(synapse_table)
                elif ext == '.npy':
                    synapse_table = load_synapses_npy(synapse_table)
        assert isinstance(synapse_table, pd.DataFrame)
        assert not ({'z', 'y', 'x', 'kind'} - {*synapse_table.columns}), \
            "Synapse table does not contain all expected columns"
        points_df = synapse_table
        if rois:
            # Label each point with its ROI, then discard out-of-ROI points.
            roi_vol_s5, roi_box_s5, _ = fetch_combined_roi_volume(server, uuid, rois)
            determine_point_rois(server, uuid, rois, points_df, roi_vol_s5, roi_box_s5)
            points_df = points_df.query('roi_label != 0')
    if 'body' in points_df:
        logger.info("Using user-provided body labels")
    else:
        with Timer("Fetching synapse body labels", logger):
            if seg_instance is None:
                syn_info = fetch_instance_info(server, uuid, synapses_instance)
                seg_instance = syn_info["Base"]["Syncs"][0]
            points_df['body'] = fetch_labels_batched( server, uuid, seg_instance,
                                                      points_df[['z', 'y', 'x']].values,
                                                      processes=processes )
    # NOTE(review): unlike the other Timer calls above, this one omits the
    # logger argument — confirm whether that's intentional.
    with Timer("Aggregating body-wise synapse counts"):
        body_synapses_df = body_synapse_counts(points_df)
    # min_tbars / min_psds are OR'd, per the docstring.
    body_synapses_df = body_synapses_df.query('PreSyn >= @min_tbars or PostSyn >= @min_psds')
    return body_synapses_df
# Bundle of result tables returned by check_synapse_consistency().
ConsistencyResults = namedtuple(
    "ConsistencyResults",
    [
        "orphan_tbars",
        "orphan_psds",
        "pre_dupes",
        "post_dupes",
        "only_in_tbar",
        "only_in_psd",
        "bad_tbar_refs",
        "bad_psd_refs",
        "oversubscribed_post",
        "oversubscribed_pre",
    ],
)
def check_synapse_consistency(syn_point_df, pre_partner_df, post_partner_df):
    """
    Given a synapse point table and TWO partners tables as returned when
    calling ``fetch_synapses_in_batches(..., return_both_partner_tables=True)``,
    Analyze the relationships to look for inconsistencies.

    Returns:
        ConsistencyResults namedtuple of DataFrames, one per inconsistency type.
    Note:
        There are different types of results returned,
        and they are not mutually exclusive.
        For example, "orphan tbars" will also count toward
        "non-reciprocal relationships", and also contribute to the "oversubscribed"
        counts (since the orphans are artificially partnered to (0,0,0), which ends
        up counting as oversubscribed).
    """
    # 'Orphan' points (a tbar or psd with no relationships at all)
    # Orphans are encoded with a partner id of 0.
    orphan_tbars = pre_partner_df.query('post_id == 0')
    orphan_psds = post_partner_df.query('pre_id == 0')
    logger.info(f"Found {len(orphan_tbars)} orphan TBars")
    logger.info(f"Found {len(orphan_psds)} orphan psds")
    # Duplicate connections (one tbar references the same PSD twice or more)
    # duplicated() marks every copy after the first; drop_duplicates() keeps
    # one representative row per duplicated pair.
    pre_dupes = pre_partner_df.loc[pre_partner_df.duplicated()].drop_duplicates()
    post_dupes = post_partner_df.loc[post_partner_df.duplicated()].drop_duplicates()
    logger.info(f"Found {len(pre_dupes)} duplicated tbar->psd relationships.")
    logger.info(f"Found {len(post_dupes)} duplicated psd<-tbar relationships.")
    # Non-reciprocal (Tbar references PSD, but not the other way around, or vice-versa)
    # An outer merge with an indicator column reveals one-sided relationships.
    pre_nodupes_df = pre_partner_df.drop_duplicates()
    merged = pre_nodupes_df.merge(post_partner_df.drop_duplicates(), 'outer', ['pre_id', 'post_id'], indicator='which')
    only_in_tbar = merged.query('which == "left_only"')
    only_in_psd = merged.query('which == "right_only"')
    logger.info(f"Found {len(only_in_tbar)} non-reciprocal relationships from TBars")
    logger.info(f"Found {len(only_in_psd)} non-reciprocal relationships from PSDs")
    # Refs to nowhere (Tbar or PSD has a relationship to a point that doesn't exist)
    point_ids = syn_point_df.index
    bad_tbar_refs = pre_partner_df.query('post_id not in @point_ids')
    bad_psd_refs = post_partner_df.query('pre_id not in @point_ids')
    logger.info(f"Found {len(bad_tbar_refs)} references to non-existent PSDs")
    logger.info(f"Found {len(bad_psd_refs)} references to non-existent TBars")
    # Too many refs from a single PSD
    oversubscribed_post = post_partner_df.loc[post_partner_df.duplicated('post_id')]
    oversubscribed_pre = pre_nodupes_df.loc[pre_nodupes_df.duplicated('post_id')]
    logger.info(f"Found {len(oversubscribed_post)} PSDs that contain more than one relationship")
    logger.info(f"Found {len(oversubscribed_pre)} PSDs that are referenced by more than one TBar")
    return ConsistencyResults( orphan_tbars, orphan_psds,
                               pre_dupes, post_dupes,
                               only_in_tbar, only_in_psd,
                               bad_tbar_refs, bad_psd_refs,
                               oversubscribed_post, oversubscribed_pre )
def post_tbar_jsons(server, uuid, instance, partner_df, merge_existing=True, processes=32, chunk_shape=(256, 256, 64000)):
    """
    Post a large set of tbars (including their PSD relationships) to dvid,
    using the POST /blocks annotation endpoint.
    If you're posting T-bars only, with no associated PSDs,
    you can omit the _post coordinate columns.
    The points will be divided into block-aligned sets, serialized as JSON,
    and sent to DVID via multiple processes.

    Note:
        Mutates ``partner_df`` in place: adds 'pre_id' (if missing) plus
        chunk-id ('cz_pre'/'cy_pre'/'cx_pre'/'cid_pre') and
        block-id ('bz_pre'/'by_pre'/'bx_pre'/'bid_pre') columns.
    Args:
        server, uuid, instance:
            annotation instance info
        merge_existing:
            If True, combine the new elements with whatever DVID
            already stores for each affected block.
        partner_df:
            A DataFrame containing the following columns:
            [
                # tbar coordinates
                'z_pre', 'y_pre', 'x_pre',
                # confidence
                'conf_pre',
                # psd coordinates
                'z_post', 'y_post', 'x_post',
                # unique ID for each tbar. Appended for you if this is missing.
                'pre_id',
            ]
    """
    logger.info("Computing chunk/block IDs")
    if 'pre_id' not in partner_df.columns:
        partner_df['pre_id'] = encode_coords_to_uint64(partner_df[['z_pre', 'y_pre', 'x_pre']].values)
    # Coarse chunks determine the unit of parallel work...
    partner_df['cz_pre'] = partner_df['z_pre'] // chunk_shape[0]
    partner_df['cy_pre'] = partner_df['y_pre'] // chunk_shape[1]
    partner_df['cx_pre'] = partner_df['x_pre'] // chunk_shape[2]
    partner_df['cid_pre'] = encode_coords_to_uint64(partner_df[['cz_pre', 'cy_pre', 'cx_pre']].values)
    # ...and 64px blocks determine the JSON payload keys within each chunk.
    partner_df['bz_pre'] = partner_df['z_pre'] // 64
    partner_df['by_pre'] = partner_df['y_pre'] // 64
    partner_df['bx_pre'] = partner_df['x_pre'] // 64
    partner_df['bid_pre'] = encode_coords_to_uint64(partner_df[['bz_pre', 'by_pre', 'bx_pre']].values)
    num_chunks = partner_df['cid_pre'].nunique()
    _post = partial(_post_tbar_chunk, server, uuid, instance, chunk_shape, merge_existing)
    # Each worker receives a (chunk-coords, chunk-DataFrame) pair via starmap.
    compute_parallel(_post, partner_df.groupby(['cz_pre', 'cy_pre', 'cx_pre']),
                     total=num_chunks, processes=processes, ordered=False, starmap=True)
def _post_tbar_chunk(server, uuid, instance, chunk_shape, merge_existing, c_zyx, chunk_df):
    """
    Helper for post_tbar_jsons(): serialize one chunk's t-bars into per-block
    JSON payloads, optionally merge with the annotations already stored in
    DVID for that chunk, and post the result.
    """
    payload = {}
    for (bz, by, bx), block_df in chunk_df.groupby(['bz_pre', 'by_pre', 'bx_pre']):
        payload[f"{bx},{by},{bz}"] = compute_tbar_jsons(block_df)

    if merge_existing:
        chunk_start = np.asarray(c_zyx) * chunk_shape
        stored = fetch_blocks(server, uuid, instance, [chunk_start, chunk_start + chunk_shape])
        for key, elements in stored.items():
            if key in payload:
                payload[key].extend(elements)
            elif elements:
                payload[key] = elements

    post_blocks(server, uuid, instance, payload)
def post_psd_jsons(server, uuid, instance, partner_df, merge_existing=True, processes=32, chunk_shape=(256, 256, 64000)):
    """
    Post a large set of PSDs (with their t-bar relationships) to dvid,
    using the POST /blocks annotation endpoint.

    The PSD counterpart of post_tbar_jsons(): points are divided into
    block-aligned sets, serialized as JSON, and posted in parallel.

    Note:
        Mutates ``partner_df`` in place: adds chunk-id
        ('cz_post'/'cy_post'/'cx_post'/'cid_post') and block-id
        ('bz_post'/'by_post'/'bx_post'/'bid_post') columns.
    """
    logger.info("Computing chunk/block IDs")
    # Coarse chunks determine the unit of parallel work...
    partner_df['cz_post'] = partner_df['z_post'] // chunk_shape[0]
    partner_df['cy_post'] = partner_df['y_post'] // chunk_shape[1]
    partner_df['cx_post'] = partner_df['x_post'] // chunk_shape[2]
    partner_df['cid_post'] = encode_coords_to_uint64(partner_df[['cz_post', 'cy_post', 'cx_post']].values)
    # ...and 64px blocks determine the JSON payload keys within each chunk.
    partner_df['bz_post'] = partner_df['z_post'] // 64
    partner_df['by_post'] = partner_df['y_post'] // 64
    partner_df['bx_post'] = partner_df['x_post'] // 64
    partner_df['bid_post'] = encode_coords_to_uint64(partner_df[['bz_post', 'by_post', 'bx_post']].values)
    num_chunks = partner_df['cid_post'].nunique()
    _post = partial(_post_psd_chunk, server, uuid, instance, chunk_shape, merge_existing)
    compute_parallel(_post, partner_df.groupby(['cz_post', 'cy_post', 'cx_post']),
                     total=num_chunks, processes=processes, ordered=False, starmap=True)
def _post_psd_chunk(server, uuid, instance, chunk_shape, merge_existing, c_zyx, chunk_df):
    """
    Helper for post_psd_jsons(): serialize one chunk's PSDs into per-block
    JSON payloads, optionally merge with the annotations already stored in
    DVID for that chunk, and post the result.
    """
    payload = {}
    for (bz, by, bx), block_df in chunk_df.groupby(['bz_post', 'by_post', 'bx_post']):
        payload[f"{bx},{by},{bz}"] = compute_psd_jsons(block_df)

    if merge_existing:
        chunk_start = np.asarray(c_zyx) * chunk_shape
        stored = fetch_blocks(server, uuid, instance, [chunk_start, chunk_start + chunk_shape])
        for key, elements in stored.items():
            if key in payload:
                payload[key].extend(elements)
            elif elements:
                payload[key] = elements

    post_blocks(server, uuid, instance, payload)
def delete_all_synapses(server, uuid, instance, box=None, chunk_shape=(256,256,64000)):
    """
    Erase every synapse annotation in the given bounding box
    (or the whole sync'd segmentation volume, if no box is given).

    Args:
        server, uuid, instance:
            annotation instance info
        box:
            Bounding box to erase, or None to erase the whole volume.
            NOTE(review): a string may also be passed here, which is asserted
            to equal the sync'd segmentation instance name — apparently a
            safety confirmation; verify intended usage before relying on it.
        chunk_shape:
            Unit of parallel work (must cover 64px-aligned regions).
    """
    if box is None or isinstance(box, str):
        # Determine name of the segmentation instance that's
        # associated with the given synapses instance.
        syn_info = fetch_instance_info(server, uuid, instance)
        seg_instance = syn_info["Base"]["Syncs"][0]
        if isinstance(box, str):
            assert box == seg_instance, \
                ("The segmentation instance name you provided doesn't match the name of the sync'd instance.\n"
                 "Please provide an explicit bounding-box.")
        box = fetch_volume_box(server, uuid, seg_instance)
    box = np.asarray(box)
    assert (box % 64 == 0).all(), "box must be block-aligned"
    chunk_boxes = boxes_from_grid(box, chunk_shape, clipped=True)
    _erase = partial(_erase_chunk, server, uuid, instance)
    compute_parallel(_erase, chunk_boxes, processes=32)
def _erase_chunk(server, uuid, instance, chunk_box):
    """
    Helper for delete_all_synapses().
    Fetch all blocks in the chunk (to see which blocks have data)
    and erase the ones that aren't empty.
    """
    chunk_data = fetch_blocks(server, uuid, instance, chunk_box)
    # Re-post an empty element list for every non-empty block.
    cleared = {key: [] for key, elements in chunk_data.items() if elements}
    post_blocks(server, uuid, instance, cleared, kafkalog=False)
def compute_tbar_jsons(partner_df):
    """
    Compute the element JSON data that corresponds
    to the tbars in the given partner table.

    If you are posting an initial set of tbar points without any PSDs,
    simply omit the '_post' columns from the table.

    The table must contain tbars from a single 64px block only.
    """
    # BUG FIX: this check previously selected ['z_pre', 'y_pre', 'z_pre']
    # (z twice, x never), so tbars spanning different blocks in x slipped through.
    block_ids = partner_df[['z_pre', 'y_pre', 'x_pre']].values // 64
    assert not np.diff(block_ids, axis=0).any(), \
        f"DataFrame contains multiple blocks!\n{partner_df}"

    tbars_only = ('x_post' not in partner_df.columns)

    tbar_jsons = []
    for _pre_id, tbar_df in partner_df.groupby('pre_id'):
        # All rows of the group share the same tbar; take its coord/conf from the first.
        tbar_xyz = tbar_df[['x_pre', 'y_pre', 'z_pre']].values[0].tolist()
        tbar_conf = tbar_df['conf_pre'].iloc[0]
        tbar_json = {
            "Pos": tbar_xyz,
            "Kind": "PreSyn",
            "Tags": [],
            "Prop": {"conf": str(tbar_conf), "user": "$fpl"},
        }
        if tbars_only:
            tbar_json["Rels"] = []
        else:
            # One 'PreSynTo' relationship per associated PSD.
            tbar_json["Rels"] = [{"Rel": "PreSynTo", "To": c} for c in tbar_df[['x_post', 'y_post', 'z_post']].values.tolist()]
        tbar_jsons.append(tbar_json)

    return tbar_jsons
def compute_psd_jsons(partner_df):
    """
    Compute the element JSON data that corresponds to the PSDs
    in the given partner table.

    The table must contain PSDs from a single 64px block only.
    """
    # BUG FIX: this check previously selected ['z_post', 'y_post', 'z_post']
    # (z twice, x never) AND used np.equal.reduce(), which does not test
    # "all rows equal". Use the same single-block check as compute_tbar_jsons().
    block_ids = partner_df[['z_post', 'y_post', 'x_post']].values // 64
    assert not np.diff(block_ids, axis=0).any(), \
        f"DataFrame contains multiple blocks!\n{partner_df}"

    psd_jsons = []
    for row in partner_df.itertuples():
        psd_jsons.append({
            "Pos": [int(row.x_post), int(row.y_post), int(row.z_post)],
            "Kind": "PostSyn",
            "Tags": [],
            "Prop": {"conf": str(row.conf_post), "user": "$fpl"},
            # Each PSD references exactly one t-bar.
            "Rels": [{"Rel": "PostSynTo", "To": [int(row.x_pre), int(row.y_pre), int(row.z_pre)]}]
        })
    return psd_jsons
def load_gary_psds(pkl_path):
    """
    Load a pickle file as given by Gary's code and return a 'partner table'.

    Returns:
        DataFrame with columns:
        ['pre_id', 'z_pre', 'y_pre', 'x_pre', 'kind_pre', 'conf_pre', 'user_pre',
         'post_id', 'z_post', 'y_post', 'x_post', 'kind_post', 'conf_post', 'user_post']
    """
    import pickle

    # SECURITY NOTE: pickle.load() executes arbitrary code — only load trusted files.
    # BUG FIX: use a context manager so the file handle is closed
    # (was pickle.load(open(...)), which leaked it).
    with open(pkl_path, 'rb') as f:
        data = pickle.load(f)

    rows = []
    items = zip(data['locs'], data['conf'], data['psds'], data['psds_conf'])
    for tbar_coord, tbar_conf, psd_coords, psd_confs in tqdm_proxy(items, total=len(data['locs'])):
        for psd_coord, psd_conf in zip(psd_coords, psd_confs):
            # Coordinates arrive as [x,y,z]; reverse to [z,y,x].
            rows.append([*(tbar_coord[::-1]), tbar_conf, *(psd_coord[::-1]), psd_conf])

    df = pd.DataFrame(rows, columns=['z_pre', 'y_pre', 'x_pre', 'conf_pre', 'z_post', 'y_post', 'x_post', 'conf_post'])
    for col in ['z_pre', 'y_pre', 'x_pre', 'z_post', 'y_post', 'x_post']:
        df[col] = df[col].astype(np.int32)

    df['pre_id'] = encode_coords_to_uint64(df[['z_pre', 'y_pre', 'x_pre']].values)
    df['post_id'] = encode_coords_to_uint64(df[['z_post', 'y_post', 'x_post']].values)
    df['user_pre'] = df['user_post'] = '$fpl'
    df['kind_pre'] = 'PreSyn'
    df['kind_post'] = 'PostSyn'
    df = df[['pre_id', 'z_pre', 'y_pre', 'x_pre', 'kind_pre', 'conf_pre', 'user_pre',
             'post_id', 'z_post', 'y_post', 'x_post', 'kind_post', 'conf_post', 'user_post']]
    return df
def add_synapses(point_df, partner_df, new_psd_partners_df):
    """
    Add the PSDs from new_psd_partners_df, which may reference
    existing tbars, or may reference new tbars, in which
    case the tbars will be added, too.

    Returns:
        (point_df, partner_df) with the new points/relationships included.
        New rows take precedence over old ones with the same ids.
    """
    POINT_COLS = ['z', 'y', 'x', 'kind', 'conf', 'user']
    PARTNER_COLS_PRE = ['pre_id', 'z_pre', 'y_pre', 'x_pre', 'kind_pre', 'conf_pre', 'user_pre']
    PARTNER_COLS_POST = ['post_id', 'z_post', 'y_post', 'x_post', 'kind_post', 'conf_post', 'user_post']
    PARTNER_COLS = [*PARTNER_COLS_PRE, *PARTNER_COLS_POST]
    partner_df = partner_df[PARTNER_COLS]
    new_psd_partners_df = new_psd_partners_df[PARTNER_COLS]
    # Check for possible conflicts before we begin
    # (an id must never appear as both a tbar and a psd).
    conflicts = (pd.Index(new_psd_partners_df['pre_id'].values)
                 .intersection(new_psd_partners_df['post_id'].values))
    if len(conflicts) > 0:
        raise RuntimeError("tbars and psds in the new set overlap!")
    conflicts = (pd.Index(new_psd_partners_df['pre_id'].values)
                 .intersection(partner_df['post_id'].values))
    if len(conflicts) > 0:
        raise RuntimeError("tbars in the new set overlap with psds in the old set!")
    conflicts = (pd.Index(new_psd_partners_df['post_id'].values)
                 .intersection(partner_df['pre_id'].values))
    if len(conflicts) > 0:
        raise RuntimeError("psds in the new set overlap with tbars in the old set!")
    # keep='last' means the NEW relationship wins over any existing duplicate.
    partner_df = pd.concat((partner_df, new_psd_partners_df), ignore_index=True, sort=True)
    partner_df.drop_duplicates(['pre_id', 'post_id'], keep='last', inplace=True)
    # Update points
    # Re-label the *_pre (or *_post) columns as generic point columns,
    # indexed by the corresponding id.
    # NOTE(review): the columns of the *other* side ride along un-renamed and
    # are unioned into point_df by the sorted concat below — confirm callers
    # expect those extra columns.
    new_points_pre = (new_psd_partners_df
                      .rename(columns={'pre_id': 'point_id', **dict(zip(PARTNER_COLS_PRE[1:], POINT_COLS))})
                      .drop_duplicates('point_id', keep='last')
                      .set_index('point_id'))
    new_points_post = (new_psd_partners_df
                       .rename(columns={'post_id': 'point_id', **dict(zip(PARTNER_COLS_POST[1:], POINT_COLS))})
                       .drop_duplicates('point_id', keep='last')
                       .set_index('point_id'))
    point_df = pd.concat((point_df, new_points_pre, new_points_post), sort=True)
    # Drop duplicate point_ids, keep new
    point_df = point_df.loc[~point_df.index.duplicated(keep='last')]
    return point_df, partner_df
def delete_psds(point_df, partner_df, obsolete_partner_df):
    """
    Delete the PSDs listed in the given obsolete_partner_df.
    If any tbars are left with no partners, delete those tbars, too.

    Returns:
        (point_df, partner_df, dropped_tbar_ids)
    """
    obsolete_partner_df = obsolete_partner_df[['pre_id', 'post_id']]
    obsolete_post_ids = obsolete_partner_df['post_id'].values

    # Drop the obsolete PSD points and their relationships.
    keep_points = (point_df['kind'] == "PreSyn") | ~point_df.index.isin(obsolete_post_ids)
    point_df = point_df.loc[keep_points]
    partner_df = partner_df.loc[~partner_df['post_id'].isin(obsolete_post_ids)]

    # Any tbar that no longer appears in the partner table is now empty: drop it.
    remaining_tbar_ids = partner_df['pre_id'].unique()
    emptied = ~obsolete_partner_df['pre_id'].isin(remaining_tbar_ids)
    dropped_tbar_ids = obsolete_partner_df.loc[emptied, 'pre_id'].unique()

    keep_points = (point_df['kind'] == "PostSyn") | ~point_df.index.isin(dropped_tbar_ids)
    point_df = point_df.loc[keep_points]

    return point_df.copy(), partner_df.copy(), dropped_tbar_ids
def delete_tbars(point_df, partner_df, obsolete_tbar_ids):
    """
    Delete the given tbars and all of their associated PSDs.
    """
    doomed = partner_df['pre_id'].isin(obsolete_tbar_ids)
    doomed_psd_ids = partner_df.loc[doomed, 'post_id'].values
    partner_df = partner_df.loc[~doomed]

    # Keep tbars that weren't deleted and psds that weren't attached to one.
    keep = (((point_df['kind'] == "PreSyn") & ~point_df.index.isin(obsolete_tbar_ids))
            | ((point_df['kind'] == "PostSyn") & ~point_df.index.isin(doomed_psd_ids)))
    point_df = point_df.loc[keep]
    return point_df.copy(), partner_df.copy()
def select_autapses(partner_df):
    """
    Select rows from the given 'partner table' that correspond to autapses
    (a body synapsing onto itself).
    Must have columns body_pre and body_post.
    """
    is_autapse = (partner_df['body_pre'] == partner_df['body_post'])
    return partner_df.loc[is_autapse]
def select_redundant_psds(partner_df):
    """
    Select rows of the given 'partner table' that correspond to redundant PSDs.

    If a tbar has more than one connection to the same body, then all but one
    of them are considered redundant.
    This function returns the less confident PSD entries as redundant.
    """
    if 'conf_post' in partner_df:
        # Sort ascending by confidence so that keep='last' below retains
        # the most confident PSD of each (tbar, body) pair.
        partner_df = partner_df.sort_values('conf_post')
    else:
        logger.warning("DataFrame has no 'conf_post' column. Discarding redundant PSDs in arbitrary order.")

    redundant = partner_df.duplicated(['pre_id', 'body_post'], keep='last')
    return partner_df.loc[redundant].copy()
| [
"bergs@janelia.hhmi.org"
] | bergs@janelia.hhmi.org |
162f3d227d62db88c01557f97bade2418654e555 | 537fa02ee54e81117e480aee6acbfd8e9761cc4b | /testspi.py | 983c21703f56436e5496ec22ebca2eea709fd8bc | [] | no_license | kritsana3376/encoder | 95a9a8e7f7b01526b610ddc97198989b20f5aeb4 | c6a8c10557c9ff8066f3f6c71c3847734509a79a | refs/heads/main | 2023-03-22T23:42:34.725345 | 2021-03-14T07:33:54 | 2021-03-14T07:33:54 | 347,300,843 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 596 | py | from machine import SPI,SoftSPI,Pin
# Configure SPI bus 2 as master: 80 MHz clock (baudrate=80000000), SPI mode 0
# (polarity=0, phase=0), 8-bit words, MSB first, with explicit (non-default)
# pins SCK=18, MOSI=23, MISO=19 — presumably an ESP32 board; confirm target.
# NOTE(review): a previous comment claimed "2MHz", contradicting baudrate=80000000.
spi = SPI(2, baudrate=80000000, polarity=0, phase=0, bits=8, firstbit=SPI.MSB, sck=Pin(18), mosi=Pin(23), miso=Pin(19))
spi.write(bytes([0x01, 0x02, 0x03, 0x04, 0x05])) # send 5 bytes on the bus
spi.read(5) # receive 5 bytes on the bus
rbuf = bytearray(5)
spi.write_readinto(bytes([0x01, 0x02, 0x03, 0x04, 0x05]), rbuf) # send and receive 5 bytes (full duplex)
# Interpret the next 5 received bytes as one big-endian integer.
int_val = int.from_bytes(spi.read(5), "big")
print(int_val)
print(spi.read(5)) | [
"noreply@github.com"
] | kritsana3376.noreply@github.com |
156f8bc0068c45e320e0bcc1498cd4684b38a0bd | a6de57f63996bd1d9400ab4d62ef46f579926053 | /lscarpy/main.py | 72071e3112c94bfe5c42453d01f8231f431ea83b | [] | no_license | nobodyLee/LearningScrapy | d2656b833adba934e8be0dc03f9a72185f04be20 | 59d2c0fd866a9cac9a4c071a273bbdd56a160164 | refs/heads/master | 2022-08-16T07:27:37.069734 | 2018-12-05T15:31:45 | 2018-12-05T15:31:45 | 160,011,650 | 0 | 0 | null | 2022-07-29T22:38:43 | 2018-12-02T03:59:02 | Python | UTF-8 | Python | false | false | 170 | py | import os
import sys
from scrapy.cmdline import execute

# Make this project directory importable, then launch the 'quotes' spider
# programmatically — equivalent to running `scrapy crawl quotes` here.
sys.path.append(os.path.dirname(__file__))
execute(['scrapy', 'crawl', 'quotes'])
| [
"lile2080@gmail.com"
] | lile2080@gmail.com |
40ccd51ea1d674209bf46cbea751869f208c6df8 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/coins_20200608093830.py | 2b449f4937abf589c1a075934356f3463068e9c8 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 178 | py |
# NOTE(review): work-in-progress snippet (from an editor .history snapshot).
# 'count' is never updated or returned, and the loop only prints
# coins[i] / amount — presumably the start of a coin-change solution;
# confirm the intended algorithm before completing it.
def change(amount,coins):
    count = 0
    for i in range(len(coins)):
        times = coins[i] / amount
        print(times)
change(5,[1,2,5])
| [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
6fc37d6f6883edf0c4c384f4b537d8dad0fe89e7 | 342da770c81efdfdca0ed2a3435ba09b13c96ea2 | /MovieColors.py | ea564fbedd930a7f872d08d055c74894d22e75b7 | [
"MIT"
] | permissive | tylern4/MovieColors | 860aee604c64f1c7b2e522682b165f938ce30e68 | 5f0fcd86b13b05312585e59e09db9b81fa6b273f | refs/heads/main | 2023-05-05T08:15:31.439858 | 2021-05-24T12:07:20 | 2021-05-24T12:07:20 | 369,832,445 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,895 | py | from collections import Counter
import matplotlib.pyplot as plt
import numpy as np
import cv2
from tqdm import trange, tqdm
try:
import boost_histogram as bh
histColors = bh.numpy.histogram
except ImportError:
print("Using Numpy hists")
histColors = np.histogram
def GetMax(img):
    """
    Return the most frequent (dominant) color of an (H, W, C) image,
    quantized to 3 decimal places, as a 1-D array [c0, ..., c(n-1), 1]
    (an alpha value of 1 is appended).
    """
    quantized = np.round(img, 3)
    pixels = quantized.reshape(-1, quantized.shape[2])
    tally = Counter(map(tuple, pixels))
    dominant, _count = tally.most_common(1)[0]
    return np.array(list(dominant) + [1]).T
def count_frames(video):
    """
    Count the frames in a video by reading until exhaustion,
    then rewind the stream to frame 0 before returning.
    """
    total = 0
    # read() returns (grabbed, frame); grabbed is False past the last frame.
    while video.read()[0]:
        total += 1
    # Property id 1 (CAP_PROP_POS_FRAMES): seek back to the first frame.
    video.set(1, 0)
    return total
def half_size(frame):
    """Downscale a frame to half its width and half its height."""
    height, width = frame.shape[0], frame.shape[1]
    return cv2.resize(frame, (width // 2, height // 2))
def get_frames(video):
    """Compute the dominant colour of every frame in an opened video.

    Returns:
        (colors, total): `colors` is a (192, total, 4) array whose column i
        repeats frame i's modal RGBA colour down all 192 rows (one image
        column per frame for the final barcode plot); `total` is the frame
        count reported by count_frames().
    """
    total = count_frames(video)
    colors = np.zeros((192, total, 4))
    video.set(1, 0)  # 1 == cv2.CAP_PROP_POS_FRAMES: start from frame 0
    # Bug fix: the original also built an unused `frames` list and would
    # crash in cvtColor(None, ...) if the stream ended before `total` reads.
    for i in trange(total):
        grabbed, frame = video.read()
        if not grabbed:  # stream ended early; remaining columns stay black
            break
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) / 256
        frame = half_size(frame)
        colors[:, i] = GetMax(frame)
    return colors, total
# Script entry point: build a colour-barcode image for one hard-coded video.
cap = cv2.VideoCapture(
    "/Users/tylern/Downloads/Do You Love Me-fn3KWM1kuAw.mkv")
# cap = cv2.VideoCapture(
#     "/Users/tylern/Desktop/Wire-kGj_HkKhhSE.mkv")
# Despite the name, `frames` holds one dominant-colour column per frame.
frames, total = get_frames(cap)
# colors = np.zeros((192, len(frames), 4))
# for i, f in tqdm(enumerate(frames)):
#     colors[:, i] = GetMax(f)
# Render the barcode and save it without axes or padding.
fig, ax = plt.subplots(figsize=[192, total//100])
plt.imshow(frames)
plt.axis("off")
plt.savefig("pic.png", bbox_inches='tight', pad_inches=0)
| [
"tylern@canisius.edu"
] | tylern@canisius.edu |
7dd7acbd17cee8b4c05c6f118abbd654aca5e2d0 | 797f21680bf51656db629691cc667a4ddae7a513 | /final_exams/heroes_of_code_and_logic_VII.py | 758594704481bd5724bca88a701dcec11bcbc266 | [] | no_license | yordan-marinov/fundamentals_python | 48f5ab77814fddc6d3cb5a8d4b5e14f1eebf1298 | e1e9544d02be99640623317fadee810b503e7d9f | refs/heads/master | 2023-01-24T04:59:48.140176 | 2020-12-14T14:21:49 | 2020-12-14T14:21:49 | 309,784,119 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,679 | py | def get_heroes_data() -> dict:
number_heroes = int(input())
heroes_data = {}
for _ in range(number_heroes):
data = input().split()
hero_name = data[0]
hit_points = int(data[1])
mana_points = int(data[2])
heroes_data[hero_name] = {
"hp": hit_points,
"mp": mana_points,
}
return heroes_data
def cast_spell(dd: dict, *args) -> dict:
    """Spend mana to cast a spell when the hero has enough MP.

    args: (hero_name, mana_cost, spell_name). Mutates `dd` in place and
    returns it; prints the outcome either way.
    """
    hero_name, mana_cost, spell_name = args[0], int(args[1]), args[2]
    hero = dd[hero_name]
    if hero["mp"] < mana_cost:
        print(f"{hero_name} does not have enough MP to cast {spell_name}!")
    else:
        hero["mp"] -= mana_cost
        print(
            f"{hero_name} has successfully cast {spell_name} "
            f"and now has {hero['mp']} MP!"
        )
    return dd
def take_damage(dd: dict, *args) -> dict:
    """Apply damage; remove the hero from `dd` when HP drops to 0 or below.

    args: (hero_name, damage, attacker_name). Mutates and returns `dd`.
    """
    hero_name, damage, attacker = args[0], int(args[1]), args[2]
    remaining = dd[hero_name]["hp"] - damage
    dd[hero_name]["hp"] = remaining
    if remaining <= 0:
        print(f"{hero_name} has been killed by {attacker}!")
        del dd[hero_name]
    else:
        print(
            f"{hero_name} was hit for {damage} HP by {attacker} and "
            f"now has {remaining} HP left!"
        )
    return dd
def recharge(dd: dict, *args) -> dict:
    """Restore mana, capped at MAXIMUM_POINTS['mp']; prints the actual gain.

    args: (hero_name, amount). Mutates and returns `dd`.
    """
    hero_name, amount = args[0], int(args[1])
    current = dd[hero_name]["mp"]
    gained = min(amount, MAXIMUM_POINTS["mp"] - current)
    print(f"{hero_name} recharged for {gained} MP!")
    dd[hero_name]["mp"] = current + gained
    return dd
def heal(dd: dict, *args) -> dict:
    """Restore hit points, capped at MAXIMUM_POINTS['hp']; prints the actual gain.

    args: (hero_name, amount). Mutates and returns `dd`.
    """
    hero_name, amount = args[0], int(args[1])
    current = dd[hero_name]["hp"]
    gained = min(amount, MAXIMUM_POINTS["hp"] - current)
    print(f"{hero_name} healed for {gained} HP!")
    dd[hero_name]["hp"] = current + gained
    return dd
def main_manipulation_print_func(dd: dict, commands) -> None:
    """Read "Cmd - arg - ..." lines from stdin and dispatch until "End".

    `commands` maps a command name to a handler invoked as handler(dd, *args);
    on "End" the final standings are printed via sorting_printing_func.
    """
    while True:
        data = input()
        if data == "End":
            sorting_printing_func(dd)
            break
        data = data.split(" - ")
        command = data.pop(0)
        commands[command](dd, *data)
def sorting_printing_func(dd: dict) -> print:
    """Print each hero's HP/MP, ordered by HP descending with name ties ascending."""
    ordering = sorted(dd.items(), key=lambda pair: (-pair[1]["hp"], pair[0]))
    for name, stats in ordering:
        print(f"{name}")
        print(f"  HP: {stats['hp']}")
        print(f"  MP: {stats['mp']}")
# Caps enforced by recharge()/heal(): a hero never exceeds these totals.
MAXIMUM_POINTS = {"hp": 100, "mp": 200}
# Dispatch table: input command name -> handler function.
COMMANDS = dict(
    CastSpell=cast_spell,
    TakeDamage=take_damage,
    Recharge=recharge,
    Heal=heal
)
# Script entry: read the roster, then process commands until "End".
heroes = get_heroes_data()
main_manipulation_print_func(heroes, COMMANDS)
| [
"jordanmarinov8@gmail.com"
] | jordanmarinov8@gmail.com |
abf6c72f5811a95f7c42e43b2f53f8a183c640d1 | 2f1b5f9eb12201bc332f46ed18d115336bcb593e | /HW9/Function-cmd_end_file.py | ceeff500d40dcdf610795aef9f4e25caade5bb4a | [] | no_license | zyzfred/BU-CS-521 | 86aed463a15ec1137ca9fd6db23228f1541d6184 | 665d9994aee93dd3f38fbb47ae20f1e984a1d981 | refs/heads/master | 2020-08-07T17:27:41.525697 | 2020-03-24T01:14:22 | 2020-03-24T01:14:22 | 213,537,886 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,405 | py | N_PTR = 0; P_PTR = 1; DATA = 2
x_str = """Monday
Tuesday
Wednesday"""
def construct_linked_list(y_str):
y_list = y_str.split('\n')
new_start = None
new_end = None
for e in y_list:
new_node = [None, None, e]
if new_start is None and new_end is None:
new_start = new_node
new_end = new_node
else:
new_node[P_PTR] = new_end
new_end[N_PTR] = new_node
new_end = new_node
return new_start, new_end
def print_sublist(first, last):
next_node = first
while next_node is not None:
print(next_node[DATA])
if next_node == last:
break
next_node = next_node[N_PTR]
return
def get_record(first, last, pos):
next_node = first
counter = -1
while next_node is not None:
counter = counter + 1
if counter == pos:
break
else:
next_node = next_node[N_PTR]
return next_node
def print_file(start, end, cur_node, delta):
if cur_node is not start:
end_left_sublist = cur_node[P_PTR]
print_sublist(start, end_left_sublist)
cur_line = cur_node[DATA]
print(cur_line[ : delta] + '$' + cur_line[delta : ])
if cur_node is not end:
start_right_sublist = cur_node[N_PTR]
print_sublist(start_right_sublist, end)
print('\n')
return
def cmd_end_file(start, end, z, delta):
z = end
last_line = z[DATA]
return start, end, z, len(last_line)
# this function looks for a target in a double linked list
# if it finds it, it will return record and delta
# otherwise it will return None, -1
def helper_find(x_start, x_end, target):
if x_start is not None:
next_record = x_start
while next_record is not None:
cur_line = next_record[DATA]
pos = cur_line.find(target)
if pos >= 0:
return next_record, pos
else:
next_record = next_record[N_PTR]
return None, -1
else:
return None, -1
new_start, new_end = construct_linked_list(x_str)
z = get_record(new_start, new_end, 1)
delta = 3
print_file(new_start, new_end, z, delta)
new_start, new_end, z, delta = cmd_end_file(new_start, new_end, z, delta)
print_file(new_start, new_end, z, delta) | [
"noreply@github.com"
] | zyzfred.noreply@github.com |
d2957afc68dcf2e4dccb487d4684c5cb91508b0f | 89ad38eb84a12f088c6c788dbe76ca843f2d2d4b | /8c.py | 34578ce39edba22783320f1b555715b430b7379d | [] | no_license | gogle7/pp | 84b37687ceed31785ac1788edb76821e0597309d | 4f0de040363007cf8f3f003b618ab182dff65ad9 | refs/heads/master | 2020-08-15T13:37:10.990755 | 2019-12-15T17:45:40 | 2019-12-15T17:45:40 | 215,351,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 299 | py | def is_square(n):
if int(n**0.5)==n**0.5:
return True
else:
return False
def is_even(n):
    """Return True if `n` is an even integer.

    Idiom fix: replaces an if/else pair that returned the boolean literals
    with the direct comparison; behavior is identical for integers.
    """
    return n % 2 == 0
# Print every even perfect square from 2 up to the user-supplied limit.
# NOTE(review): the range already steps by 2, so the is_even() check here
# is always true — the filter that matters is is_square().
n=int(input("Enter the upper limit"))
for i in range(2,n+1,2):
    if is_even(i) and is_square(i):
print(i,end=" ") | [
"noreply@github.com"
] | gogle7.noreply@github.com |
31fcf2bbe48b9ca2aaa2bc78d04178f0c457844a | 77b14cad386b39f2787ef98e37ea5426385f61f1 | /Problems/Greetings/task.py | 4f3c33d40df27ea8c6f6bfa466641e67e973346c | [] | no_license | pblvll/Coffee_Machine_2 | babb414fe91841743a7ca0d700a090063a59a13c | 4691bc6a0d6fabe091183d0abcdc53f69d128dc7 | refs/heads/master | 2022-11-25T01:50:52.654225 | 2020-07-06T08:47:23 | 2020-07-06T08:47:23 | 277,490,373 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 226 | py | class Person:
    def __init__(self, name):
        # Display name used by greet().
        self.name = name

    # create the method greet here
    def greet(self):
        """Print "Hello, I am <name>!"; returns None (print's return value)."""
        return print("Hello, I am {}!".format(self.name))


# Script entry: read a name from stdin and greet it.
person = Person(input())
person.greet()
| [
"enternamehere1236@gmail.com"
] | enternamehere1236@gmail.com |
7d57e479d49137809d438c3678a4bb0343657241 | 82ee12ae4bf809e4609414446dd5e8e9d15977b0 | /importexcel.py | a8121eea4cc662c9ad55d4e3b566d6077c72a5ad | [] | no_license | kengdeb/django_SCCC | b5b802b5b9f0738e3bb89106ad6b591bc1ef87f0 | bb63e5c4e352631081c01bdc19a6669aabf10ecd | refs/heads/master | 2023-04-10T00:27:30.730628 | 2021-04-08T22:01:22 | 2021-04-08T22:01:22 | 355,923,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 611 | py | import pandas as pd
from tracking.models import Transaction
# Load the shipment-tracking worksheet and copy each row into the database.
excel = pd.read_excel('shipmentTracking.xlsx', sheet_name = 'Sheet1')
# Pull each column of interest into a plain Python list.
col1 = excel['ShipmentID'].tolist()
col2 = excel['PreDONo'].tolist()
col3 = excel['Delivery Order No'].tolist()
col4 = excel['Net Weight Qty'].tolist()
col5 = excel['Weight Out Date'].tolist()
#print(col1[0],col2[0],col3[0],col4[0],col5[0])
# Walk the columns in lockstep and persist one Transaction per spreadsheet
# row. NOTE(review): this issues one INSERT per row; bulk_create would be
# faster for large sheets — confirm against the Django version in use.
for a,b,c,d,e in zip(col1,col2,col3,col4,col5):
    new = Transaction()
    new.shipment_id = a
    new.pre_do_no = b
    new.delivery_order_no = c
    new. net_weight_qty = d
    new.weight_out_date = e
    new.save()
# NOTE(review): indentation was lost in this dump — the print may originally
# have been inside the loop (one message per row); placed after it here.
print('success')
print('success')
| [
"kengdeb@gmail.com"
] | kengdeb@gmail.com |
bb70cecbcaf2c91ec55a4de0ce0aad25caaead16 | 6fbbed1b9fc752f593767600ab6d7c35841a52ca | /test_app/urls.py | 948f1aea4955d10ed150d272c9dcc0280a79d34a | [] | no_license | Andrrii/my-first-django-project | 4eb01e7054a4acf01c3a3e5ceb4b2814df276f8f | 4b498527cb1c7a3882c8a36c8a4a6ace242557df | refs/heads/main | 2023-02-05T21:58:51.798123 | 2020-12-26T01:44:56 | 2020-12-26T01:44:56 | 324,453,511 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 169 | py | from django.urls import path, include
from .views import *
# URL routes for the test app: the index view plus a detail view keyed by
# the rubric's primary key.
urlpatterns = [
    path('', test, name='test'),
    path('rubric/<int:pk>/', get_rubric, name='rubric'),
]
| [
"butsaandrii@gmail.com"
] | butsaandrii@gmail.com |
a098718f61bca732c6ad9176f6c68f5d8e6f4ce9 | e3163cf1e81dada8cb4edd79f3ed64fc366908f2 | /tools/mpegCreatorEphemLb.py | 196783b6ff7116a9844a6d5e6b05302710027a26 | [
"BSD-3-Clause"
] | permissive | STScI-Citizen-Science/MTPipeline | 3dc9b3b59479722d10e853040ecfffcc3dca5e06 | 903743afe55592ab67a240237d924c7c7383eec7 | refs/heads/master | 2020-12-24T14:44:55.749258 | 2014-08-21T16:42:56 | 2014-08-21T16:42:56 | 5,459,956 | 4 | 0 | null | 2014-08-21T16:42:56 | 2012-08-18T03:44:27 | Python | UTF-8 | Python | false | false | 3,600 | py | """
File: mpegCreator.py
Date: July 1st, 2013
Project: MT Pipeline
Organisation: Space Telescope Science Institute
Utility to automatically create mpeg movies from png images. Uses
subprocess module calls to the ffmpeg application.
"""
import argparse
import subprocess
import os
#add logs to this and the original
ROOTPATH = "/astro/3/mutchler/mt/drizzled" #image path
def parse_args():
    """Parse the command line arguments.

    Returns the argparse namespace; `source` is False when the -source/-s
    flag is omitted, otherwise the requested subfolder name.
    """
    parser = argparse.ArgumentParser(
        description='Create mpeg movies from pngs using ffmpeg')
    parser.add_argument(
        '-source', '-s',
        required=False,
        default=False,
        help='carry out operation for the specified folder only')
    return parser.parse_args()
def buildMovies(movieType, path, scaleType):
    """
    makes a subprocess call to the ffmpeg tool to create the movies.

    Builds three movies for one (movieType, scaleType) pair: one from all
    matching frames, one from only cosmic-ray-rejected ('cr') frames, and
    one from the remaining non-CR frames (copied through a temp directory
    first). Relies on the current working directory being `path`, which the
    caller sets, because ffmpeg's glob patterns are relative.
    """
    source=path.split('/')[6]  # dataset name under the ROOTPATH hierarchy
    temp = os.path.join(ROOTPATH, 'temp')
    listDir = os.listdir(path)
    # Movie over every matching frame.
    output = os.path.join(ROOTPATH, "movies", "temp", source + "All" + movieType + scaleType + "_ephem_lb.mp4")
    subprocess.call(['ffmpeg', '-f', 'image2', '-r', '1',
                     '-pattern_type', 'glob', '-i','*'+ movieType + '*'+ scaleType + '_ephem_lb.png',
                     output])
    #for the comsmic ray rejected images
    output = os.path.join(ROOTPATH, "movies", "temp", source + "CR" + movieType + scaleType + "_ephem_lb.mp4")
    subprocess.call(['ffmpeg', '-f', 'image2', '-r', '1',
                     '-pattern_type', 'glob', '-i', '*cr*' + movieType + '*' + scaleType + '_ephem_lb.png',
                     output])
    #for the non-cosmic ray rejected images
    output = os.path.join(ROOTPATH, "movies", "temp", source + "nonCR" + movieType + scaleType + "_ephem_lb.mp4")
    #make a list of non-CR rejected images
    nonCRWideInput = [i for i in listDir if movieType in i and scaleType + '_ephem_lb' in i and 'cr' not in i]
    #copy all the non-CR rejected images to the temp directory
    for files in nonCRWideInput:
        subprocess.call(['cp', files, temp])
    os.chdir(temp)
    #carry out the ffmpeg script for our copied files
    subprocess.call(['ffmpeg', '-f', 'image2', '-r', '1',
                     '-pattern_type', 'glob', '-i','*'+ movieType + '*' + scaleType + '_ephem_lb.png',
                     output])
    subprocess.call('rm *.png', shell=True) #delete the temporary files
    os.chdir(path) #change back to our ROOTPATH
def runScript(path):
    """Build every movie variant (wide/center at linear/log scale) for one folder.

    Changes the working directory to the folder's png/ subdirectory as a
    side effect, which buildMovies relies on for its relative glob patterns.
    """
    png_dir = os.path.join(path, 'png')
    os.chdir(png_dir)
    for scale in ('linear', 'log'):
        buildMovies('wide', png_dir, scale)
        buildMovies('center', png_dir, scale)
def createMovie():
    """Build movies for one source folder, or for every numeric subfolder.

    Uses the module-level `source` (set in __main__ from the CLI): when
    truthy, only that subfolder is processed; otherwise every subdirectory
    of ROOTPATH whose name starts with a digit is processed, skipping any
    folder whose processing fails.
    """
    path = ROOTPATH
    if source:  # add sub-directory to path if given
        path = ROOTPATH + '/' + source
        runScript(path)
    else:  # else carry out operation for ALL sub-directories
        for dirs in os.listdir(path):
            if dirs[0].isdigit():
                path = ROOTPATH + '/' + dirs
                try:
                    runScript(path)
                except Exception:
                    # Bug fix: this was a Python-2 print statement
                    # (print "path not valid"), a SyntaxError under the
                    # Python 3 the rest of the file targets; the bare
                    # `except:` is also narrowed to Exception.
                    print("path not valid")
if __name__ == '__main__':
    args = parse_args()
    # `source` is read as a module-level global by createMovie().
    source = args.source
    createMovie()
"pjoshi@bennington.edu"
] | pjoshi@bennington.edu |
804c1bc920c4adb91eada9b1364edda1ab23c264 | 83ebf2a36fe9dc2a3ad456866273556dacf18df5 | /POMDP-Experiments/train2.py | 0039b89aaca02f13228e0b02b54e10734b717001 | [] | no_license | Shivanshu-Gupta/Dating-Recommendation-System | fba2bb13911823daa31987cd1e83d30d751f68eb | 97213964b73e0fa73dd32a659e530c2eabcc0204 | refs/heads/master | 2021-06-20T09:26:15.401336 | 2017-05-12T16:52:52 | 2017-05-12T16:52:52 | 82,927,067 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 922 | py | import numpy as np
n_users = 2000000

# One latent-feature row per user: column 0 is the selectivity (s) used when
# the user is the one acting, column 1 the attractiveness (a) used when the
# user is the profile being rated.
# Bug fix: np.zeros(n_users, 2) passes 2 as the dtype argument and raises
# TypeError — the shape must be supplied as a tuple.
features = np.zeros((n_users, 2))

lrate = 0.001  # gradient-descent step size
# if using sigmoid(s*a)
def sigma(s, a):
    """Logistic sigmoid of s*a: 1 / (1 + e^(-s*a)).

    Bug fix: `exp` was used without being imported anywhere in the file,
    so every call raised NameError.
    """
    from math import exp  # local import keeps the fix self-contained
    return 1 / (1.0 + exp(- s * a))
def grad_sigma_a(s, a):
    """Partial derivative of sigma(s, a) with respect to a: s * sig * (1 - sig)."""
    temp = sigma(s, a)
    return s * temp * (1 - temp)
def grad_sigma_s(s, a):
    """Partial derivative of sigma(s, a) with respect to s: a * sig * (1 - sig).

    Bug fix: `temp` was never defined in this function (copy/paste slip from
    grad_sigma_a), so every call raised NameError.
    """
    temp = sigma(s, a)
    return a * temp * (1 - temp)
# female user, male profile
# action = 1 if like and 0 if hide
def doDescentFM(u1, u2, action):
    """One stochastic-gradient step on the pair (u1, u2) for observed `action`.

    Updates the global `features` in place: u1's selectivity (column 0) and
    u2's attractiveness (column 1) are nudged toward reducing the error
    between sigma(s, a) and the observed action, scaled by `lrate`.
    """
    s = features[u1][0]
    a = features[u2][1]
    pred_action = sigma(s, a)  # NOTE(review): computed but never used below
    err = lrate * (action - sigma(s, a))
    features[u1][0] += err * grad_sigma_s(s, a)
    features[u2][1] += err * grad_sigma_a(s, a)
if __name__ == '__main__':
    # read the file.
    # NOTE(review): only the FIRST line of actions.csv is processed — there
    # is no loop over the file, so exactly one descent step is performed.
    with open('actions.csv', 'r') as actionsf:
        actionline = actionsf.readline()
        parts = actionline.split(',')
        u1 = int(parts[0])
        u2 = int(parts[1])
        action = int(parts[3])  # column 2 is skipped; the action is column 3
        doDescentFM(u1, u2, action)
"shivanshugupta1995@gmail.com"
] | shivanshugupta1995@gmail.com |
d5d6287bab827cf0236f7d7332b929470aea3181 | 6dc0966b35b6eb8e7bb82b16d4935fd7da073a54 | /moltr/setup.py | fd7d4723349af2af7c44ddb1880c5a684fce38ef | [
"MIT"
] | permissive | akurennoy/moltr | 35a3389889c3c631a89235f0903aef8a1220d661 | e6b03be1929ea3da9f4ae8d1c7643c42bbcdb699 | refs/heads/master | 2020-09-02T09:41:28.484495 | 2019-11-10T20:05:26 | 2019-11-10T20:05:26 | 219,192,710 | 9 | 2 | null | null | null | null | UTF-8 | Python | false | false | 502 | py | from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize, build_ext
# Single Cython extension linking against the prebuilt libargsort in the
# package directory. The OpenMP compile/link flags enable the parallel
# sections in lambdaobj.pyx; -lomp and the libomp library dir target macOS
# with a Homebrew-installed libomp.
exts = [Extension(name="lambdaobj",
                  sources=["lambdaobj.pyx"],
                  libraries=["argsort"],
                  library_dirs=["."],
                  extra_compile_args=["-fopenmp", "-stdlib=libc++"],
                  extra_link_args=["-lomp", "-L/usr/local/opt/libomp/lib/", "-stdlib=libc++"]
                  )]

setup(ext_modules=cythonize(exts))
"alexey.kurennoy@zalando.de"
] | alexey.kurennoy@zalando.de |
81682c19895cda7884ae797f81f3bb5be5d8f1c8 | afc8fb99b86de4a639ef7bd50835c6dfef22d66a | /lg_stats/setup.py | 815d8fdcc515e350b852c0d1d35e9055d741d3c1 | [
"Apache-2.0"
] | permissive | carlosvquezada/lg_ros_nodes | e5d373a59fa990aac5d0a97a3ee705e7aec141b4 | 7560e99272d06ef5c80a5444131dad72c078a718 | refs/heads/master | 2020-04-08T13:49:19.622258 | 2018-11-24T00:21:30 | 2018-11-24T00:21:30 | 159,408,388 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 305 | py | #!/usr/bin/env python
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Standard catkin/ROS python setup: metadata comes from package.xml and the
# lg_stats package is installed from the src/ directory.
d = generate_distutils_setup(
    packages=['lg_stats'],
    package_dir={'': 'src'},
    scripts=[],
    requires=[]
)

setup(**d)

# vim: tabstop=8 expandtab shiftwidth=4 softtabstop=4
| [
"zdenek@endpoint.com"
] | zdenek@endpoint.com |
72b183e7fbbd6ec70c5a74162981d595e0871048 | 341bbc621faaaacc06bc89f9c2ece37b679e928c | /main/migrations/0017_remove_course_prereqs.py | 0bf8ee09d30eef66daaeb1204d3ec72e976baf7f | [] | no_license | mnyark/CourseFlow | 12e92ccbeec86a220d80f4a553ec555ece47675c | 4df0116b49bdcdf5723967e384a6f2fce15317d3 | refs/heads/master | 2020-03-28T10:02:02.769007 | 2018-11-26T19:04:49 | 2018-11-26T19:04:49 | 148,077,667 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 323 | py | # Generated by Django 2.1.1 on 2018-11-25 22:28
from django.db import migrations
class Migration(migrations.Migration):
    """Removes the `prereqs` field from the Course model."""

    dependencies = [
        ('main', '0016_course_semester'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='course',
            name='prereqs',
        ),
    ]
| [
"zoesteier1@gmail.com"
] | zoesteier1@gmail.com |
a4492f9bccb1be93d230c9e6268907e628025ddc | 318012073f44d8169f0684fa444a899bdc218062 | /python/Interview_prep/hashtable/anagram_checker.py | ed03a3f41af030fc08b5507213951b3cbccd829b | [] | no_license | Novandev/hackerrank | f01b1f33255e8009ceb7512ca131fb33c052a72f | 0629c75f6bc1c6a21a3fd46e5e1cb4d133918e68 | refs/heads/master | 2020-04-03T11:42:03.957640 | 2019-04-23T00:10:09 | 2019-04-23T00:10:09 | 155,229,201 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 277 | py | """
Two string are anagrams if the letters of one string can be used to form another
"""
def anagram_checker(s1: list, s2: list) -> bool:
    """Return True if s1 and s2 are anagrams of each other.

    Bug fix: the original compared positional count lists
    ([s1.count(x) for x in s1] == [s2.count(x) for x in s2]), which wrongly
    reports e.g. "aa" and "bb" as anagrams because both yield [2, 2].
    Comparing the sorted character sequences checks the actual multiset.
    """
    return sorted(s1) == sorted(s2)
if __name__ =="__main__":
print(anagram_checker('boop', 'poob')) | [
"donovan.adams@students.makeschool.com"
] | donovan.adams@students.makeschool.com |
ff370e3deef1c238a5a0f662f60617ff309c9fb4 | 52b5773617a1b972a905de4d692540d26ff74926 | /.history/movingMedian_20200630225631.py | 8bf0eecde0b2a7ea0cd4d0c81ed465699b4c24f6 | [] | no_license | MaryanneNjeri/pythonModules | 56f54bf098ae58ea069bf33f11ae94fa8eedcabc | f4e56b1e4dda2349267af634a46f6b9df6686020 | refs/heads/master | 2022-12-16T02:59:19.896129 | 2020-09-11T12:05:22 | 2020-09-11T12:05:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 112 | py | def MovingMedian(arr):
answer = []
window = arr[0]
for i in range
print(MovingMedian([3,1,3,5,10,6,4,3,1])) | [
"mary.jereh@gmail.com"
] | mary.jereh@gmail.com |
7249432850269ce782dcd2ee3e39dbb4f43dc44e | ea9ad1d8a77f9f1d506e0c6779ffd25edcc47316 | /esperanzo_bot/__init__.py | 6deb49416b7fdf03ede3c0f398b533c9ebf31b4c | [] | no_license | charx7/esperanzo-bot | e47c4f3abf3a68a501f789b8cd5e04d59a72f0b9 | 5a47342d8763f97ecfbdd890156009f754b416fc | refs/heads/master | 2022-11-09T03:24:41.706036 | 2020-06-29T17:25:53 | 2020-06-29T17:25:53 | 275,403,998 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 500 | py | import dotenv
import os
from flask import Flask
from flask import request
from flask import Response
from flask_sqlalchemy import SQLAlchemy
# Module-level Flask application backed by a local SQLite database.
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///./esperanzo.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False  # silence the tracking warning

db = SQLAlchemy(app)

# TODO import User and Todos so that the tables get create with the command
db.create_all() #initialize the db if it doesn't already exists
# Imported for its side effect of registering view functions on `app`;
# must come after `app`/`db` exist to avoid a circular import.
from esperanzo_bot import routes # import the routes
| [
"carlos.huertaso@udlap.mx"
] | carlos.huertaso@udlap.mx |
1e21179f0570aa693579e6491ffaf4b3e2c88bff | b844c72c394b13d9ed4f73222a934f962d6ff187 | /src/structures/program.py | f7b7a6f7418d7d3bc1faf0ca35d1146a66d15fcf | [] | no_license | curtisbright/sagesat | b9b4c9180c75ce8574217058ffa4e121163ccf36 | 8fe52609ab6479d9b98a1e6cf2199a4f12c27777 | refs/heads/master | 2021-01-01T17:52:01.288449 | 2015-08-19T18:14:26 | 2015-08-19T18:14:26 | 41,425,883 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 401 | py | '''
Created on Oct 17, 2014
@author: ezulkosk
'''
class Program(object):
    """Root AST node: holds the parsed top-level statements of a program."""

    def __init__(self, ast):
        """Store the statement list and start with empty declaration tables."""
        self.ast = ast      # list of child AST nodes
        self.bools = {}     # boolean declarations, filled in by later passes
        self.graphs = {}    # graph declarations, filled in by later passes

    def toStr(self, indent):
        """Render the program; children are always rendered at indent level 1.

        (`indent` is accepted for interface symmetry with child nodes but is
        not used here, matching the original behavior.)
        """
        lines = ["Program"]
        lines.extend(child.toStr(1) for child in self.ast)
        return "\n".join(lines) + "\n"
"ezulkosk@gsd.uwaterloo.ca"
] | ezulkosk@gsd.uwaterloo.ca |
69fca31932acd9d7fb205fb9a26e569d52fb0cd3 | a5c0057fad2414221871f5d4c7dd0e8afb14ac4c | /Budzet/tests.py | 4d213cd16001192fd05f425a7060faa71c4eb89d | [] | no_license | Milida/budzet_d | 89a002a85ff7563ad12a8995f8ca107600b65556 | a2647e32c0592eb7c3e6065fd57b1e670ee57d54 | refs/heads/master | 2023-04-01T06:07:32.384969 | 2020-06-16T12:25:06 | 2020-06-16T12:25:06 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,091 | py | from time import sleep
from selenium import webdriver
import unittest
class tests(unittest.TestCase):
    """Selenium end-to-end smoke tests for the budget app.

    Expects a development server on localhost:8000 and chromedriver at
    C:\\Testy\\chromedriver.exe. The A..D method-name prefixes rely on
    unittest's alphabetical ordering so registration (C) runs before the
    successful login (D).
    """

    def test_A_open_main_page(self):
        driver = webdriver.Chrome(executable_path=r'C:\Testy\chromedriver.exe')
        driver.get('localhost:8000/')  # opens the main page in the browser
        title = driver.title  # what is displayed on the browser tab
        assert 'Budżet domowy' == title  # checks that the page title is 'Budżet domowy'
        driver.quit()  # closes the browser

    def test_B_bad_login(self):
        driver = webdriver.Chrome(executable_path=r'C:\Testy\chromedriver.exe')
        driver.get('localhost:8000/login')  # opens the login page in the browser
        driver.find_element_by_name("Zaloguj").click()
        sleep(1)
        # Submitting the empty form must keep us on the login page.
        assert driver.current_url == 'http://localhost:8000/login/'
        driver.quit()

    def test_C_register(self):
        driver = webdriver.Chrome(executable_path=r'C:\Testy\chromedriver.exe')
        driver.get('localhost:8000/register')  # opens the registration page in the browser
        driver.find_element_by_name('username').send_keys('TestowyUzytkownik')
        driver.find_element_by_name('email').send_keys('aaaaaaa@aa.xyz')
        driver.find_element_by_name('password1').send_keys('SerotoninaA')
        driver.find_element_by_name('password2').send_keys('SerotoninaA')
        driver.find_element_by_name('Zarejestruj się').click()
        sleep(1)
        assert driver.find_element_by_name('rejestracja').is_displayed()
        driver.quit()

    def test_D_good_login(self):
        driver = webdriver.Chrome(executable_path=r'C:\Testy\chromedriver.exe')
        driver.get('localhost:8000/login')  # opens the login page in the browser
        driver.find_element_by_name('username').send_keys('TestowyUzytkownik')
        driver.find_element_by_name('password').send_keys('SerotoninaA')
        driver.find_element_by_name("Zaloguj").click()
        sleep(1)
        assert driver.current_url == 'http://localhost:8000/accounts/profile/'
        driver.quit()
| [
"i.milewska@gmail.com"
] | i.milewska@gmail.com |
b30583076cfe8650b79b0ab42baf2dabcc018baf | 59608b1f1d4fc562f4e54aeb939f1038d997a132 | /balance_table.py | 7741b5ea3f68090b559373ec10b90a02a1c24d20 | [] | no_license | PengYunjing/NeteastBalance | 576e96b7a87b8758e5188cde8ce0eff1197ec515 | da10a7d4e5f0da5aca58ceb3005c648bcac4b1fe | refs/heads/master | 2020-03-23T13:55:49.283800 | 2017-11-08T05:35:43 | 2017-11-08T05:35:43 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 694 | py | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QTableWidget, QTableWidgetItem, QAbstractItemView, QHeaderView
class BalanceTable(QTableWidget):
    def __init__(self):
        """Two-column read-only table: account name (stretching) and balance."""
        QTableWidget.__init__(self)
        self.setEditTriggers(QAbstractItemView.NoEditTriggers)  # read-only cells
        self.setColumnCount(2)
        # Account column fills the remaining width; balance hugs its content.
        self.horizontalHeader().setSectionResizeMode(0, QHeaderView.Stretch)
        self.horizontalHeader().setSectionResizeMode(1, QHeaderView.ResizeToContents)
        self.setHorizontalHeaderLabels(["账号", "余额"])
def addItem(self, account, balance):
rowCount = self.rowCount()
self.insertRow(rowCount)
self.setItem(rowCount, 0, QTableWidgetItem(account))
self.setItem(rowCount, 1, QTableWidgetItem(balance)) | [
"517614202@qq.com"
] | 517614202@qq.com |
5f1c39b7f1a0f726f7dcc29c7018a74e5f080035 | 609eb72e6f9fefe18ebe806c2aed24bb5b0562c1 | /apps/invoices/models.py | f0acc13e1d3d4a681f55e66650461091b02f2bd6 | [
"MIT"
] | permissive | PocketGM/django-htk | 68b0f780e9f748932e857bf66f3e0ffdf9fb2fa2 | 371ce2c68bc825df174e11d0f6f4c489a8184d9f | refs/heads/master | 2020-12-27T15:26:31.946007 | 2014-12-12T10:45:45 | 2014-12-12T10:45:45 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,266 | py | from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from htk.apps.invoices.constants import *
from htk.apps.invoices.utils import compute_invoice_code
from htk.fields import CurrencyField
from htk.utils.enums import enum_to_str
class BaseInvoice(models.Model):
    """Abstract invoice: a dated, typed bill belonging to one customer.

    Concrete subclasses are selected via settings.HTK_INVOICE_MODEL; line
    items hang off the 'line_items' related name.
    """
    customer = models.ForeignKey(settings.HTK_INVOICE_CUSTOMER_MODEL, related_name='invoices')
    date = models.DateField()
    notes = models.TextField(max_length=256, blank=True)
    invoice_type = models.PositiveIntegerField(default=HTK_INVOICE_DEFAULT_TYPE.value)
    paid = models.BooleanField(default=False)
    payment_terms = models.PositiveIntegerField(default=HTK_INVOICE_DEFAULT_PAYMENT_TERM.value)

    class Meta:
        abstract = True

    def __unicode__(self):
        value = 'Invoice #%s' % self.id
        return value

    def get_encoded_id(self):
        """Return the obfuscated public identifier for this invoice."""
        invoice_code = compute_invoice_code(self)
        return invoice_code

    def get_url(self):
        """Return the canonical URL, keyed by the encoded (not raw) id."""
        url = reverse('invoices_invoice', args=(self.get_encoded_id(),))
        return url

    def get_total(self):
        """Sum the amounts of every line item on this invoice."""
        line_items = self.line_items.all()
        subtotal = 0
        for line_item in line_items:
            subtotal += line_item.get_amount()
        return subtotal

    def get_invoice_type(self):
        """Return the InvoiceType enum member for this invoice."""
        from htk.apps.invoices.enums import InvoiceType
        invoice_type = InvoiceType(self.invoice_type)
        return invoice_type

    def get_payment_terms(self):
        """Return the payment terms as a human-readable string."""
        from htk.apps.invoices.enums import InvoicePaymentTerm
        invoice_payment_term = InvoicePaymentTerm(self.payment_terms)
        str_value = enum_to_str(invoice_payment_term)
        return str_value
class BaseInvoiceLineItem(models.Model):
    """Abstract invoice line item: a quantity of one unit-priced item."""
    invoice = models.ForeignKey(settings.HTK_INVOICE_MODEL, related_name='line_items')
    name = models.CharField(max_length=64)
    description = models.TextField(max_length=256)
    unit_cost = CurrencyField(default=0)
    quantity = models.PositiveIntegerField(default=1)

    class Meta:
        abstract = True

    def __unicode__(self):
        value = 'Line Item for Invoice #%s' % self.invoice.id
        return value

    def get_amount(self):
        """Return unit_cost x quantity for this line."""
        amount = self.unit_cost * self.quantity
        return amount
| [
"jontsai@jonathantsai.com"
] | jontsai@jonathantsai.com |
be0a1cc91d8df25389527327b4edc65b4e4d465f | de67709fc7ae7e62c170810be39d93fdaebcac82 | /challenge1.py | 1fe3729825e963086c8d89cb018906c8f0681ebe | [] | no_license | Amin747/mycode | 656296808a1610faf9ceb004f119e055a62d1bc0 | e3af84203d72bd10abccffe674259e209102abe4 | refs/heads/main | 2023-03-28T22:00:56.067879 | 2021-04-08T17:53:41 | 2021-04-08T17:53:41 | 352,703,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 707 | py | #!/usr/bin/env python3
char_name = input("Which character do you want to know about? (Wolverine, Harry Potter, Agent Fitz)")
char_stat = input("What statistic do you want to know about? (real name, powers, archenemy)")

# Nested lookup table: hero -> stat -> value. Keys are lowercase, so the
# user must answer in lowercase for the lookup below to succeed.
heroes = {
    "wolverine": {
        "real name": "James Howlett",
        "powers": "regeneration",
        "archenemy": "Sabertooth",
    },
    "harry potter": {
        "real name": "Harry Potter",
        "powers": "he's a wizard",
        "archenemy": "Voldemort",
    },
    "agent fitz": {
        "real name": "Leopold Fitz",
        "powers": "intelligence",
        "archenemy": "Hydra",
    },
}

value = heroes[char_name][char_stat]
# Bug fix: the original print line was duplicated and syntactically invalid
# (missing '+' before `value` and no closing parenthesis).
print(char_name + "'s " + char_stat + " is " + value)
| [
"amin747@gmail.com"
] | amin747@gmail.com |
5d5c28f3931fd06266328a04e18aa0ef90f1698a | d2c469b82ec227cda12ccdc8cb84f3da895e8bef | /api/migrations/0002_alter_task_id.py | 855b8d4da2e8dc58f55e3e1cd99699800408be38 | [] | no_license | manan2110/ToDo-App | c7bb37f1531bfaa8e796d9ab0fa723411a7e028a | c52decc3f9ae559824502b7ec2c0661291d6f871 | refs/heads/main | 2023-06-27T04:45:42.852674 | 2021-07-23T23:48:49 | 2021-07-23T23:48:49 | 387,916,617 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 426 | py | # Generated by Django 3.2.5 on 2021-07-20 21:39
from django.db import migrations, models
class Migration(migrations.Migration):
    """Switches Task's primary key to BigAutoField (Django 3.2 default pk type)."""

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='task',
            name='id',
            field=models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID'),
        ),
    ]
| [
"manan.co.in@gmail.com"
] | manan.co.in@gmail.com |
2dce1b8a3f17a14efbf12da328b569727e34024a | 65d9d25cc9d6761f52432e1436df7a46079b8e55 | /Ch05/sub1/Account.py | 5d7c0809300c6ec101ab860e50dfac5c09241b53 | [] | no_license | kimhalyn/Python | d1f1e03cda8cbada04ae0169a367be23fdd0fbfa | 2d1ca851c90e0f61497611a7501d556c95e0712c | refs/heads/master | 2023-02-04T02:41:12.188840 | 2020-12-28T06:24:51 | 2020-12-28T06:24:51 | 323,271,214 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 712 | py | # 클래스 정의
class Account:
# 멤버변수 : 생성자 ( __init__ 함수)에서 선언, self는 클래스의 멤버임을 의미
def __init__(self, bank, id, name, money):
self.bank = bank
self.id = id
self.name = name
self.money = money
# 멤버 함수
def deposit(self, _money):
self.money += _money
def withdraw(self, _money):
self.money -= _money
def show(self):
print('-----------------------------')
print('은행명 : ', self.bank)
print('계좌번호 : ', self.id)
print('입금주 : ', self.name)
print('현재잔액 : ', self.money)
print('-----------------------------')
| [
"reedweed@naver.com"
] | reedweed@naver.com |
4bd8dd1ae4d0a490aba2aeb6656b3246b7aa3b32 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/doctor/migrations/0225_merge_20190314_1713.py | b3a7b682746a743fbde3d1c7d89eac3c2c5f43b0 | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 286 | py | # Generated by Django 2.0.5 on 2019-03-14 11:43
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('doctor', '0224_auto_20190314_1709'),
('doctor', '0223_providersignuplead_matrix_lead_id'),
]
operations = [
]
| [
"navneetsingh@docprime.com"
] | navneetsingh@docprime.com |
c35aa7b06fb0de485363edc1da75caeecd3bf974 | 9dba277eeb0d5e9d2ac75e2e17ab5b5eda100612 | /exercises/1901050046/d11/mymodule/main.py | 3607d2c0e686c773d0b1336d3c3af49404d9e679 | [] | no_license | shen-huang/selfteaching-python-camp | e8410bfc06eca24ee2866c5d890fd063e9d4be89 | 459f90c9f09bd3a3df9e776fc64dfd64ac65f976 | refs/heads/master | 2022-05-02T05:39:08.932008 | 2022-03-17T07:56:30 | 2022-03-17T07:56:30 | 201,287,222 | 9 | 6 | null | 2019-08-08T15:34:26 | 2019-08-08T15:34:25 | null | UTF-8 | Python | false | false | 867 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import yagmail
import getpass
import requests
import stats_word
from pyquery import PyQuery
#提取文章正文
r = requests.get('https://mp.weixin.qq.com/s/pLmuGoc4bZrMNl7MSoWgiA')
document = PyQuery(r.text)
content = document('#js_content').text()
#统计词频
result = stats_word.stats_text_cn(content,100,len_size = 2)
result_str = ""
for i in result:
result_str += str(i)
print("统计结果为:", result_str)
#配置邮箱
sender = input("输入发件人邮箱:")
password = getpass.getpass('输入发件人邮箱密码:')
recipients = input('输入收件人邮箱:')
#链接邮箱服务器
yag = yagmail.SMTP( user= sender, password=password, host='smtp.sina.cn')
# 邮箱正文
contents = result_str
# 发送邮件
yag.send(recipients, '自学训练营学习4群 DAY11 sixthspace', contents)
| [
"2470962+srvz@users.noreply.github.com"
] | 2470962+srvz@users.noreply.github.com |
35ae47c497671fc6dafd9780079da3b23fa95f2b | 15c7a09c290ca405885ba532e54ac6054632d0ab | /src/utils/camera.py | 8c4f2ac233f6f16940eb731f4119033c6d60123d | [
"Apache-2.0"
] | permissive | DianaTaukin/DSD-SATN | 8304bb2794533dcb30fbf687b4d6c9dae04d461d | 5a4ab5e3cfcb00e72ca27cf5ec10a8d8e29ef312 | refs/heads/master | 2020-08-12T19:50:55.644682 | 2019-10-14T21:27:24 | 2019-10-14T21:27:24 | 214,833,038 | 0 | 0 | Apache-2.0 | 2019-10-13T14:17:43 | 2019-10-13T14:17:43 | null | UTF-8 | Python | false | false | 2,520 | py | import numpy as np
import torch
import sys
sys.path.append('../')
from utils.util import wrap
from utils.quaternion import qrot, qinverse
def normalize_screen_coordinates(X, w, h):
assert X.shape[-1] == 2
# Normalize so that [0, w] is mapped to [-1, 1], while preserving the aspect ratio
return X/w*2 - [1, h/w]
def image_coordinates(X, w, h):
assert X.shape[-1] == 2
# Reverse camera frame normalization
return (X + [1, h/w])*w/2
def world_to_camera(X, R, t):
Rt = wrap(qinverse, R) # Invert rotation
return wrap(qrot, np.tile(Rt, (*X.shape[:-1], 1)), X - t) # Rotate and translate
def camera_to_world(X, R, t):
return wrap(qrot, np.tile(R, (*X.shape[:-1], 1)), X) + t
def project_to_2d(X, camera_params):
"""
Project 3D points to 2D using the Human3.6M camera projection function.
This is a differentiable and batched reimplementation of the original MATLAB script.
Arguments:
X -- 3D points in *camera space* to transform (N, *, 3)
camera_params -- intrinsic parameteres (N, 2+2+3+2=9)
"""
assert X.shape[-1] == 3
assert len(camera_params.shape) == 2
assert camera_params.shape[-1] == 9
assert X.shape[0] == camera_params.shape[0]
while len(camera_params.shape) < len(X.shape):
camera_params = camera_params.unsqueeze(1)
f = camera_params[..., :2]
c = camera_params[..., 2:4]
k = camera_params[..., 4:7]
p = camera_params[..., 7:]
XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
r2 = torch.sum(XX[..., :2]**2, dim=len(XX.shape)-1, keepdim=True)
radial = 1 + torch.sum(k * torch.cat((r2, r2**2, r2**3), dim=len(r2.shape)-1), dim=len(r2.shape)-1, keepdim=True)
tan = torch.sum(p*XX, dim=len(XX.shape)-1, keepdim=True)
XXX = XX*(radial + tan) + p*r2
return f*XXX + c
def project_to_2d_linear(X, camera_params):
"""
Project 3D points to 2D using only linear parameters (focal length and principal point).
Arguments:
X -- 3D points in *camera space* to transform (N, *, 3)
camera_params -- intrinsic parameteres (N, 2+2+3+2=9)
"""
assert X.shape[-1] == 3
assert len(camera_params.shape) == 2
assert camera_params.shape[-1] == 9
assert X.shape[0] == camera_params.shape[0]
while len(camera_params.shape) < len(X.shape):
camera_params = camera_params.unsqueeze(1)
f = camera_params[..., :2]
c = camera_params[..., 2:4]
XX = torch.clamp(X[..., :2] / X[..., 2:], min=-1, max=1)
return f*XX + c | [
"936605403@qq.com"
] | 936605403@qq.com |
2efe3453ee781a9544804adf36d12c31d1eb5ddf | b368cab5c8fbba4011e6de5a7d8fffea9a208bab | /src/sammaster/andrebam/GraphGenerator.py | 0a64825e8dacbfcb1292a14638b045c42a36a386 | [] | no_license | yacuzo/KPsim_blocksize_tester | d4d9da7132cc05010aac7bab6a92b76762a0c64d | a1054386e6cc0d6fea36e6578d586aaa2eec32a8 | refs/heads/master | 2021-01-22T09:16:52.757233 | 2013-09-16T08:12:15 | 2013-09-16T08:12:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,119 | py | '''
Created on Sep 6, 2013
@author: andrebam
'''
import csv
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib import cm
import numpy as np
from scipy.interpolate import griddata
class GraphGenerator(object):
'''
classdocs
'''
data = []
file = None
x = []
y = []
status = []
times = []
def __init__(self, csvfile):
'''
Constructor
'''
self.file = csvfile
def readfile(self):
with open(self.file, "r") as f:
reader = csv.reader(f, delimiter=",", quotechar="'")
for row in reader:
self.data.append(row)
self.x.append(int(row[0]))
self.y.append(int(row[1]))
self.status.append(int(row[2]))
self.times.append(float(row[3]))
def surfgraph(self):
fig = plt.figure()
ax = fig.gca(projection='3d')
max_x = max(self.x)
min_x = min(self.x)
max_y = max(self.y)
min_y = min(self.y)
xi = np.linspace(min_x, max_x, max_x - min_x + 1)
yi = np.linspace(min_y, max_y, max_y - min_y + 1)
xy, yx = np.meshgrid(xi, yi)
zi = griddata((self.x, self.y), self.times, (xy, yx), method='linear')
surf = ax.plot_surface(xy, yx, zi, rstride=1, cstride=1, cmap=cm.copper,
linewidth=0, antialiased=False)
plt.show()
def simplegraph(self):
plt.figure(1)
plt.clf()
max_x = max(self.x)
min_x = min(self.x)
max_y = max(self.y)
min_y = min(self.y)
d2_times = np.transpose(np.reshape(np.array(self.times), (max_x - min_x +1, -1)))
print d2_times
im = plt.imshow(d2_times, interpolation='nearest', cmap=cm.hot, origin="lower", aspect='auto')
plt.xlabel('x')
plt.ylabel('y')
im.set_extent([min_x - 0.5,max_x + 0.5,min_y-0.5,max_y+0.5])
plt.colorbar(im,orientation='vertical', shrink=0.8)
plt.show()
gen = GraphGenerator("test.csv")
gen.readfile()
gen.simplegraph() | [
"blackcrow@getmail.no"
] | blackcrow@getmail.no |
47e2512f693b8d7dae3919a19c1129913658adac | 2aace9bb170363e181eb7520e93def25f38dbe5c | /build/idea-sandbox/system/python_stubs/-57053121/scipy/stats/mvn.py | 4839dbc5f44d33ff30f8a051e8b11af29844004e | [] | no_license | qkpqkp/PlagCheck | 13cb66fd2b2caa2451690bb72a2634bdaa07f1e6 | d229904674a5a6e46738179c7494488ca930045e | refs/heads/master | 2023-05-28T15:06:08.723143 | 2021-06-09T05:36:34 | 2021-06-09T05:36:34 | 375,235,940 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,653 | py | # encoding: utf-8
# module scipy.stats.mvn
# from C:\Users\Doly\Anaconda3\lib\site-packages\scipy\stats\mvn.cp37-win_amd64.pyd
# by generator 1.147
"""
This module 'mvn' is auto-generated with f2py (version:2).
Functions:
value,inform = mvnun(lower,upper,means,covar,maxpts=d*1000,abseps=1e-06,releps=1e-06)
value,inform = mvnun_weighted(lower,upper,means,weights,covar,maxpts=d*1000,abseps=1e-06,releps=1e-06)
error,value,inform = mvndst(lower,upper,infin,correl,maxpts=2000,abseps=1e-06,releps=1e-06)
COMMON blocks:
/dkblck/ ivls
.
"""
# no imports
# Variables with simple values
__version__ = b'$Revision: $'
# functions
def dkblck(*args, **kwargs): # real signature unknown
""" 'i'-scalar """
pass
def mvndst(lower, upper, infin, correl, maxpts=None, abseps=None, releps=None): # real signature unknown; restored from __doc__
"""
error,value,inform = mvndst(lower,upper,infin,correl,[maxpts,abseps,releps])
Wrapper for ``mvndst``.
Parameters
----------
lower : input rank-1 array('d') with bounds (n)
upper : input rank-1 array('d') with bounds (n)
infin : input rank-1 array('i') with bounds (n)
correl : input rank-1 array('d') with bounds (n*(n-1)/2)
Other Parameters
----------------
maxpts : input int, optional
Default: 2000
abseps : input float, optional
Default: 1e-06
releps : input float, optional
Default: 1e-06
Returns
-------
error : float
value : float
inform : int
"""
pass
def mvnun(lower, upper, means, covar, maxpts=None, abseps=None, releps=None): # real signature unknown; restored from __doc__
"""
value,inform = mvnun(lower,upper,means,covar,[maxpts,abseps,releps])
Wrapper for ``mvnun``.
Parameters
----------
lower : input rank-1 array('d') with bounds (d)
upper : input rank-1 array('d') with bounds (d)
means : input rank-2 array('d') with bounds (d,n)
covar : input rank-2 array('d') with bounds (d,d)
Other Parameters
----------------
maxpts : input int, optional
Default: d*1000
abseps : input float, optional
Default: 1e-06
releps : input float, optional
Default: 1e-06
Returns
-------
value : float
inform : int
"""
pass
def mvnun_weighted(lower, upper, means, weights, covar, maxpts=None, abseps=None, releps=None): # real signature unknown; restored from __doc__
"""
value,inform = mvnun_weighted(lower,upper,means,weights,covar,[maxpts,abseps,releps])
Wrapper for ``mvnun_weighted``.
Parameters
----------
lower : input rank-1 array('d') with bounds (d)
upper : input rank-1 array('d') with bounds (d)
means : input rank-2 array('d') with bounds (d,n)
weights : input rank-1 array('d') with bounds (n)
covar : input rank-2 array('d') with bounds (d,d)
Other Parameters
----------------
maxpts : input int, optional
Default: d*1000
abseps : input float, optional
Default: 1e-06
releps : input float, optional
Default: 1e-06
Returns
-------
value : float
inform : int
"""
pass
# no classes
# variables with complex values
__loader__ = None # (!) real value is '<_frozen_importlib_external.ExtensionFileLoader object at 0x000001CB57F39940>'
__spec__ = None # (!) real value is "ModuleSpec(name='scipy.stats.mvn', loader=<_frozen_importlib_external.ExtensionFileLoader object at 0x000001CB57F39940>, origin='C:\\\\Users\\\\Doly\\\\Anaconda3\\\\lib\\\\site-packages\\\\scipy\\\\stats\\\\mvn.cp37-win_amd64.pyd')"
| [
"qinkunpeng2015@163.com"
] | qinkunpeng2015@163.com |
5f39f18b36d2df57de3e76482cac9428af2e0932 | 09884947c578015a250e41dd8b7c694685490b66 | /deps/kb_meme/meme/share/meme-5.0.4/doc/examples/sample_opal_scripts/FimoClient.py | 4d6638e7ca02ffa686bdcb39492ec0a544b04e2e | [
"MIT"
] | permissive | kbasecollaborations/MotifFinderGibbs | 51e85369656795ead6d41653aea13c0e3ad72cdb | a428a71716b3c0040bd7825dcbea3fc7fbb6823b | refs/heads/master | 2020-04-09T16:06:02.547790 | 2019-08-23T00:53:35 | 2019-08-23T00:53:35 | 160,443,764 | 0 | 1 | MIT | 2019-08-23T00:53:36 | 2018-12-05T01:46:43 | Python | UTF-8 | Python | false | false | 3,569 | py | # This script submits a motif file and a FASTA file to the FIMO web service at meme-suite.org
# and downloads the resulting output to the directory fimo_out.
# Based on the script pdb2pqrclient.pl included in the 2.0.0 Opal-Python
# toolkit, available at http://sourceforge.net/projects/opaltoolkit/ and
# documented at http://nbcr.ucsd.edu/data/docs/opal/documentation.html
# Import the Opal libraries
from AppService_client import \
AppServiceLocator, getAppMetadataRequest, launchJobRequest, \
queryStatusRequest, getOutputsRequest, \
launchJobBlockingRequest, getOutputAsBase64ByNameRequest
from AppService_types import ns0
from os import mkdir
from subprocess import call
from time import sleep
from urlparse import urlparse
from ZSI.TC import String
# Set the location of our service
# A list of meme-suite.org services can be found at http://meme-suite.org/opal2/dashboard?command=serviceList
serviceURL = "http://meme-suite.org/opal2/services/FIMO_4.9.1"
# Instantiate a new service object to interact with. Pass it the service location
appLocator = AppServiceLocator()
appServicePort = appLocator.getAppServicePort(serviceURL)
# Instantiate a new blocking job request
req = launchJobRequest()
# Set the command-line arguments for the job. Note that all
# the MEME Suite programs have a webservice program that invokes them.
#
# fimo_webservice [options] <motifs> <db seqs>
#
# Options:
# -upseqs <file> uploaded sequences
# -pvthresh <pv> output p-value threshold
# -norc scan given strand only
# -help brief help message
#
req._argList = "--upseqs crp0.fasta crp0.meme.xml"
# Alternatively, use the argList setting below to scan the
# preloaded Saccaromyces cerevisiae genome.
#req._argList = "crp0.meme.xml saccharomyces_cerevisiae.na"
# Get contents of local files to be used as input
fastaFile = open("./crp0.fasta", "r")
fastaFileContents = fastaFile.read()
fastaFile.close()
motifFile = open("./crp0.meme.xml", "r")
motifFileContents = motifFile.read()
motifFile.close()
# Set name and content of remote input file.
# Two input files are used, the motif file
#and the FASTA file.
inputFiles = []
motifInputFile = ns0.InputFileType_Def('inputFile')
motifInputFile._name = 'crp0.meme.xml'
motifInputFile._contents = motifFileContents
inputFiles.append(motifInputFile)
fastaInputFile = ns0.InputFileType_Def('inputFile')
fastaInputFile._name = 'crp0.fasta'
fastaInputFile._contents = fastaFileContents
inputFiles.append(fastaInputFile)
req._inputFile = inputFiles
# Launch a non-blocking job
print "Launching non-blocking FIMO job"
resp = appServicePort.launchJob(req)
jobID = resp._jobID
print "Received Job ID:", jobID
# Poll for job status
status = resp._status
print "Polling job status"
while 1:
# print current status
print "Status:"
print "\tCode:", status._code
print "\tMessage:", status._message
print "\tOutput Base URL:", status._baseURL
if (status._code == 8) or (status._code == 4): # STATUS_DONE || STATUS_FAILED
break
print "Waiting 30 seconds"
sleep(30)
# Query job status
status = appServicePort.queryStatus(queryStatusRequest(jobID))
if status._code == 8: # 8 = GramJob.STATUS_DONE
print "\nDownloading Outputs (using wget):\n\n";
output_dir = 'fimo_out'
dir_count = sum(1 for _ in filter(None, urlparse(status._baseURL).path.split("/")))
index_url = status._baseURL + '/index.html'
call(["wget", "-r", "-nc", "-p", "-np", "-nH", "--cut-dirs=" + str(dir_count), "-P", output_dir, index_url])
print "\nJob Complete\n\n";
| [
"mandecent.gupta@gmail.com"
] | mandecent.gupta@gmail.com |
1efc62be091c1a75fe514901746cba92c60e7c77 | e63071a8b39933a32b32d6d4f5eebfabc02a362a | /main.py | 31f6ff08f463bf6604743421ebb89d0052cb4ffd | [] | no_license | 8600/gps | 8e6f28e667e7a5cba7ed30aaba04d6a74e546f2c | a22746ad543e867114c3cbe1dc0da6529a831a7f | refs/heads/master | 2020-04-04T02:47:45.917567 | 2018-11-01T13:59:08 | 2018-11-01T13:59:08 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,114 | py | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import requests
import json
# import MySQLdb
from flask import Flask, request
app = Flask(__name__)
@app.route('/gps/<ip>')
def index(ip):
if (ip):
# 打开数据库连接
db = MySQLdb.connect(host="192.168.0.226",user="root",passwd="GwlF97#6",db="gps",charset="utf8")
# # 使用cursor()方法获取操作游标
# cursor = db.cursor()
# 没有headers会返回页面不存在
headers = {
"cookie": "__cfduid=dc0fd8b83edb2d0deba1e38199a73424f1541080023;",
"content-type": "application/vnd.maxmind.com-city+json; charset=UTF-8; version=2.1",
"user-agent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
res = requests.get('https://www.maxmind.com/geoip/v2.1/city/'+ ip +'?use-downloadable-db=1&demo=1' , headers = headers)
res.encoding = 'utf-8'
resData = res.json()
# print(resData)
if (not ("city" in resData)): resData["city"] = ""
if (not ("location" in resData)): resData["location"] = ""
if (not ("postal" in resData)): resData["postal"] = ""
if (not ("subdivisions" in resData)): resData["subdivisions"] = ""
# print(resData)
data = "'{city}', '{continent}', '{country}', '{location}', '{registered_country}', '{subdivisions}', '{traits}'".format(city=resData["city"],continent=resData["continent"], country=resData["country"], location=resData["location"], registered_country=resData["registered_country"], subdivisions=resData["subdivisions"], traits=resData["traits"])
print(data)
# SQL 插入语句
sql = """INSERT INTO location(city,
continent, country, location, registered_country, subdivisions, traits)
VALUES (""" + data +""")"""
try:
# 执行sql语句
cursor.execute(sql)
# 提交到数据库执行
db.commit()
except:
# Rollback in case there is any error
db.rollback()
# 关闭数据库连接
db.close()
return res.text
return ''
if __name__ == '__main__':
app.run(host="0.0.0.0" , port=5000) | [
"100284685@qq.com"
] | 100284685@qq.com |
93fc071c4ffa798313387ee42628f62f4a1e1508 | 31679bea8a70b221e7fde36aad25f389db6be8e9 | /models/informacion.py | 71249e0bf521d8f7e3cedb21f285e7535423847d | [] | no_license | dmr-sxe/odoo_basico | 5e3e09f623945b7345567cfb1e7cf6fe49f2a186 | d2e9ef917677707a14e6cf6e8d6699d31aae2965 | refs/heads/master | 2020-12-20T07:19:52.597232 | 2020-02-18T11:52:24 | 2020-02-18T11:52:24 | 235,999,882 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,347 | py | # -*- coding: utf-8 -*-
import locale
from odoo import models, fields, api
from odoo.exceptions import ValidationError
from odoo.exceptions import Warning
import pytz
class informacion (models.Model):
_name="odoo_basico.informacion" # Será o nome da táboa
_description = "Tipos de datos básicos"
_order = "data_hora desc"
_sql_constraints = [('nome unico', 'unique(name)', 'Non se pode repetir o nome')]
name = fields.Char(required=True,size=20,string="Titulo")
descripcion = fields.Text(string="A Descripción")# string é a etiqueta do campo
autorizado = fields.Boolean(string="¿Autorizado?", default=True)
sexo_traducido = fields.Selection([('Hombre', 'Home'), ('Mujer', 'Muller'), ('Otros', 'Outros')], string='Sexo')
data = fields.Date(string="Data", default=lambda self: fields.Date.today())
mes_data = fields.Char(compute="_mes_data", size=15, store=True)
data_hora = fields.Datetime(string="Data e Hora", default=lambda self: fields.Datetime.now())
hora_utc = fields.Char(compute="_hora_utc",string="Hora UTC", size=15, store=True)
hora_usuario = fields.Char(compute="_hora_usuario",string="Hora Usuario", size=15, store=True)
mes_castelan = fields.Char(compute="_mes_castelan", size=15, store=True)
alto_en_cms = fields.Integer(string="Alto en centímetros")
longo_en_cms = fields.Integer(string="Longo en centímetros")
ancho_en_cms = fields.Integer(string="Ancho en centímetros")
volume = fields.Float(compute="_volume", store=True)
volume_entre_100 = fields.Float(compute="_volume_entre_100", store=False)
peso = fields.Float(digits=(6, 2), string="Peso en Kg.s", default=2.7)
densidade = fields.Float(compute="_densidade", store=True)
# Os campos Many2one crean un campo na BD
moeda_id = fields.Many2one('res.currency',domain="[('position','=','after')]")
# con domain, filtramos os valores mostrados. Pode ser mediante unha constante (vai entre comillas) ou unha variable
gasto = fields.Monetary("Gasto", 'moeda_id')
foto = fields.Binary(string="Foto")
nome_adxunto = fields.Char(size=20, string="Nome Adxunto")
adxunto = fields.Binary(string="Arquivo Adxunto")
moeda_euro_id = fields.Many2one('res.currency',
default=lambda self: self.env['res.currency'].search([('name', '=', "EUR")],
limit=1))
gasto_en_euros = fields.Monetary("Gasto en Euros", 'moeda_euro_id')
moeda_en_texto = fields.Char(related="moeda_id.currency_unit_label", string="Moeda en formato texto",store=True)
creador_da_moeda = fields.Char(related="moeda_id.create_uid.login", string="Usuario creador da moeda", store=True)
@api.depends('alto_en_cms', 'longo_en_cms', 'ancho_en_cms')
def _volume(self):
for rexistro in self:
rexistro.volume = float(rexistro.alto_en_cms) * float(rexistro.longo_en_cms) * float(rexistro.ancho_en_cms)
@api.depends('alto_en_cms', 'longo_en_cms', 'ancho_en_cms')
def _volume_entre_100(self):
for rexistro in self:
rexistro.volume_entre_100 = (float(rexistro.alto_en_cms) * float(rexistro.longo_en_cms) * float(rexistro.ancho_en_cms))/100
@api.depends('volume', 'peso')
def _densidade(self):
for rexistro in self:
if rexistro.volume !=0:
rexistro.densidade = 100 * (float(rexistro.peso) / float(rexistro.volume))
else:
rexistro.densidade = 0
@api.constrains('peso') # Ao usar ValidationError temos que importar a libreria ValidationError
def _constrain_peso(self): # from odoo.exceptions import ValidationError
for rexistro in self:
if rexistro.peso < 1 or rexistro.peso > 4:
raise ValidationError('Os peso de %s ten que ser entre 1 e 4 ' % rexistro.name)
@api.depends('data')
def _mes_data(self):
locale.setlocale(locale.LC_TIME, "gl_ES.utf8")
for rexistro in self: #O idioma é o configurado en locale na máquina de odoo
rexistro.mes_data = rexistro.data.strftime("%B")
@api.depends('data')
def _mes_castelan(self):
locale.setlocale(locale.LC_TIME, "es_ES.utf8")
for rexistro in self:
rexistro.mes_castelan = rexistro.data.strftime("%B")
@api.depends('data_hora')
def _hora_utc(self):
for rexistro in self: # A hora se almacena na BD en horario UTC (2 horas menos no verán, 1 hora menos no inverno)
rexistro.hora_utc = rexistro.data_hora.strftime("%H:%M:%S")
@api.depends('data_hora')
def _hora_usuario(self):
for rexistro in self: # Convertimos a hora UTC a hora do usuario
self.actualiza_hora()
def ver_contexto(self): # Engadimos na vista un button no header. O name do button é o nome da función
for rexistro in self: #Ao usar warning temos que importar a libreria from odoo.exceptions import Warning
raise Warning('Contexto: %s' % rexistro.env.context) #env.context é un diccionario https://www.w3schools.com/python/python_dictionaries.asp
return True
def convirte_data_hora_de_utc_a_timezone_do_usuario(self,data_hora_utc_object): # recibe a data hora en formato object
usuario_timezone = pytz.timezone(self.env.user.tz or 'UTC') # obter a zona horaria do usuario
return pytz.UTC.localize(data_hora_utc_object).astimezone(usuario_timezone) # hora co horario do usuario en formato object
def data_hora_local(self):
data_hora_usuario_object = self.convirte_data_hora_de_utc_a_timezone_do_usuario(fields.Datetime.now())
for rexistro in self:
data_hora_do_campo_da_bd = self.convirte_data_hora_de_utc_a_timezone_do_usuario(rexistro.data_hora)
raise Warning('Datetime.now() devolve a hora UTC %s cambiamola coa configuración horaria do usuario %s cambiamos tamén a do campo data_hora %s'
% (fields.Datetime.now().strftime ('%Y-%m-%d %H:%M'),data_hora_usuario_object,data_hora_do_campo_da_bd))
return True
def actualiza_hora(self):
for rexistro in self:
rexistro.hora_usuario = self.convirte_data_hora_de_utc_a_timezone_do_usuario(rexistro.data_hora).strftime(
"%H:%M:%S") | [
"dani-15-94@hotmail.com"
] | dani-15-94@hotmail.com |
5f97174d0a8dcb6bbc3bca08d7f93b609540007a | c446eb37b63a4acaa622e4f45bab9d88d9f77c44 | /Week1/Lectures/the_restaurant_recommendations.py | 371e8e67cdcb21602ba5c7bf6fe2d057d37db464 | [] | no_license | JSLobo/LTP_Crafting_Quality_Code | ecc968f383bbda58ff6e0106eedc4c571ed4fdc0 | e6eca91f48b1b9fc1eb39f8e713a81942d8403e9 | refs/heads/master | 2020-12-07T06:09:45.340704 | 2020-02-03T06:52:05 | 2020-02-03T06:52:05 | 232,655,111 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,493 | py | """
A restaurant recommendation system.
Here are some example dictionaries. These correspond to the information in
restaurants_small.txt.
Restaurant name to rating:
# dict of {str: int}
{'Georgie Porgie': 87,
'Queen St. Cafe': 82,
'Dumplings R Us': 71,
'Mexican Grill': 85,
'Deep Fried Everything': 52}
Price to list of restaurant names:
# dict of {str, list of str}
{'$': ['Queen St. Cafe', 'Dumplings R Us', 'Deep Fried Everything'],
'$$': ['Mexican Grill'],
'$$$': ['Georgie Porgie'],
'$$$$': []}
Cuisine to list of restaurant names:
# dict of {str, list of str}
{'Canadian': ['Georgie Porgie'],
'Pub Food': ['Georgie Porgie', 'Deep Fried Everything'],
'Malaysian': ['Queen St. Cafe'],
'Thai': ['Queen St. Cafe'],
'Chinese': ['Dumplings R Us'],
'Mexican': ['Mexican Grill']}
With this data, for a price of '$' and cuisines of ['Chinese', 'Thai'], we
would produce this list:
[[82, 'Queen St. Cafe'], [71, 'Dumplings R Us']]
"""
# The file containing the restaurant data.
FILENAME = 'restaurants_small.txt'
def recommend(file, price, cuisines_list):
"""(file open for reading, str, list of str) -> list of [int, str] list
Find restaurants in file that are priced according to price and that are
tagged with any of the items in cuisines_list. Return a list of lists of
the form [rating%, restaurant name], sorted by rating%.
"""
# Read the file and build the data structures.
# - a dict of {restaurant name: rating%}
# - a dict of {price: list of restaurant names}
# - a dict of {cuisine: list of restaurant names}
name_to_rating, price_to_names, cuisine_to_names = read_restaurants(file)
# Look for price or cuisines first?
# Price: look up the list of restaurant names for the requested price.
names_matching_price = price_to_names[price]
# Now we have a list of restaurants in the right price range.
# Need a new list of restaurants that serve one of the cuisines.
names_final = filter_by_cuisine(names_matching_price, cuisine_to_names, cuisines_list)
# Now we have a list of restaurants that are in the right price range and serve the requested cousine.
# Need to look at ratings and sort this list.
result = build_rating_list(name_to_rating, names_final)
# We're done! Return that sorted list.
return result
import doctest
doctest.testmod()
def build_rating_list(name_to_rating, names_final):
""" (dict of {str: int}, list of str) -> list of list of {str, int}
Return a list of [rating%, restaurant name], sorted by rating%
>>> name_to_rating = {'Georgie Porgie': 87,
'Queen St. Cafe': 82,
'Dumplings R Us': 71,
'Mexican Grill': 85,
'Deep Fried Everything': 52}
>>> names = ['Queen St. Cafe', 'Dumplings R Us']
>>> build_rating_list(name_to_rating, names)
[[82, 'Queen St. Cafe'], [71, 'Dumplings R Us']]
"""
def filter_by_cuisine(names_matching_price, cuisine_to_names, cuisines_list):
""" (list of str, dict of {str: list of str}, list of str) -> list of str
>>> names = ['Queen St. Cafe', 'Dumplings R Us', 'Deep Fried Everything']
>>> cuis = {'Canadian': ['Georgie Porgie'],
'Pub Food': ['Georgie Porgie', 'Deep Fried Everything'],
'Malaysian': ['Queen St. Cafe'],
'Thai': ['Queen St. Cafe'],
'Chinese': ['Dumplings R Us'],
'Mexican': ['Mexican Grill']}
>>> cuisines = ['Chinese', 'Thai']
>>> filter_by_cuisine(names, cuis, cuisines)
['Queen St. Cafe', 'Dumplings R Us']
"""
def read_restaurants(filename):
""" (file) -> (dict, dict, dict)
Return a tuple of three dictionaries based on the information in the file:
- a dict of {restaurant name: rating%}
- a dict of {price: list of restaurant names}
- a dict of {cuisine: list of restaurant names}
"""
file = open(filename, "r")
name_to_rating = {}
price_to_names = {'$': [], '$$': [], '$$$': [], '$$$$': []}
cuisine_to_names = {'Canadian': [], 'Pub Food': [], 'Malaysian': [], 'Thai': [], 'Chinese': [], 'Mexican': []}
count = 1
for line in file:
if count % 5 == 1 and line is not "":
restaurant_name = line.rstrip("\n")
# print(restaurant_name)
count = count + 1
elif count % 5 == 2 and line is not "":
rating = int((line.rstrip("\n")).rstrip("%"))
name_to_rating[restaurant_name] = rating
count = count + 1
elif count % 5 == 3 and line is not "":
price = line.rstrip("\n")
(price_to_names.get(price)).append(restaurant_name)
count = count + 1
elif count % 5 == 4 and line is not "":
cuisines = line.rstrip("\n")
cuisines_list = get_cuisines_separated(cuisines)
# print(get_cuisines_separated(cuisines))
for cuisine in cuisines_list:
(cuisine_to_names.get(cuisine)).append(restaurant_name)
count = count + 1
elif count % 5 == 0 or line is "":
count = 1
file.close()
return name_to_rating, price_to_names, cuisine_to_names
def get_cuisines_separated(cuisines):
cuisines_list = []
cuisine = ''
for ch in cuisines:
if ch != ',':
cuisine = cuisine + ch
elif ch == ',':
cuisines_list.append(cuisine)
cuisine = ''
cuisines_list.append(cuisine)
return cuisines_list
| [
"juan.lobo@udea.edu.co"
] | juan.lobo@udea.edu.co |
9ab43ec75ffaf5f0bced7267b3d013f97f415be5 | 1f0a954c402fd3553bd83cf2c27cabfcff43fa68 | /MAGIC/Scheduler/scheduler.py | efe019288afad658f2809f33e426694dc1b5e3b1 | [] | no_license | anjanabalabhaskara/imprint-mgp | 0a2f1675fd82267d25e45f0f3c63c812cbc0ab75 | f1b2746224f08abb51d7fc2357a5e110a8d26c81 | refs/heads/main | 2023-05-18T01:00:37.910991 | 2021-06-03T13:16:30 | 2021-06-03T13:16:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,754 | py | #!/usr/bin/python
from __future__ import print_function
import sys
import random
import cplex
from cplex.exceptions import CplexError
#Loading Data
T,alpha, eta_c, eta_d, gamma_b = 6, 10, .95, .98, 1
rho_b_t = [random.randint(3,16) for t in range(T)]
rho_s_t = [random.randint(3,16) for t in range(T)]
RU, RD= 500, 500 #in kw
E_max = 5000 # in kwh
P_DG_min, P_DG_max = 500, 4000
P_DG_init = 1000
P_MG_min, P_MG_max = 100, 2000
P_B_max, P_B_min = 1000, 0
SOC_init, SOC_final = 50,50
SOC_min, SOC_max = 20, 100
P_PV_t = [random.randint(100,150) for t in range(T)] #PV Generation
P_WT_t = [random.randint(150,250) for t in range(T)] #Wind Generation
P_L_t = [random.randint(3000,4000) for t in range(T)]#Load
#-------------------------------------------------------
# ---------------------------------------------------------------------------
# Decision-variable names. For each hour t = 0..T-1 there are six variables,
# laid out group by group in this fixed order (the objective coefficients,
# bounds and types defined below follow the same ordering):
#   P_DG_t    diesel generator output
#   P_MG_b_t  power bought from the main grid
#   P_MG_s_t  power sold to the main grid
#   P_B_dch_t battery discharging power
#   P_B_ch_t  battery charging power
#   SOC_t     battery state of charge
_VAR_PREFIXES = ("P_DG_", "P_MG_b_", "P_MG_s_", "P_B_dch_", "P_B_ch_", "SOC_")
my_colnames = [prefix + str(t) for prefix in _VAR_PREFIXES for t in range(T)]
# ---------------------------------------------------------
# ---------------------------------------------------------------------------
# Objective coefficients, one per decision variable, in the same group order
# as my_colnames. The model minimizes generation cost plus grid purchases,
# minus sales revenue, plus battery depreciation.
my_obj = (
    [alpha] * T                        # diesel generation cost per kW
    + list(rho_b_t)                    # hourly price of power bought
    + [-price for price in rho_s_t]    # sales enter as negative cost (revenue)
    + [gamma_b] * T                    # depreciation per kW discharged
    + [gamma_b] * T                    # depreciation per kW charged
    + [0] * T                          # state of charge carries no cost
)
# ------------------------------------------------------------
# ---------------------------------------------------------------------------
# Variable bounds, one (lb, ub) pair per decision variable, in the same group
# order as my_colnames: diesel, grid buy, grid sell, battery discharge,
# battery charge, state of charge.
# NOTE(review): P_MG_min = 100 is used as the LOWER bound of both the buy and
# the sell variables, forcing a nonzero exchange in both directions every
# hour - confirm that this is intended.
_LOWER_BY_GROUP = (P_DG_min, P_MG_min, P_MG_min, P_B_min, P_B_min, SOC_min)
_UPPER_BY_GROUP = (P_DG_max, P_MG_max, P_MG_max, P_B_max, P_B_max, SOC_max)
my_lb = [bound for bound in _LOWER_BY_GROUP for _ in range(T)]
my_ub = [bound for bound in _UPPER_BY_GROUP for _ in range(T)]
# -----------------------------------------------------------------
# ---------------------------------------------------------------------------
# Variable types: "I" integer, "B" binary, "C" continuous. All six groups of
# T variables (diesel, grid buy/sell, battery discharge/charge, SOC) are
# declared integer, i.e. one "I" per column of my_colnames.
my_ctype = "I" * (6 * T)
# ---------------------------------------------------------------------------
# Constraint construction.
# Four parallel containers describe the linear constraints; entry i of each
# refers to the same constraint:
#   my_rownames - constraint names
#   my_rhs      - right-hand-side constants
#   my_sense    - one character per constraint: "L" (<=), "G" (>=), "E" (=)
#   rows        - [variable-name list, coefficient list] pairs
my_rownames, my_rhs, rows = [], [], []
_sense_chars = []  # collected per constraint, joined into my_sense at the end

def _add_constraint(name, sense, rhs, variables, coefficients):
    """Append one linear constraint to the four parallel containers."""
    my_rownames.append(name)
    _sense_chars.append(sense)
    my_rhs.append(rhs)
    rows.append([variables, coefficients])

# Power balance at every hour t:
#   P_DG + P_MG_buy - P_MG_sell + P_B_dch - P_B_ch >= load - (PV + wind)
# NOTE(review): ">=" permits spilling energy, and nothing forbids buying and
# selling (or charging and discharging) simultaneously in the same hour; an
# equality plus binary mode variables would be the stricter formulation -
# confirm the intent.
for t in range(T):
    _add_constraint(
        "Power_Balance_" + str(t), "G",
        P_L_t[t] - (P_PV_t[t] + P_WT_t[t]),
        ["P_DG_" + str(t), "P_MG_b_" + str(t), "P_MG_s_" + str(t),
         "P_B_dch_" + str(t), "P_B_ch_" + str(t)],
        [1, 1, -1, 1, -1])

# Diesel ramp limits. Hour 0 is constrained against the pre-horizon set
# point P_DG_init, later hours against the previous hour's output.
_add_constraint("RU_0", "L", RU + P_DG_init, ["P_DG_0"], [1])  # P_DG_0 <= P_DG_init + RU
_add_constraint("RD_0", "G", P_DG_init - RD, ["P_DG_0"], [1])  # P_DG_0 >= P_DG_init - RD
for t in range(1, T):
    _add_constraint("RU_" + str(t), "L", RU,
                    ["P_DG_" + str(t), "P_DG_" + str(t - 1)], [1, -1])
    _add_constraint("RD_" + str(t), "L", RD,
                    ["P_DG_" + str(t), "P_DG_" + str(t - 1)], [-1, 1])

# Battery state of charge, tracked in percent (its bounds SOC_min..SOC_max
# are 20..100 and SOC_init is 50, i.e. a percentage scale).
_add_constraint("SOC_0", "E", SOC_init, ["SOC_0"], [1])
# SOC_t = SOC_{t-1} - 100*P_B_dch/(eta_d*E_max) + 100*eta_c*P_B_ch/E_max
# BUG FIX: the original coefficients 1/(eta_d*E_max) and -eta_c/E_max measured
# the energy-exchange terms on a 0..1 fraction scale while SOC itself is on a
# 0..100 percent scale, understating every charge/discharge by a factor of
# 100; the terms are now scaled by 100 to match the percent-based SOC
# variables.
for t in range(1, T):
    _add_constraint("SOC_" + str(t), "E", 0,
                    ["SOC_" + str(t), "SOC_" + str(t - 1),
                     "P_B_dch_" + str(t - 1), "P_B_ch_" + str(t - 1)],
                    [1, -1, 100 / (eta_d * E_max), (-100 * eta_c) / E_max])

my_sense = "".join(_sense_chars)
# #Power Limits of all the generators
# for t in range(T):
# #Diesel Limits
# my_rownames+= ["DG_min_"+str(t), "DG_max_"+str(t)]
# my_rhs += [P_DG_min, P_DG_max] #
# my_sense += "GL"
# var = ["P_DG_"+str(t)]
# coef = [1]
# rows.append([var,coef])#Diesel Minimum Limit
# var = ["P_DG_"+str(t)]
# coef = [1]
# rows.append([var,coef])#Diesel Maximum Limit
# #Main Grid Limits
# my_rownames+= ["MG_buy_min_"+str(t), "MG_buy_max_"+str(t), "MG_sell_min_"+str(t), "MG_sell_max_"+str(t)]
# my_rhs += [P_MG_min, P_MG_max, P_MG_min, P_MG_max] #
# my_sense += "GLGL"
# var = ["P_MG_b_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG buy Minimum Limit
# var = ["P_MG_b_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG buy Maximum Limit
# var = ["P_MG_s_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG sell Minimum Limit
# var = ["P_MG_s_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG sell Maximum Limit
# #Battery power Limits
# my_rownames+= ["Bat_dch_min_"+str(t), "Bat_dch_max_"+str(t), "Bat_ch_min_"+str(t), "Bat_ch_max_"+str(t)]
# my_rhs += [P_B_min, P_B_max, P_B_min, P_B_max] #
# my_sense += "GLGL"
# var = ["P_B_dch_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG buy Minimum Limit
# var = ["P_B_dch_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG buy Maximum Limit
# var = ["P_B_ch_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG sell Minimum Limit
# var = ["P_B_ch_"+str(t)]
# coef = [1]
# rows.append([var,coef])#MG sell Maximum Limit
# #SOC limits
# my_rownames+= ["SOC_min_"+str(t), "SOC_max_"+str(t)]
# my_rhs += [SOC_min, SOC_max] #
# my_sense += "GL"
# var = ["SOC_"+str(t)]
# coef = [1]
# rows.append([var,coef])#SOC minimum Limit
# var = ["SOC_"+str(t)]
# coef = [1]
# rows.append([var,coef])#SOC maximum
# print(len(my_obj), len(my_lb), len(my_ub), len(my_ctype), len(my_colnames))
# print(len(rows), len(my_sense), len(my_rhs), len(my_rownames))
# --- Build the CPLEX model from the lists assembled above and solve ----
#Creating a CPLEX object
my_prob = cplex.Cplex()
#Setting the objective function as Minimize
my_prob.objective.set_sense(my_prob.objective.sense.minimize)
#Adding variables and their specifications
my_prob.variables.add(obj=my_obj, lb=my_lb, ub=my_ub, types=my_ctype, names=my_colnames)
#Adding constraints
my_prob.linear_constraints.add(lin_expr=rows, senses=my_sense, rhs=my_rhs, names=my_rownames)
#Solving...
my_prob.solve()
print("Solution value = ", my_prob.solution.get_objective_value())
numcols = my_prob.variables.get_num()
numrows = my_prob.linear_constraints.get_num()
slack = my_prob.solution.get_linear_slacks()
x = my_prob.solution.get_values()
# for j in range(numrows):
#     print("Row %d: Slack = %10f" % (j, slack[j]))
# Print every decision variable next to its optimal value.
for j in range(numcols):
    print("%s:%10f" % (my_colnames[j], x[j]))
# Dump the model in LP format for offline inspection.
my_prob.write("Scheduler.lp")
# def populatebyrow(prob):
# prob.objective.set_sense(prob.objective.sense.maximize)
# prob.variables.add(obj=my_obj, lb=my_lb, ub=my_ub, types=my_ctype,
# names=my_colnames)
# rows = [[["x1", "x2", "x3", "x4"], [-1.0, 1.0, 1.0, 10.0]],
# [["x1", "x2", "x3"], [1.0, -3.0, 1.0]],
# [["x2", "x4"], [1.0, -3.5]]]
# prob.linear_constraints.add(lin_expr=rows, senses=my_sense,
# rhs=my_rhs, names=my_rownames)
# def populatebycolumn(prob):
# prob.objective.set_sense(prob.objective.sense.minimize)
# prob.linear_constraints.add(rhs=my_rhs, senses=my_sense,
# names=my_rownames)
# c = [[["r1", "r2"], [-1.0, 1.0]],
# [["r1", "r2"
# , "r3"], [1.0, -3.0, 1.0]],
# [["r1", "r2"], [1.0, 1.0]],
# [["r1", "r3"], [10.0, -3.5]]]
# prob.variables.add(obj=my_obj, lb=my_lb, ub=my_ub,
# names=my_colnames, types=my_ctype, columns=c)
# def populatebynonzero(prob):
# prob.objective.set_sense(prob.objective.sense.maximize)
# prob.linear_constraints.add(rhs=my_rhs, senses=my_sense,
# names=my_rownames)
# prob.variables.add(obj=my_obj, lb=my_lb, ub=my_ub, types=my_ctype,
# names=my_colnames)
# rows = [0, 0, 0, 0, 1, 1, 1, 2, 2]
# cols = [0, 1, 2, 3, 0, 1, 2, 1, 3]
# vals = [-1.0, 1.0, 1.0, 10.0, 1.0, -3.0, 1.0, 1.0, -3.5]
# prob.linear_constraints.set_coefficients(zip(rows, cols, vals))
# def mipex1(pop_method):
# try:
# my_prob = cplex.Cplex()
# if pop_method == "r":
# handle = populatebyrow(my_prob)
# elif pop_method == "c":
# handle = populatebycolumn(my_prob)
# elif pop_method == "n":
# handle = populatebynonzero(my_prob)
# else:
# raise ValueError('pop_method must be one of "r", "c" or "n"')
# my_prob.solve()
# except CplexError as exc:
# print(exc)
# return
# print()
# # solution.get_status() returns an integer code
# print("Solution status = ", my_prob.solution.get_status(), ":", end=' ')
# # the following line prints the corresponding string
# print(my_prob.solution.status[my_prob.solution.get_status()])
# print("Solution value = ", my_prob.solution.get_objective_value())
# numcols = my_prob.variables.get_num()
# numrows = my_prob.linear_constraints.get_num()
# slack = my_prob.solution.get_linear_slacks()
# x = my_prob.solution.get_values()
# for j in range(numrows):
# print("Row %d: Slack = %10f" % (j, slack[j]))
# for j in range(numcols):
# print("Column %d: Value = %10f" % (j, x[j]))
# my_prob.write("mipex.lp")
# if __name__ == "__main__":
# if len(sys.argv) != 2 or sys.argv[1] not in ["-r", "-c", "-n"]:
# print("Usage: mipex1.py -X")
# print(" where X is one of the following options:")
# print(" r generate problem by row")
# print(" c generate problem by column")
# print(" n generate problem by nonzero")
# print(" Exiting...")
# sys.exit(-1)
# mipex1(sys.argv[1][1])
| [
"mondal.arnab27@gmail.com"
] | mondal.arnab27@gmail.com |
0a4daea046eac8ec91a24003cb1217ba6e3e0644 | 52c08364b57c454dff5a089a11afecdc29d43595 | /face_detector.py | cbb6624c318265b5e486addbaf508a6920c64a30 | [] | no_license | GengCauWong/face_landmark_1000 | e069e2acda9b44b6c7b22213a756acb1d79fcee3 | 3297290374b84b28ac9767b5d3894ca5304bbc95 | refs/heads/master | 2022-04-10T11:47:29.835971 | 2020-03-24T09:15:40 | 2020-03-24T09:15:40 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,972 | py | import torch
import onnx
import onnxruntime
import numpy as np
from itertools import product as product
from math import ceil
import cv2
class FaceDetector(object):
    """ONNX face detector (RetinaFace-style pipeline).

    Loads a fixed ONNX model, runs it through onnxruntime, and decodes the
    raw location/confidence/landmark outputs against SSD prior boxes.
    NOTE(review): tensors are moved to 'cuda' unconditionally — assumes a
    CUDA-capable torch build is available; verify for CPU-only deployments.
    """
    def __init__(self):
        # Model path is relative to the working directory — TODO confirm.
        self.model_path = r'model/FaceDetector.onnx'
        self.onnx_model = onnx.load(self.model_path)
        onnx.checker.check_model(self.onnx_model)
        self.ort_session = onnxruntime.InferenceSession(self.model_path)
        self.cfg = self.config()
        self.conf_threshold = 0.5   # minimum score for a candidate box
        self.top_k = 5000           # candidates kept before NMS
        self.nms_threshold = 0.4    # IoU threshold used by py_cpu_nms
        self.keep_top_k = 750       # detections kept after NMS
        self.vis_threshold = 0.6    # minimum score drawn by show_result
        self.image_size = (640, 640)  # (width, height) fed to the network
    def run(self, image):
        """Detect faces in a BGR image.

        Returns (detections, landmarks); detection box corners are mapped
        back to the original image size.  NOTE(review): landmarks are left
        in the 640x640 network frame — confirm callers expect that.
        """
        ori_height, ori_width = image.shape[:2]
        processed_image, scale, img_height, img_width = self.preprocess(image)
        ort_inputs = {self.ort_session.get_inputs()[0].name: self.to_numpy(processed_image)}
        locations, confidences, landmarks = self.ort_session.run(None, ort_inputs)
        detections, landmarks = self.postprocess(processed_image, locations, confidences, landmarks, scale, img_height,
                                                 img_width)
        # Rescale x1/y1/x2/y2 from the resized frame to the original image.
        detections[:, 0] = detections[:, 0] * ori_width / self.image_size[0]
        detections[:, 1] = detections[:, 1] * ori_height / self.image_size[1]
        detections[:, 2] = detections[:, 2] * ori_width / self.image_size[0]
        detections[:, 3] = detections[:, 3] * ori_height / self.image_size[1]
        return detections, landmarks
    def show_result(self, image, detections):
        """Draw boxes scoring above vis_threshold and display the image."""
        for d in detections:
            # d = (x1, y1, x2, y2, score); skip low-confidence detections.
            if self.vis_threshold > d[4]:
                continue
            image = cv2.rectangle(image, (int(d[0]), int(d[1])), (int(d[2]), int(d[3])), (0, 0, 255), 2)
        cv2.imshow('', image)
        cv2.waitKey(1)
    def preprocess(self, image):
        """Resize, mean-subtract and convert the image to an NCHW tensor.

        Returns (tensor, scale, height, width); ``scale`` maps normalized
        box coordinates back to resized-image pixels.
        """
        image = cv2.resize(image, self.image_size)
        img = np.float32(image)
        img_height, img_width, _ = img.shape
        scale = torch.Tensor([img_width, img_height, img_width, img_height])
        # Per-channel (BGR) mean subtraction used at training time —
        # presumably; TODO confirm against the model's training pipeline.
        img -= (104, 117, 123)
        img = img.transpose(2, 0, 1)
        img = torch.from_numpy(img).unsqueeze(0)
        img = img.detach()
        img = img.to('cuda')
        scale = scale.to('cuda')
        return img, scale, img_height, img_width
    def postprocess(self, image, locations, confidences, landmarks, scale, img_height, img_width):
        """Decode raw network outputs into scored boxes and landmarks.

        Pipeline: decode against priors -> confidence filter -> top-k sort
        -> NMS -> keep_top_k.  Returns (detections Nx5, landmarks Nx10).
        """
        priorbox = PriorBox(self.cfg, image_size=self.image_size)
        priors = priorbox.forward()
        priors = priors.to('cuda')
        resize = 1
        prior_data = priors.data
        locations = torch.from_numpy(locations).to('cuda')
        confidences = torch.from_numpy(confidences).to('cuda')
        landmarks = torch.from_numpy(landmarks).to('cuda')
        boxes = self.decode(locations.data.squeeze(0), prior_data, self.cfg['variance'])
        boxes = boxes * scale / resize
        boxes = boxes.cpu().numpy()
        # Column 1 of the confidence output is the "face" class score.
        scores = confidences.squeeze(0).data.cpu().numpy()[:, 1]
        landmarks = self.decode_landmarks(landmarks.data.squeeze(0), prior_data, self.cfg['variance'])
        # Five (x, y) landmark pairs, scaled by (width, height) per pair.
        scale1 = torch.Tensor([image.shape[3], image.shape[2], image.shape[3], image.shape[2],
                               image.shape[3], image.shape[2], image.shape[3], image.shape[2],
                               image.shape[3], image.shape[2]])
        scale1 = scale1.to('cuda')
        landmarks = landmarks * scale1 / resize
        landmarks = landmarks.cpu().numpy()
        # Drop candidates below the confidence threshold.
        inds = np.where(scores > self.conf_threshold)[0]
        boxes = boxes[inds]
        landmarks = landmarks[inds]
        scores = scores[inds]
        # Keep the top_k highest-scoring candidates before NMS.
        order = scores.argsort()[::-1][:self.top_k]
        boxes = boxes[order]
        landmarks = landmarks[order]
        scores = scores[order]
        detections = np.hstack((boxes, scores[:, np.newaxis])).astype(np.float32, copy=False)
        keep = self.py_cpu_nms(detections, self.nms_threshold)
        # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu)
        detections = detections[keep, :]
        landmarks = landmarks[keep]
        # keep top-K faster NMS
        detections = detections[:self.keep_top_k, :]
        landmarks = landmarks[:self.keep_top_k, :]
        # detections = np.concatenate((detections, landmarks), axis=1)
        return detections, landmarks
    def decode(self, loc, priors, variances):
        """Decode SSD location offsets into (x1, y1, x2, y2) boxes."""
        # Center-size decoding, then convert center/size to corner form.
        boxes = torch.cat((
            priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:],
            priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1)
        boxes[:, :2] -= boxes[:, 2:] / 2
        boxes[:, 2:] += boxes[:, :2]
        return boxes
    def decode_landmarks(self, pre, priors, variances):
        """Decode the five landmark offsets into absolute (x, y) pairs."""
        landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:],
                            priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:],
                            ), dim=1)
        return landms
    def config(self):
        """Return the prior-box configuration (anchor sizes per stride)."""
        cfg = {
            'min_sizes': [[16, 32], [64, 128], [256, 512]],
            'steps': [8, 16, 32],
            'variance': [0.1, 0.2],
            'clip': False,
        }
        return cfg
    def to_numpy(self, tensor):
        """Convert a torch tensor to a numpy array (detaching if needed)."""
        return tensor.detach().cpu().numpy() if tensor.requires_grad else tensor.cpu().numpy()
    def py_cpu_nms(self, dets, thresh):
        """Pure Python NMS baseline."""
        # dets: Nx5 array of (x1, y1, x2, y2, score); returns kept indices.
        x1 = dets[:, 0]
        y1 = dets[:, 1]
        x2 = dets[:, 2]
        y2 = dets[:, 3]
        scores = dets[:, 4]
        areas = (x2 - x1 + 1) * (y2 - y1 + 1)
        order = scores.argsort()[::-1]
        keep = []
        while order.size > 0:
            # Keep the best remaining box, then suppress overlaps with it.
            i = order[0]
            keep.append(i)
            xx1 = np.maximum(x1[i], x1[order[1:]])
            yy1 = np.maximum(y1[i], y1[order[1:]])
            xx2 = np.minimum(x2[i], x2[order[1:]])
            yy2 = np.minimum(y2[i], y2[order[1:]])
            w = np.maximum(0.0, xx2 - xx1 + 1)
            h = np.maximum(0.0, yy2 - yy1 + 1)
            inter = w * h
            ovr = inter / (areas[i] + areas[order[1:]] - inter)
            inds = np.where(ovr <= thresh)[0]
            order = order[inds + 1]
        return keep
class PriorBox(object):
    """Generates SSD-style prior (anchor) boxes for every feature-map cell.

    Anchors are emitted as (cx, cy, w, h) in coordinates relative to the
    input image, one group of ``min_sizes`` per cell, level by level.
    """

    def __init__(self, cfg, image_size=None):
        super(PriorBox, self).__init__()
        self.min_sizes = cfg['min_sizes']
        self.steps = cfg['steps']
        self.clip = cfg['clip']
        self.image_size = image_size
        # One (rows, cols) grid per stride level.
        self.feature_maps = [
            [ceil(self.image_size[0] / s), ceil(self.image_size[1] / s)]
            for s in self.steps
        ]
        self.name = "s"

    def forward(self):
        """Return an (N, 4) tensor of priors, optionally clamped to [0, 1]."""
        img_h = self.image_size[0]
        img_w = self.image_size[1]
        coords = []
        for level, (rows, cols) in enumerate(self.feature_maps):
            step = self.steps[level]
            for row, col in product(range(rows), range(cols)):
                # Cell center in relative image coordinates.
                cx = (col + 0.5) * step / img_w
                cy = (row + 0.5) * step / img_h
                for size in self.min_sizes[level]:
                    coords.extend([cx, cy, size / img_w, size / img_h])
        priors = torch.Tensor(coords).view(-1, 4)
        if self.clip:
            priors.clamp_(max=1, min=0)
        return priors
if __name__ == '__main__':
    # Manual smoke test: run the detector once over a sample image.
    sample = cv2.imread('data/1.jpg')
    detector = FaceDetector()
    detector.run(sample)
| [
"vivianziweiyang@gmail.com"
] | vivianziweiyang@gmail.com |
3df813b7b10c86143253c4d824e3408615ad7b62 | 5141a9446464e03df639093bb75307c3a421084b | /sim/materials/pitch/segment_03/pitch_handlers.py | edc792cea7b2c4d034c339a6744fc4dab648d58e | [] | no_license | GregoryREvans/sim | b32faaa4ec0288dfc03d33e27a46971c30bf6c33 | d9be48232c41365759d551137786627cff140abc | refs/heads/master | 2022-02-26T15:54:15.807251 | 2022-02-10T13:51:19 | 2022-02-10T13:51:19 | 248,852,915 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,623 | py | import evans
from sim.materials.pitch.segment_03.pitches import (
voice_1_chord_1,
voice_1_chord_2,
voice_1_chord_3,
voice_1_chord_4,
voice_1_chord_5,
voice_1_chord_6,
voice_1_chord_7,
voice_1_chord_8,
voice_2_chord_1,
voice_2_chord_2,
voice_2_chord_3,
voice_2_chord_4,
voice_2_chord_5,
voice_2_chord_6,
voice_2_chord_7,
voice_2_chord_8,
voice_3_chord_1,
voice_3_chord_2,
voice_3_chord_3,
voice_3_chord_4,
voice_3_chord_5,
voice_3_chord_6,
voice_3_chord_7,
voice_3_chord_8,
voice_4_chord_1,
voice_4_chord_2,
voice_4_chord_3,
voice_4_chord_4,
voice_4_chord_5,
voice_4_chord_6,
voice_4_chord_7,
voice_4_chord_8,
)
# Pitch handlers for segment 03: one evans.PitchHandler per (voice, chord)
# pair — 4 voices x 8 chords.  Every handler is constructed the same way,
# reading its pitch material from the matching voice_N_chord_M list and
# tagged with that list's name.
# NOTE(review): the semantics of forget=False come from the evans package
# (presumably the handler keeps its position between calls) — verify there.

# --- voice 1 (chords 1-8) ---
piano_pitch_handler_one_1 = evans.PitchHandler(
    pitch_list=voice_1_chord_1, forget=False, name="voice_1_chord_1"
)
piano_pitch_handler_one_2 = evans.PitchHandler(
    pitch_list=voice_1_chord_2, forget=False, name="voice_1_chord_2"
)
piano_pitch_handler_one_3 = evans.PitchHandler(
    pitch_list=voice_1_chord_3, forget=False, name="voice_1_chord_3"
)
piano_pitch_handler_one_4 = evans.PitchHandler(
    pitch_list=voice_1_chord_4, forget=False, name="voice_1_chord_4"
)
piano_pitch_handler_one_5 = evans.PitchHandler(
    pitch_list=voice_1_chord_5, forget=False, name="voice_1_chord_5"
)
piano_pitch_handler_one_6 = evans.PitchHandler(
    pitch_list=voice_1_chord_6, forget=False, name="voice_1_chord_6"
)
piano_pitch_handler_one_7 = evans.PitchHandler(
    pitch_list=voice_1_chord_7, forget=False, name="voice_1_chord_7"
)
piano_pitch_handler_one_8 = evans.PitchHandler(
    pitch_list=voice_1_chord_8, forget=False, name="voice_1_chord_8"
)
# --- voice 2 (chords 1-8) ---
piano_pitch_handler_two_1 = evans.PitchHandler(
    pitch_list=voice_2_chord_1, forget=False, name="voice_2_chord_1"
)
piano_pitch_handler_two_2 = evans.PitchHandler(
    pitch_list=voice_2_chord_2, forget=False, name="voice_2_chord_2"
)
piano_pitch_handler_two_3 = evans.PitchHandler(
    pitch_list=voice_2_chord_3, forget=False, name="voice_2_chord_3"
)
piano_pitch_handler_two_4 = evans.PitchHandler(
    pitch_list=voice_2_chord_4, forget=False, name="voice_2_chord_4"
)
piano_pitch_handler_two_5 = evans.PitchHandler(
    pitch_list=voice_2_chord_5, forget=False, name="voice_2_chord_5"
)
piano_pitch_handler_two_6 = evans.PitchHandler(
    pitch_list=voice_2_chord_6, forget=False, name="voice_2_chord_6"
)
piano_pitch_handler_two_7 = evans.PitchHandler(
    pitch_list=voice_2_chord_7, forget=False, name="voice_2_chord_7"
)
piano_pitch_handler_two_8 = evans.PitchHandler(
    pitch_list=voice_2_chord_8, forget=False, name="voice_2_chord_8"
)
# --- voice 3 (chords 1-8) ---
piano_pitch_handler_three_1 = evans.PitchHandler(
    pitch_list=voice_3_chord_1, forget=False, name="voice_3_chord_1"
)
piano_pitch_handler_three_2 = evans.PitchHandler(
    pitch_list=voice_3_chord_2, forget=False, name="voice_3_chord_2"
)
piano_pitch_handler_three_3 = evans.PitchHandler(
    pitch_list=voice_3_chord_3, forget=False, name="voice_3_chord_3"
)
piano_pitch_handler_three_4 = evans.PitchHandler(
    pitch_list=voice_3_chord_4, forget=False, name="voice_3_chord_4"
)
piano_pitch_handler_three_5 = evans.PitchHandler(
    pitch_list=voice_3_chord_5, forget=False, name="voice_3_chord_5"
)
piano_pitch_handler_three_6 = evans.PitchHandler(
    pitch_list=voice_3_chord_6, forget=False, name="voice_3_chord_6"
)
piano_pitch_handler_three_7 = evans.PitchHandler(
    pitch_list=voice_3_chord_7, forget=False, name="voice_3_chord_7"
)
piano_pitch_handler_three_8 = evans.PitchHandler(
    pitch_list=voice_3_chord_8, forget=False, name="voice_3_chord_8"
)
# --- voice 4 (chords 1-8) ---
piano_pitch_handler_four_1 = evans.PitchHandler(
    pitch_list=voice_4_chord_1, forget=False, name="voice_4_chord_1"
)
piano_pitch_handler_four_2 = evans.PitchHandler(
    pitch_list=voice_4_chord_2, forget=False, name="voice_4_chord_2"
)
piano_pitch_handler_four_3 = evans.PitchHandler(
    pitch_list=voice_4_chord_3, forget=False, name="voice_4_chord_3"
)
piano_pitch_handler_four_4 = evans.PitchHandler(
    pitch_list=voice_4_chord_4, forget=False, name="voice_4_chord_4"
)
piano_pitch_handler_four_5 = evans.PitchHandler(
    pitch_list=voice_4_chord_5, forget=False, name="voice_4_chord_5"
)
piano_pitch_handler_four_6 = evans.PitchHandler(
    pitch_list=voice_4_chord_6, forget=False, name="voice_4_chord_6"
)
piano_pitch_handler_four_7 = evans.PitchHandler(
    pitch_list=voice_4_chord_7, forget=False, name="voice_4_chord_7"
)
piano_pitch_handler_four_8 = evans.PitchHandler(
    pitch_list=voice_4_chord_8, forget=False, name="voice_4_chord_8"
)
| [
"gregoryrowlandevans@gmail.com"
] | gregoryrowlandevans@gmail.com |
03b959614e50f787f0f04938891a9d27ef1ec31b | c50e7eb190802d7849c0d0cea02fb4d2f0021777 | /src/storage-blob-preview/azext_storage_blob_preview/tests/latest/test_storage_sas_scenarios.py | 7fe8ebfd5f1fd7662ee08735a0b96f88a2c80fb6 | [
"LicenseRef-scancode-generic-cla",
"MIT"
] | permissive | Azure/azure-cli-extensions | c1615b19930bba7166c282918f166cd40ff6609c | b8c2cf97e991adf0c0a207d810316b8f4686dc29 | refs/heads/main | 2023-08-24T12:40:15.528432 | 2023-08-24T09:17:25 | 2023-08-24T09:17:25 | 106,580,024 | 336 | 1,226 | MIT | 2023-09-14T10:48:57 | 2017-10-11T16:27:31 | Python | UTF-8 | Python | false | false | 7,018 | py | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from datetime import datetime, timedelta
from azure.cli.testsdk import (LiveScenarioTest, ResourceGroupPreparer, StorageAccountPreparer,
JMESPathCheck, JMESPathCheckExists, NoneCheck)
from ..storage_test_util import StorageScenarioMixin
class StorageSASScenario(StorageScenarioMixin, LiveScenarioTest):
    """Live scenario tests for service SAS tokens on blob storage.

    Bug fix: both test methods were named
    ``test_storage_blob_sas_permission_scenario``, so the second definition
    shadowed the first and the account-key/connection-string scenario never
    ran.  The stored-access-policy test is renamed to
    ``test_storage_blob_sas_policy_scenario`` so both are discovered.
    """

    @ResourceGroupPreparer()
    @StorageAccountPreparer(name_prefix='blobsas', kind='StorageV2', location='eastus2euap')
    def test_storage_blob_sas_permission_scenario(self, resource_group, storage_account):
        """
        Test service SAS operations.
        A service SAS is secured with the storage account key. A service SAS delegates access to a resource in only
        one of the Azure Storage services: Blob storage, Queue storage, Table storage, or Azure Files.
        """
        expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
        account_info = self.get_account_info(resource_group, storage_account)
        container = self.create_container(account_info)
        local_file = self.create_temp_file(128, full_random=False)
        blob_name = self.create_random_name('blob', 16)

        self.kwargs.update({
            'expiry': expiry,
            'account': storage_account,
            'container': container,
            'local_file': local_file,
            'blob': blob_name
        })

        # ----account key----
        # test sas-token for a container
        sas = self.storage_cmd('storage container generate-sas -n {} --https-only --permissions dlrwt --expiry {} -otsv',
                               account_info, container, expiry).output.strip()
        self.kwargs['container_sas'] = sas
        self.cmd('storage blob upload -c {container} -f "{local_file}" -n {blob} '
                 '--account-name {account} --sas-token "{container_sas}"')

        # test sas-token for a blob
        sas = self.storage_cmd('storage blob generate-sas -c {} -n {} --https-only --permissions acdrwt --expiry {} '
                               '-otsv', account_info, container, blob_name, expiry).output.strip()
        self.kwargs['blob_sas'] = sas
        self.cmd('storage blob upload -c {container} -f "{local_file}" -n {blob} --overwrite '
                 '--account-name {account} --sas-token "{blob_sas}" --tags test=tag ')

        self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}') \
            .assert_with_checks(JMESPathCheck('name', blob_name),
                                JMESPathCheck('tagCount', 1))

        self.cmd('storage blob tag list -n {} -c {} --account-name {} --sas-token "{}" '.format(blob_name,
                 container, storage_account, sas)).assert_with_checks(JMESPathCheck('test', 'tag'))

        # ----connection string----
        connection_str = self.cmd('storage account show-connection-string -n {account} --query connectionString '
                                  '-otsv').output.strip()
        self.kwargs['con_str'] = connection_str
        # test sas-token for a container
        sas = self.cmd('storage container generate-sas -n {container} --https-only --permissions dlrw '
                       '--connection-string {con_str} --expiry {expiry} -otsv').output.strip()
        self.kwargs['container_sas'] = sas
        self.cmd('storage blob upload -c {container} -f "{local_file}" -n {blob} '
                 '--account-name {account} --sas-token "{container_sas}"')

        # test sas-token for a blob
        sas = self.cmd('storage blob generate-sas -c {container} -n {blob} --account-name {account} --https-only '
                       '--permissions acdrwt --expiry {expiry} -otsv').output.strip()
        self.kwargs['blob_sas'] = sas
        self.cmd('storage blob show -c {container} -n {blob} --account-name {account} --sas-token {blob_sas}') \
            .assert_with_checks(JMESPathCheck('name', blob_name))

    @ResourceGroupPreparer()
    @StorageAccountPreparer()
    def test_storage_blob_sas_policy_scenario(self, resource_group, storage_account):
        """
        Test service SAS with stored access policy.
        A stored access policy is defined on a resource container, which can be a blob container, table, queue,
        or file share. The stored access policy can be used to manage constraints for one or more service shared
        access signatures. When you associate a service SAS with a stored access policy, the SAS inherits the
        constraints—the start time, expiry time, and permissions—defined for the stored access policy.
        """
        expiry = (datetime.utcnow() + timedelta(hours=1)).strftime('%Y-%m-%dT%H:%MZ')
        account_info = self.get_account_info(resource_group, storage_account)
        container = self.create_container(account_info)
        local_file = self.create_temp_file(128, full_random=False)
        blob_name = self.create_random_name('blob', 16)
        policy = self.create_random_name('policy', 16)

        # Create a policy and verify it is listed/shown with normalized
        # permissions ('racwdl').
        self.storage_cmd('storage container policy create -c {} -n {} --expiry {} --permissions acdlrw', account_info,
                         container, policy, expiry)

        self.storage_cmd('storage container policy list -c {} ', account_info, container)\
            .assert_with_checks(JMESPathCheckExists('{}.expiry'.format(policy)),
                                JMESPathCheck('{}.permission'.format(policy), 'racwdl'))
        self.storage_cmd('storage container policy show -c {} -n {} ', account_info, container, policy, expiry)\
            .assert_with_checks(JMESPathCheckExists('expiry'),
                                JMESPathCheck('permission', 'racwdl'))

        # A SAS bound to the policy inherits its permissions.
        sas = self.storage_cmd('storage blob generate-sas -n {} -c {} --policy-name {} -otsv ', account_info, blob_name,
                               container, policy).output.strip()
        self.storage_cmd('storage blob upload -n {} -c {} -f "{}" --sas-token "{}" ', account_info, blob_name, container,
                         local_file, sas)

        # Update, then delete the policy and verify each step.
        self.storage_cmd('storage container policy update -c {} -n {} --permissions acdlr', account_info, container,
                         policy)
        self.storage_cmd('storage container policy show -c {} -n {} ', account_info, container, policy)\
            .assert_with_checks(JMESPathCheckExists('expiry'),
                                JMESPathCheck('permission', 'racdl'))
        self.storage_cmd('storage container policy delete -c {} -n {} ', account_info, container, policy)
        self.storage_cmd('storage container policy list -c {} ', account_info, container) \
            .assert_with_checks(NoneCheck())
| [
"noreply@github.com"
] | Azure.noreply@github.com |
49b68ae52f3b82b2b577dafc6dc5f37731d8f203 | 633d41919194063c5010a227d7c979e24947a850 | /solutions/polyfit/example.py | 3f5cdadca7f069edb331be1780ada7b7c1adf9fc | [] | no_license | Sumbrella/ele_project | 04e88d342025215acb2d1c74b88d17bc8a84f528 | 75a957b18f955466c97609d943b0cdd1f9143e29 | refs/heads/master | 2023-04-20T08:26:46.501941 | 2021-04-30T01:09:17 | 2021-04-30T01:09:17 | 309,329,533 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,131 | py | import matplotlib.pyplot as plt
import pandas as pd
from paddle import fluid
from paddle.fluid import ParamAttr, Pool2D, Conv2D, Linear, BatchNorm
from paddle.fluid.regularizer import L2Decay
from solutions.polyfit.get_reader import get_reader
from solutions.polyfit.teacher_change import change_teacher
class ConvBNLayer(fluid.dygraph.Layer):
    """
    Convolution + batch normalization; an activation selected by ``act``
    is applied after the BN layer (leaky_relu by default).
    """
    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size=(1, 3),
                 stride=1,
                 groups=1,
                 padding=(0, 1),
                 act="leaky",
                 is_test=True):
        super(ConvBNLayer, self).__init__()

        # Convolution without bias: the BatchNorm that follows provides the
        # learnable shift, so a conv bias would be redundant.
        self.conv = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Normal(0, 0.2)),
            bias_attr=False,
            act=None)
        self.batch_norm = BatchNorm(
            num_channels=num_filters,
            is_test=is_test,
            param_attr=ParamAttr(
                initializer=fluid.initializer.Normal(0, 0.2),
                regularizer=L2Decay(0.)),
            bias_attr=ParamAttr(
                initializer=fluid.initializer.Constant(100),
                regularizer=L2Decay(0.)))
        self.act = act

    def forward(self, inputs):
        """Apply conv -> batch norm -> activation (if any) to ``inputs``."""
        out = self.conv(inputs)
        out = self.batch_norm(out)
        # Bug fix: the original only recognized act == 'leaky', so callers
        # passing 'leaky_relu' (AlexNet conv1/3/4/5) or 'relu' (conv2)
        # silently got NO activation at all.  Accept both spellings of the
        # leaky variant and support plain relu; any other value still means
        # "no activation", preserving the old behavior for act=None.
        if self.act in ('leaky', 'leaky_relu'):
            out = fluid.layers.leaky_relu(x=out, alpha=0.1)
        elif self.act == 'relu':
            out = fluid.layers.relu(out)
        return out
class AlexNet(fluid.dygraph.Layer):
    """AlexNet-style regressor over 2-channel inputs.

    Five Conv+BN blocks with three max-pools, flattened into three fully
    connected layers; the final layer emits ``num_classes`` values.
    NOTE(review): fc1 expects a flattened conv output of 256 * 3 = 768
    elements, which fixes the spatial input size implicitly — confirm the
    expected input H x W against the data pipeline.
    """
    def __init__(self, num_classes=9):
        super(AlexNet, self).__init__()
        self.conv1 = ConvBNLayer(num_channels=2, num_filters=96, filter_size=11, stride=4, padding=5, act='leaky_relu')
        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv2 = ConvBNLayer(num_channels=96, num_filters=256, filter_size=5, stride=1, padding=2, act='relu')
        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.conv3 = ConvBNLayer(num_channels=256, num_filters=384, filter_size=3, stride=1, padding=1, act='leaky_relu')
        self.conv4 = ConvBNLayer(num_channels=384, num_filters=384, filter_size=3, stride=1, padding=1, act='leaky_relu')
        self.conv5 = ConvBNLayer(num_channels=384, num_filters=256, filter_size=3, stride=1, padding=1, act='leaky_relu')
        self.pool5 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')
        self.fc1 = Linear(input_dim=256 * 3, output_dim=256 * 2, act='relu')
        self.drop_ratio1 = 0.5
        self.fc2 = Linear(input_dim=256 * 2, output_dim=256 * 1, act='relu')
        self.drop_ratio2 = 0.5
        self.fc3 = Linear(input_dim=256, output_dim=num_classes)
        # Ordered feature-extraction stages applied sequentially in forward().
        self.conv_layers = [self.conv1, self.pool1, self.conv2, self.pool2, self.conv3, self.conv4, self.conv5,
                            self.pool5]
    def forward(self, x):
        """Run the conv stack, flatten, and apply fc1 -> fc2 -> fc3."""
        for layers in self.conv_layers:
            x = layers(x)
        # x = self.bn1(x)
        # Flatten everything but the batch dimension for the FC head.
        x = fluid.layers.reshape(x, [x.shape[0], -1])
        x = self.fc1(x)
        x = fluid.layers.dropout(x, self.drop_ratio1)
        x = self.fc2(x)
        x = fluid.layers.dropout(x, self.drop_ratio2)
        x = self.fc3(x)
        return x
if __name__ == '__main__':
    import numpy as np

    from ele_common.units import SingleFile, Reader

    # Data locations, relative to this script's directory.
    train_dir = "../../data/train/before"
    train_label_dir = "../../data/train/teacher"
    test_dir = "../../data/test/before"
    test_label_dir = "../../data/test/teacher"

    # 'train' or 'debug'; the debug prints below only fire in 'debug' mode,
    # so with mode = 'train' that branch is dead code.
    mode = 'train'
    epoch_num = 150
    loss_function = fluid.layers.square_error_cost
    batch_size = 10
    # NOTE(review): bd / value / lr / l2 are defined but never used — the
    # optimizer below hard-codes learning_rate=0.1 and no regularizer.
    # TODO confirm whether the piecewise-decay schedule was meant to apply.
    bd = [1000, 2000, 3000]
    value = [1.0, 0.5, 0.1, 0.05]
    # lr = fluid.layers.piecewise_decay(boundaries=[1000, 2000, 3000], values=[1.0, 0.5, 0.1, 0.05])
    lr = 1.0
    l2 = fluid.regularizer.L2Decay(regularization_coeff=1e-2)

    train_reader = get_reader(train_dir, train_label_dir, batch_size=batch_size)
    test_reader = get_reader(test_dir, test_label_dir, batch_size=batch_size)

    # Best (lowest) test loss seen so far; used for checkpointing.
    min_losses = 1000000
    # =====
    # tmp_data = np.random.randn(1, 2, 1, 100).astype('float64')
    # tmp_data = tmp_data / 10000000
    # with fluid.dygraph.guard():
    #     tmp_data = to_variable(tmp_data)
    #     net = EleNetwork()
    #     net(tmp_data)
    # =====
    train_losses = []
    test_losses = []
    with fluid.dygraph.guard():
        model = AlexNet()
        reader = Reader(train=train_reader, test=test_reader)

        model.train()

        optimizer = fluid.optimizer.Adam(learning_rate=0.1,
                                         parameter_list=model.parameters(),
                                         # regularization=l2,
                                         )
        for epoch in range(epoch_num):
            # ---- training pass ----
            for batch, data in enumerate(reader.train()):
                imgs, labels = data
                # imgs = np.log(imgs)
                # change data label value
                labels = change_teacher(labels)
                imgs = fluid.dygraph.to_variable(imgs)
                labels = fluid.dygraph.to_variable(labels)
                logits = model(imgs)
                loss = loss_function(logits, labels)
                avg_loss = fluid.layers.mean(loss)
                # Record/print the loss every 10 batches.
                if batch % 10 == 0:
                    train_losses.append(avg_loss.numpy())
                    print(f"epoch:{epoch} batch:{batch} loss:{avg_loss.numpy()}")
                if mode == 'debug':
                    print("label:", labels.numpy())
                    print("logits:", logits.numpy())
                    print("loss:", loss.numpy())
                    print("avg_loss:", avg_loss.numpy())

                avg_loss.backward()
                optimizer.minimize(avg_loss)
                model.clear_gradients()

            # ---- evaluation pass at the end of each epoch ----
            model.eval()
            losses = []
            for batch, data in enumerate(reader.test()):
                imgs, labels = data
                # change labels
                labels = change_teacher(labels)
                imgs = fluid.dygraph.to_variable(imgs)
                labels = fluid.dygraph.to_variable(labels)
                logits = model(imgs)
                loss = loss_function(logits, labels)
                avg_loss = fluid.layers.mean(loss)
                losses.append(avg_loss.numpy())
            # Checkpoint whenever the mean test loss improves.
            if np.mean(losses) < min_losses:
                min_losses = np.mean(losses)
                fluid.save_dygraph(model.state_dict(), "min_polyfit")
                fluid.save_dygraph(optimizer.state_dict(), "min_polyfit")
            print(f"epoch:{epoch} test_result: loss | {np.mean(losses)}")
            model.train()

        # Final (non-best) checkpoint after the last epoch.
        fluid.save_dygraph(model.state_dict(), "polyfit")
        fluid.save_dygraph(optimizer.state_dict(), "polyfit")

    # Plot the sampled training-loss curve.
    train_losses = pd.DataFrame(train_losses)
    train_losses.plot()
    plt.show()
| [
"493972246@qq.com"
] | 493972246@qq.com |
904407ba849625af9f9426cd1330c5e12de253d5 | 281e8cb45b010ea6173c2b3f6d7d0938c70f9603 | /Peng_agri_bishe/recommend_templates/views.py | f4a693df319afa53ff62516ce58b5f44ffee564e | [] | no_license | PGDIP/Agri | b75a3906a7c3c437cbb3763114430fe02eb061c0 | c6c4a3cc2aba1a4ce157e03d38db0a2681c4ef2d | refs/heads/master | 2021-05-04T19:36:42.599803 | 2017-12-03T07:28:28 | 2017-12-03T07:28:28 | 106,798,000 | 1 | 2 | null | null | null | null | UTF-8 | Python | false | false | 24,339 | py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render, render_to_response
from django.http import HttpResponse
from django.http import JsonResponse
from bson import ObjectId
import os
import json
from recommend_templates.Main.paserManager.newsContentPaser import *
from recommend_templates.Main.dbManager.mongoClass import MongoOperator
from recommend_templates.Main.recSys.knn import get_K_nearst_love
from recommend_templates.Main.recSys.arima import get_min_max_degree
from recommend_templates.pager import Pagination
from recommend_templates.Main.paserManager.newsContentPaser import paserWeather
from recommend_templates.models import *
ROOT_URL = os.getcwd()
# Create your views here.
# 主界面
def index(request, data=None):
    """Render the home page, injecting the logged-in user's name.

    ``data`` lets callers (e.g. views falling back to the home page) pass
    extra template context; it defaults to a fresh dict per call.
    """
    # Avoid the mutable-default-argument pitfall: the original ``data={}``
    # default was one shared dict, leaking context between requests.
    if data is None:
        data = {}
    data["user_name"] = request.session.get('user_name')
    # NOTE: the template path is resolved from the current working
    # directory (ROOT_URL = os.getcwd()), so it must match the deployment.
    return render(request, ROOT_URL + "/recommend_templates/templates/homepage.html", data)
def test(request):
    """List the "病虫害" (pest) news from MongoDB, paginated via ``?p=``.

    Any backend failure (Mongo unavailable, bad page number, ...) degrades
    to rendering the home page instead of surfacing a 500.
    """
    try:
        current_page = request.GET.get('p')
        # NOTE(review): user_name is fetched but never passed to the
        # template — possible omission; kept so the session is still
        # marked as accessed, as in the original.
        user_name = request.session.get('user_name')
        rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'news')
        db_ans = rec_db.find({"class_name": "病虫害"})
        ans_list = [
            {
                "news": news,
                "news_id": str(news["_id"]),
            }
            for news in db_ans
        ]
        page_obj = Pagination(len(ans_list), current_page)
        data_list = ans_list[page_obj.start():page_obj.end()]
    except Exception:
        # Deliberately broad (any backend error falls back to the home
        # page), but unlike the original bare ``except:`` this no longer
        # swallows SystemExit/KeyboardInterrupt.
        return index(request)
    return render(request, ROOT_URL + "/recommend_templates/templates/test.html",
                  {'data': data_list, 'page_obj': page_obj})
# 第一类新闻 农业新闻
def class_1(request, data=None):
    """Render the "农业新闻" (agriculture news) category page.

    Reads rows from the `page` ORM table and builds the anchor/ajax ids the
    template's javascript expects. Any failure falls back to the home page.
    """
    if data is None:
        data = {}  # fresh dict per request; a mutable default would be shared across requests
    try:
        user_name = request.session.get('user_name')
        db_ans = page.objects.filter(class_name='农业新闻')
        ans_list = []
        for i, news in enumerate(db_ans, 0):
            ans_list.append({
                "news": news,
                # BUG FIX: ORM instances are not subscriptable — `news["_id"]`
                # raised TypeError, which the bare except silently turned into
                # a redirect to the home page. `news.id` matches class_2/class_3.
                "news_id": str(news.id),
                "href": "#href_id%d" % (i),
                "content_id": "href_id%d" % (i),
                "click_id": "ajax_id_%d" % (i),
                "ajax_id": "#ajax_id_%d" % (i),
            })
        data["news_list"] = ans_list
        data["user_name"] = user_name
    except:
        # deliberate best-effort fallback: any error shows the home page
        return index(request)
    return render(request, ROOT_URL + "/recommend_templates/templates/class_1.html", data)  # template path must be exact
# 第二类新闻 病虫害
def class_2(request):
    """Render the "水果病虫害防治" (fruit pest control) category with pagination.

    Reads articles from the `page` ORM table, pairs each row with its first
    content fragment, slices the list for the requested page (?p=N), and
    falls back to the home page on any error.
    """
    # print "class_2 ......"
    try:
        current_page = request.GET.get('p')
        user_name = request.session.get('user_name')
        # rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'news')
        # db_ans = rec_db.find({"class_name": "病虫害"})
        db_ans = page.objects.filter(class_name = '水果病虫害防治')
        data = {}
        ans_list = []
        count = 0
        for i, news in enumerate(db_ans, 0):
            content = str(news.content['content0'])  # assumes `content` is a mapping with key 'content0' — TODO confirm model field
            ans_list.append({
                "news": news,
                "news_id": news.id,
                'content': content
                # "href": "#href_id%d" % (i),
                # "content_id": "href_id%d" % (i),
                # "click_id": "ajax_id_%d" % (i),
                # "ajax_id": "#ajax_id_%d" % (i),
            })
            count += 1
        page_obj = Pagination(count, current_page)
        data_list = ans_list[page_obj.start():page_obj.end()]
        data["user_name"] = user_name  # NOTE(review): `data` is never passed to the template — confirm intent
    except:
        return index(request)  # deliberate best-effort fallback
    return render(request, ROOT_URL + "/recommend_templates/templates/class_2.html", {'data': data_list, 'page_obj': page_obj})
def news(request, data=None):
    """Render the generic news detail template with optional extra context.

    The mutable ``data={}`` default is replaced by None: even though this
    view does not mutate it today, a shared default dict is an accident
    waiting to happen and the rest of the module now follows this pattern.
    """
    if data is None:
        data = {}
    return render(request, ROOT_URL + "/recommend_templates/templates/news.html", data)
# 第三类新闻 种植技术
def class_3(request):
    """Render the "蔬菜种植技术" (vegetable growing techniques) category with pagination.

    Mirrors class_2: reads rows from the `page` ORM table, attaches the
    first content fragment, pages the results via ?p=N, and falls back to
    the home page on any error.
    """
    # print "class_3 ......"
    try:
        current_page = request.GET.get('p')
        user_name = request.session.get('user_name')
        #rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'news')
        #db_ans = rec_db.find({"class_name": "果蔬种植"})
        db_ans = page.objects.filter(class_name='蔬菜种植技术')
        data = {}
        ans_list = []
        count = 0
        for i, news in enumerate(db_ans, 0):
            content = str(news.content['content0'])  # assumes `content` is a mapping with key 'content0' — TODO confirm model field
            ans_list.append({
                'content':content,
                "news": news,
                "news_id": news.id,
                # "href": "#href_id%d" % (i),
                # "content_id": "href_id%d" % (i),
                # "click_id": "ajax_id_%d" % (i),
                # "ajax_id": "#ajax_id_%d" % (i),
            })
            count += 1
        page_obj = Pagination(count, current_page)
        data_list = ans_list[page_obj.start():page_obj.end()]
        data["user_name"] = user_name  # NOTE(review): `data` is never passed to the template — confirm intent
    except:
        return index(request)  # deliberate best-effort fallback
    return render(request, ROOT_URL + "/recommend_templates/templates/class_3.html",{'data': data_list, 'page_obj': page_obj})
# 第四类新闻 市场价格
def class_4(request, data=None):
    """Render the "市场价格" (market price) category read from MongoDB.

    Unlike class_2/class_3 this category still lives in the Mongo `news`
    collection, so documents are plain dicts and ``news["_id"]`` is valid.
    """
    if data is None:
        data = {}  # fresh dict per request; the old mutable default leaked state across requests
    try:
        user_name = request.session.get('user_name')
        rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'news')
        db_ans = rec_db.find({"class_name": "市场价格"})
        ans_list = []
        for i, news in enumerate(list(db_ans), 0):
            ans_list.append({
                "news": news,
                "news_id": str(news["_id"]),
                # anchor/ajax ids consumed by the template's javascript
                "href": "#href_id%d" % (i),
                "content_id": "href_id%d" % (i),
                "click_id": "ajax_id_%d" % (i),
                "ajax_id": "#ajax_id_%d" % (i),
            })
        data["news_list"] = ans_list
        data["user_name"] = user_name
    except:
        # deliberate best-effort fallback: any error shows the home page
        return index(request)
    return render(request, ROOT_URL + "/recommend_templates/templates/class_4.html", data)  # template path must be exact
# 第五类新闻 科技新闻
def class_5(request, data=None):
    """Render the "科技要闻" (science & tech headlines) category with pagination.

    Reuses the class_2 template on purpose (same list layout).
    """
    if data is None:
        data = {}  # fresh dict per request instead of a shared mutable default
    try:
        current_page = request.GET.get('p')
        user_name = request.session.get('user_name')
        db_ans = page.objects.filter(class_name='科技要闻')
        ans_list = []
        count = 0
        for i, news in enumerate(db_ans, 0):
            content = str(news.content['content0'])  # assumes `content` maps 'content0' — mirrors class_2/class_3
            ans_list.append({
                "content": content,
                "news": news,
                # BUG FIX: ORM rows are not subscriptable — `str(news["_id"])`
                # raised TypeError and the bare except hid every result.
                "news_id": str(news.id),
            })
            count += 1
        page_obj = Pagination(count, current_page)
        data_list = ans_list[page_obj.start():page_obj.end()]
        data["user_name"] = user_name
    except:
        return index(request)  # deliberate best-effort fallback
    return render(request, ROOT_URL + "/recommend_templates/templates/class_2.html", {'data': data_list, 'page_obj': page_obj})
def myRecommend(request, data=None):
    """Render up to 8 personalised news recommendations for the session user.

    Looks up the user's viewing history (`looked_list`) in Mongo, feeds it
    to the KNN recommender, and resolves each recommended id back to a news
    document (skipping ids that no longer exist). A user with no history
    (cold start) gets the page with an empty recommendation list.
    """
    if data is None:
        data = {}  # fresh dict per request instead of a shared mutable default
    user_name = request.session.get('user_name')
    try:
        rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'user')
        db_ans = rec_db.find({"user_name": user_name})[0]
        new_id_list = db_ans.get("looked_list")
        # BUG FIX: ans_list was only bound inside the else-branch, so the
        # cold-start path raised NameError below and bounced to the home
        # page instead of rendering an empty recommendation list.
        ans_list = []
        if new_id_list is not None:
            rec_new_id_list = get_K_nearst_love(8, new_id_list)  # top-8 recommended news ids
            for i, news_id in enumerate(rec_new_id_list, 0):
                rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'news')
                db_ans = rec_db.find({"_id": ObjectId(news_id)})
                if db_ans.count() == 0:
                    continue  # recommended article was deleted since indexing
                db_ans = db_ans[0]
                ans_list.append({
                    "news": db_ans,
                    "news_id": str(db_ans["_id"]),
                    # anchor/ajax ids consumed by the template's javascript
                    "href": "#href_id%d" % (i),
                    "content_id": "href_id%d" % (i),
                    "click_id": "ajax_id_%d" % (i),
                    "ajax_id": "#ajax_id_%d" % (i),
                })
        data["user_name"] = user_name
        data["news_list"] = ans_list
    except:
        # deliberate best-effort fallback: any error shows the home page
        return index(request)
    return render(request, ROOT_URL + "/recommend_templates/templates/myRecommend.html", data)  # template path must be exact
def history(request):
    """Show every news item the logged-in user has previously opened.

    Reads the user's `looked_list` of news ObjectIds from Mongo and resolves
    each id against the news collection, skipping ids that no longer exist.
    Any failure (no session user, Mongo down, no history) falls back to the
    home page via the bare except.
    """
    # print "history ......"
    data = {}
    user_name = request.session.get('user_name')
    try:
        rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'user')
        db_ans = rec_db.find({"user_name": user_name})[0]
        new_id_list = db_ans.get("looked_list")
        ans_list = []
        for i, news_id in enumerate(new_id_list, 0):
            rec_d = MongoOperator('localhost', 27017, 'AgriRecSys', 'news')
            db_ans = rec_d.find({'_id': ObjectId(news_id)})
            if db_ans.count() == 0:
                continue  # the article was deleted after the user viewed it
            db_ans = db_ans[0]
            ans_list.append({
                "news": db_ans,
                "news_id": str(db_ans["_id"]),
                # anchor/ajax ids consumed by the template's javascript
                "href": "#href_id%d" % (i),
                "content_id": "href_id%d" % (i),
                "click_id": "ajax_id_%d" % (i),
                "ajax_id": "#ajax_id_%d" % (i),
            })
        data["user_name"] = user_name
        data["news_list"] = ans_list
    except:
        return index(request)  # deliberate best-effort fallback
    return render(request, ROOT_URL + "/recommend_templates/templates/history.html", data)  # template path must be exact
# 获取用户对某一个新闻的点击 ajax技术
def count_click_times(request):
    """AJAX endpoint: record that `user_name` opened news item `news_id`.

    Adds the id to the user's `looked_list` set in Mongo (deduplicated via
    a Python set) so the recommender and history page can use it later.
    Answers a small JSON acknowledgement.
    """
    if request.POST:
        news_id = request.POST.get('news_id')
        user_name = request.POST.get('user_name')
    else:
        news_id = request.GET.get('news_id')
        user_name = request.GET.get('user_name')
    try:
        rec_db = MongoOperator('localhost', 27017, 'AgriRecSys', 'user')
        db_ans = rec_db.find({"user_name": user_name})[0]
        if db_ans.get("looked_list") is None:
            looked_list = set([news_id])
        else:
            # union the existing history with the new id (set dedupes)
            looked_list = set(list(db_ans["looked_list"]))
            looked_list = looked_list | set([news_id])
        rec_db.update(
            {'user_name': user_name},
            {'$set': {"looked_list": list(looked_list)}},
        )
    except:
        # deliberate best-effort fallback: any error shows the home page
        return index(request)
    # BUG FIX: the success path previously fell off the end and returned
    # None — a Django view must always return an HttpResponse.
    return JsonResponse({"status": "ok"})
def shift_title_bar(request):
    """AJAX endpoint: echo the selected title-bar name and user back as JSON."""
    # BUG FIX: was `data = []` — a list cannot take the string-key
    # assignments below (TypeError: list indices must be integers).
    data = {}
    if request.POST:
        bar_name = request.POST.get('bar_name')
        user_name = request.POST.get('user_name')
    else:
        bar_name = request.GET.get('bar_name')
        user_name = request.GET.get('user_name')
    data["bar_name"] = bar_name
    data["user_name"] = user_name
    return JsonResponse(data)
# 天气栏
def weather(request):
    """Render the weather page: up to 7 days of scraped forecast data plus
    ARIMA-predicted min/max temperatures formatted to 2 decimals.

    Falls back to the home page if scraping or prediction fails.
    """
    # print "weather ......"
    try:
        pred_min_list, pred_max_list = get_min_max_degree()  # ARIMA temperature forecasts
        user_name = request.session.get('user_name')
        data = {};
        ans_list = []
        weather_list = paserWeather() # [{},{},{},...]
        for i, weather in enumerate(weather_list, 0):
            if i >= 7: break;  # show at most one week
            ans_list.append({
                "key": i,
                "content": weather,
            })
        data["weather_data"] = ans_list
        data["user_name"] = user_name
        # element 0 is dropped — presumably it is today's value; verify against get_min_max_degree()
        data["pred_max"] = ["%.2f" % (degree) for degree in pred_max_list[1:]]
        data["pred_min"] = ["%.2f" % (degree) for degree in pred_min_list[1:]]
    except:
        return index(request)  # deliberate best-effort fallback
    return render(request, ROOT_URL + "/recommend_templates/templates/weather.html", data)  # template path must be exact
##############################################################################################################################################
def weatherTest(request):
    """Weather page backed by canned data in the k780 API response schema.

    GET renders today's conditions; an AJAX POST with weather='seven_day'
    returns the 7-day forecast as JSON. The live Weather() fetch is
    commented out below and replaced by hard-coded sample payloads
    (Chengdu, 2017-10-13).
    """
    # from recommend_templates.Main.paserManager.weather import Weather
    # weather = Weather()
    # weather_list = weather.getWeather() #json.dumps(weather.getWeather(),ensure_ascii=False,encoding="utf-8") # unicode converted to utf-8
    # print(json.dumps(weather_list[0],ensure_ascii=False,encoding="utf-8"))
    # print(json.dumps(weather_list[1],ensure_ascii=False,encoding="utf-8"))
    # Canned "today" payload in the k780 response schema.
    today = {"temp_low": "14", "weather": "小雨", "weatid1": "", "humi_low": "0", "humi_high": "0", "temp_curr": "16", "temperature": "18℃/14℃", "cityid": "101270101", "windid": "20", "weaid": "265", "week": "星期五", "temperature_curr": "16℃", "weather_icon1": "", "winpid": "201", "weatid": "2", "weather_curr": "多云", "citynm": "成都", "cityno": "chengdu", "days": "2017-10-13", "humidity": "85%", "weather_icon": "http://api.k780.com/upload/weather/d/1.gif", "temp_high": "18", "winp": "1级", "wind": "北风"}
    # Canned 7-day forecast, one dict per day, same schema.
    seven_day = [{"week": "星期五", "weather_icon": "http://api.k780.com/upload/weather/d/7.gif", "humi_high": "0", "humi_low": "0", "temperature": "18℃/14℃", "weatid": "8", "temp_low": "14", "citynm": "成都", "cityno": "chengdu", "winpid": "125", "days": "2017-10-13", "humidity": "0%/0%", "cityid": "101270101", "weather_icon1": "http://api.k780.com/upload/weather/n/7.gif", "weather": "小雨", "temp_high": "18", "weatid1": "8", "windid": "124", "winp": "微风", "wind": "无持续风向", "weaid": "265"}, {"week": "星期六", "weather_icon": "http://api.k780.com/upload/weather/d/7.gif", "humi_high": "0", "humi_low": "0", "temperature": "18℃/15℃", "weatid": "8", "temp_low": "15", "citynm": "成都", "cityno": "chengdu", "winpid": "125", "days": "2017-10-14", "humidity": "0%/0%", "cityid": "101270101", "weather_icon1": "http://api.k780.com/upload/weather/n/7.gif", "weather": "小雨", "temp_high": "18", "weatid1": "8", "windid": "124", "winp": "微风", "wind": "无持续风向", "weaid": "265"}, {"week": "星期日", "weather_icon": "http://api.k780.com/upload/weather/d/7.gif", "humi_high": "0", "humi_low": "0", "temperature": "18℃/14℃", "weatid": "8", "temp_low": "14", "citynm": "成都", "cityno": "chengdu", "winpid": "125", "days": "2017-10-15", "humidity": "0%/0%", "cityid": "101270101", "weather_icon1": "http://api.k780.com/upload/weather/n/7.gif", "weather": "小雨", "temp_high": "18", "weatid1": "8", "windid": "124", "winp": "微风", "wind": "无持续风向", "weaid": "265"}, {"week": "星期一", "weather_icon": "http://api.k780.com/upload/weather/d/7.gif", "humi_high": "0", "humi_low": "0", "temperature": "18℃/13℃", "weatid": "8", "temp_low": "13", "citynm": "成都", "cityno": "chengdu", "winpid": "125", "days": "2017-10-16", "humidity": "0%/0%", "cityid": "101270101", "weather_icon1": "http://api.k780.com/upload/weather/n/7.gif", "weather": "小雨", "temp_high": "18", "weatid1": "8", "windid": "124", "winp": "微风", "wind": "无持续风向", "weaid": "265"}, {"week": "星期二", "weather_icon": "http://api.k780.com/upload/weather/d/7.gif", "humi_high": "0", 
"humi_low": "0", "temperature": "19℃/13℃", "weatid": "8", "temp_low": "13", "citynm": "成都", "cityno": "chengdu", "winpid": "125", "days": "2017-10-17", "humidity": "0%/0%", "cityid": "101270101", "weather_icon1": "http://api.k780.com/upload/weather/n/7.gif", "weather": "小雨", "temp_high": "19", "weatid1": "8", "windid": "124", "winp": "微风", "wind": "无持续风向", "weaid": "265"}, {"week": "星期三", "weather_icon": "http://api.k780.com/upload/weather/d/7.gif", "humi_high": "0", "humi_low": "0", "temperature": "21℃/14℃", "weatid": "8", "temp_low": "14", "citynm": "成都", "cityno": "chengdu", "winpid": "125", "days": "2017-10-18", "humidity": "0%/0%", "cityid": "101270101", "weather_icon1": "http://api.k780.com/upload/weather/n/1.gif", "weather": "小雨转多云", "temp_high": "21", "weatid1": "2", "windid": "124", "winp": "微风", "wind": "无持续风向", "weaid": "265"}, {"week": "星期四", "weather_icon": "http://api.k780.com/upload/weather/d/7.gif", "humi_high": "0", "humi_low": "0", "temperature": "27℃/19℃", "weatid": "8", "temp_low": "19", "citynm": "成都", "cityno": "chengdu", "winpid": "125", "days": "2017-10-19", "humidity": "0%/0%", "cityid": "101270101", "weather_icon1": "http://api.k780.com/upload/weather/n/7.gif", "weather": "小雨", "temp_high": "27", "weatid1": "8", "windid": "124", "winp": "微风", "wind": "无持续风向", "weaid": "265"}]
    if request.method == 'POST':
        # today = request.POST.get('today')
        # seven_day = request.POST.get('seven_day')
        weather = request.POST.get('weather')
        if weather == 'seven_day':
            return JsonResponse({'code':'7','weather_list':seven_day}) #weather_list[1]
    return render(request,'weatherTest.html',{'weather':today,'user_name':request.session.get('user_name')}) #weather_list[0]
def login(request):
    """Log a user in.

    GET renders the login form; POST validates `user`/`password` against the
    User table and answers JSON flags consumed by the front-end javascript:
    correctCode='1' on success, errorCode='0' on bad credentials,
    emptyCode='0' when either field is blank. On success the session is
    marked as logged in. (A superseded commented-out variant was removed.)
    """
    data = {}
    if request.method == 'POST':
        userName = request.POST.get('user')
        pwd = request.POST.get('password')
        if userName != '' and pwd != '':
            # NOTE(review): password is matched as plain text — confirm hashing policy
            if User.objects.filter(username=userName, password=str(pwd)):
                request.session['is_login'] = True
                request.session['user_name'] = userName
                data['correctCode'] = '1'
            else:
                data['errorCode'] = '0'
        else:
            data['emptyCode'] = '0'
        return JsonResponse(data)
    return render_to_response('loginTest.html')
def register(request):
    """Create a new user account.

    GET renders the registration form; POST validates the submitted fields
    with regexes (username, password, optional email/age), rejects duplicate
    usernames, and answers JSON flags for the front-end: existCode='200'
    (name taken), inputErrorCode='404' (bad field), successCode='true'
    (account created). A 20-line commented-out duplicate of this logic was
    removed; behavior is unchanged.
    """
    selectList = []
    if request.method == 'POST':
        user = request.POST.get('user')  # form field names from registerTest.html
        pwd = request.POST.get('password')
        age = request.POST.get('age')
        email = request.POST.get('email')
        address = request.POST.get('address')
        select1 = request.POST.get('select1')
        select2 = request.POST.get('select2')
        select3 = request.POST.get('select3')
        select4 = request.POST.get('select4')
        selectList = [select1, select2, select3, select4]  # the user's four interest picks
        # Validation: username is letters then word chars, or an 11-digit phone number.
        user1 = re.findall(r'^[A-Za-z]+[_A-Za-z0-9]*|^[1-9][0-9]{10,10}$', str(user))
        pwd1 = re.findall(r'^[_.#*@%&A-Za-z0-9]{6,20}$', str(pwd))
        email1 = re.findall(r'^[\w!#$%&\'*+/=?^_`{|}~-]+(?:\.[\w!#$%&\'*+/=?^_`{|}~-]+)*@(?:[\w](?:[\w-]*[\w])?\.)+[\w](?:[\w-]*[\w])?$', str(email))
        age1 = re.findall(r'^[0-9]\d*$', str(age))
        isExist = User.objects.filter(username=user)
        code = {}
        if isExist:
            code['existCode'] = '200'
        else:
            if user1 and pwd1:
                # email and age are optional, but must match their patterns when supplied
                if email != '':
                    if not email1:
                        code['inputErrorCode'] = '404'
                        return JsonResponse(code)
                if age != '':
                    if not age1:
                        code['inputErrorCode'] = '404'
                        return JsonResponse(code)
                code['successCode'] = 'true'
                user = User(username=user, password=pwd, age=str(age), email=email, address=address, user_love=selectList)
                user.save()
            else:
                code['inputErrorCode'] = '404'
        return JsonResponse(code)
    return render_to_response('registerTest.html')
def register_ajax(request):
    """Field-by-field AJAX validation for the registration form.

    For each non-empty field, answers '200' (ok) or '404' (bad) plus a
    human-readable *_msg; empty fields get a *CodeEmpty='0' flag. For a
    syntactically valid username it additionally reports availability:
    code='200' means already taken, code='404' means free.
    """
    if request.method == 'POST':
        user = request.POST.get('user')
        pwd = request.POST.get('password')
        age = request.POST.get('age')
        email = request.POST.get('email')
        result = {}
        #print('账号:%s 密码:%s 年龄:%s 邮箱:%s' % (user, pwd, age, email))
        # password: 6-20 chars drawn from a limited symbol/alphanumeric set
        if pwd != '':
            if re.findall(r'^[_.#*@%&A-Za-z0-9]{6,20}$',str(pwd)):
                result['pCode'] = '200'
            else:
                result['p_msg'] = '密码包含特殊符号、或长度小于6'
                result['pCode'] = '404'
        else:
            result['pCodeEmpty'] = '0'
        # email: RFC-style address pattern
        if email != '':
            if re.findall(r'^[\w!#$%&\'*+/=?^_`{|}~-]+(?:\.[\w!#$%&\'*+/=?^_`{|}~-]+)*@(?:[\w](?:[\w-]*[\w])?\.)+[\w](?:[\w-]*[\w])?$',str(email)):
                result['eCode'] = '200'
            else:
                result['e_msg'] = '邮箱格式不正确'
                result['eCode'] = '404'
        else:
            result['eCodeEmpty'] = '0'
        # age: digits only
        if age != '':
            if re.findall(r'^[0-9]\d*$',str(age)):
                result['aCode'] = '200'
            else:
                result['a_msg'] = '年龄必须是数字'
                result['aCode'] = '404'
        else:
            result['aCodeEmpty'] = '0'
        # username: letters then word chars, or an 11-digit phone number
        if user != '':
            if re.findall(r'^[A-Za-z]+[_A-Za-z0-9]*|^[1-9][0-9]{10,10}$', str(user)):
                result['uCode'] = '200'
                if User.objects.filter(username=user):
                    result['code'] = '200'
                    result['msg'] = '账号已经被使用了'
                else:
                    result['code'] = '404'
            else:
                result['u_msg'] = '账号必须是电话号码、或者字母开头的可包含数字和下划线的字符串'
                result['uCode'] = '404'
        else:
            result['uCodeEmpty'] = '0'
        return JsonResponse(result)
# 用户提交登出
def subLogout(request):
    """Clear the login flags from the session, then show the home page."""
    session = request.session
    session['user_name'] = None
    session['is_login'] = False
    return index(request)
def article(request,pageId):
    """Render a single article looked up by its crawler-assigned pageId.

    NOTE(review): .first() returns None when no row matches, so the
    template receives articles=None in that case — confirm it tolerates it.
    """
    article = page.objects.filter(pageId = pageId).first()
    return render(request,'test.html',{'articles':article})
| [
"tm_private@163.com"
] | tm_private@163.com |
7fff07e8e9a7785d1ba743fc0524887fedbeb912 | a4425d446f87ffc8c01ba32e084c4f5402b56d15 | /0918/Testif2.py | 6e9601fd358cb544bae6874bd4dc46f9d2202c3b | [] | no_license | 1i1y/PythonSpace | dd79031d61238caa1cd3943396c5a88c9cac9030 | e09c1f3703b4da6a4c0166f1c438672658506652 | refs/heads/master | 2021-08-31T04:47:54.670155 | 2017-12-20T12:16:39 | 2017-12-20T12:16:39 | 114,877,048 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | #課堂作業,判斷輸入是基偶數(V)
# Classroom exercise: nested if/else on two exam scores (English, Math).
score1=int(input('請輸入英文成績:'))
score2=int(input('請輸入數學成績:'))
if score1 >=60: # author's note (translated): if/else headers end with a ":"
    if score2>=60:
        print('都及格,了不起喔~來戰啊!!!!!')
    else:
        print('我知道這次數學很難,拍拍~')
# NOTE(review): this second block repeats `score1 >= 60` with the same inner
# test, yet its else-branch message talks about failing *English* — the
# intended condition was probably different; confirm with the author.
if score1 >=60:
    if score2>=60:
        print('及格~及格~灑花~花~')
    else:
        print('這世界英文不及格,什麼都不是~')
"l2006109@gmail.com"
] | l2006109@gmail.com |
b1aa4a9e059c4f35593ba220bf587e0bdcc9389e | fba5505857a794c93a50f2b2625f39927d5a6e2b | /aio_pyorient/client.py | 1aecfe249aa4822ba5aa72162a0ae3d0756092c8 | [
"Apache-2.0"
] | permissive | tjtimer/aio_pyorient | 48562c766fe1a01eccb474e73e444f7417e773ec | 4e9a7244c71e27bc948fe8df863612eafc9743de | HEAD | 2018-07-16T11:32:30.863246 | 2018-06-01T16:32:13 | 2018-06-01T16:32:13 | 121,144,063 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,159 | py | import asyncio
from pprint import pprint
from aio_pyorient.message import db, server, command
from aio_pyorient.message.base import int_packer
from aio_pyorient.schema.prop_types import TYPE_MAP, var_int
from aio_pyorient.odb_types import ODBClusters
from aio_pyorient.sock import ODBSocket
from aio_pyorient.utils import AsyncCtx
# Prefer uvloop's faster event loop implementation when it is installed;
# silently fall back to the stdlib loop otherwise (best-effort, so only
# ImportError is swallowed).
try:
    import uvloop
    asyncio.set_event_loop_policy(uvloop.EventLoopPolicy())
except ImportError:
    pass
class ODBClient(AsyncCtx):
    """
    ODBClient
    Use this to talk to your OrientDB server.

    Wraps an ODBSocket and delegates each wire operation to a message
    handler (server/db/command modules): every public coroutine builds a
    handler, sends the request, and awaits the parsed response.
    """
    def __init__(self,
                 client_id: str='',
                 session_id: int=-1,
                 auth_token: bytes = b'',
                 db_name: str=None,
                 clusters: ODBClusters=None,
                 cluster_conf: bytes=b'',
                 server_version: str= '',
                 protocol: int=None,
                 serialization_type="ORecordDocument2csv",
                 host: str = 'localhost',
                 port: int = 2424, **kwargs):
        # Session fields default to "not connected" values (-1 / empty) and
        # are filled in by the connect/open_db handlers.
        super().__init__(**kwargs)
        self._sock = ODBSocket(host=host, port=port)
        self._id = client_id
        self._session_id = session_id
        self._auth_token = auth_token
        self._db_name = db_name
        # guard against callers passing a plain list/dict for clusters
        if clusters is None or not isinstance(clusters, ODBClusters):
            clusters = ODBClusters()
        self._clusters = clusters
        self._cluster_conf = cluster_conf
        self._server_version = server_version
        self._protocol = protocol
        # "ORecordSerializerBinary" or "ORecordDocument2csv"
        self._serialization_type = serialization_type
        self._is_ready.set()
    # --- read-only state accessors ---
    @property
    def is_ready(self):
        return self._sock.connected and self._is_ready.is_set()
    @property
    def protocol(self):
        return self._protocol
    @property
    def session_id(self):
        return self._session_id
    @property
    def auth_token(self):
        return self._auth_token
    @property
    def db_opened(self):
        # name of the currently opened database, or None
        return self._db_name
    @property
    def clusters(self):
        return self._clusters
    @property
    def cluster_conf(self):
        return self._cluster_conf
    @property
    def server_version(self):
        return self._server_version
    async def _shutdown(self):
        # called by AsyncCtx on exit — closes the underlying socket
        await self._sock.shutdown()
    async def connect(self, user: str, password: str, **kwargs):
        """Authenticate against the server (not a specific database)."""
        handler = server.ServerConnect(self, user, password, **kwargs)
        print('connect handler')  # NOTE(review): leftover debug output
        pprint(vars(handler))
        await handler.send()
        return await handler.read()
    async def create_db(self, db_name: str, *, db_type: str='graph', storage_type: str='plocal', **kwargs):
        handler = db.CreateDb(self, db_name,
                              db_type=db_type, storage_type=storage_type, **kwargs)
        await handler.send()
        return await handler.read()
    async def open_db(self, db_name: str, user: str, password: str, **kwargs):
        handler = await db.OpenDb(self, db_name, user, password, **kwargs).send()
        return await handler.read()
    async def reload_db(self, **kwargs):
        handler = await db.ReloadDb(self, **kwargs).send()
        return await handler.read()
    async def close_db(self, **kwargs):
        handler = await db.CloseDb(self, **kwargs).send()
        return await handler.read()
    async def db_exist(self, db_name: str, *, storage_type: str='plocal', **kwargs):
        handler = await db.DbExist(self, db_name, storage_type=storage_type, **kwargs).send()
        return await handler.read()
    async def db_size(self, **kwargs):
        handler = await db.DbSize(self, **kwargs).send()
        return await handler.read()
    async def db_record_count(self, **kwargs):
        handler = await db.DbRecordCount(self, **kwargs).send()
        return await handler.read()
    async def db_schema(self):
        # #0:1 is OrientDB's fixed schema record id
        response = await self.execute("select from #0:1")
        return response
    async def execute(self, query: str, **kwargs):
        """Run an arbitrary SQL/Gremlin command and return the parsed result."""
        handler = await command.Query(self, query, **kwargs).send()
        return await handler.read()
| [
"tjtimer@gmail.com"
] | tjtimer@gmail.com |
5ee111259f247c1ac6fe059965950e158e5ad6f4 | 2a2e3df106ddda5bc1bf31e2e25437f01f142693 | /Aplayground.py | a6b1600313b03f7c039ec7da9d45fc682bf3e775 | [] | no_license | timavis/SportMngSys | c73063d37d91bf1bde75406ef8890b1e338318ec | f2e403e5d4babf0a6ce1ca844b6ffe163f966bec | refs/heads/master | 2021-07-04T03:07:09.113615 | 2017-09-26T10:57:14 | 2017-09-26T10:57:14 | 106,343,056 | 1 | 0 | null | 2017-10-09T22:47:53 | 2017-10-09T22:47:52 | null | UTF-8 | Python | false | false | 236 | py | import random
# Scratch script: shuffle a list of team ids and see how evenly they split
# into `count` groups (quotient and remainder), plus a float-division check.
teamList = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 11, 12, 13, 14]
random.shuffle(teamList)
count = 4
each, left = divmod(len(teamList), count)
print(f'|-- {left}')
print(f'|-- {each}')
print(35 / 9)
| [
"timshi2013@gmail.com"
] | timshi2013@gmail.com |
94e78a5cbb0e034f47c33500f29b0f9f94042e81 | 346497fed60350124af7656cfa86399bfe5baa0a | /parse_dir.py | 065743cdfc35894614c11a935d9b70c1f95ee65a | [] | no_license | TBSDrJ/Quick-Scripts | 0adcb2dd364df5e53cae92f477910f9eef9f5013 | 0dd7bb9287a822b421a6b5552102a9f76ea7e55f | refs/heads/main | 2023-07-15T12:29:08.097170 | 2023-06-29T01:45:44 | 2023-06-29T01:45:44 | 248,335,793 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 711 | py | #!/usr/bin/python3
import os

# Scan a saved `ls -l`-style directory listing and print entries over 10 MB.
MY_HOME_FOLDER = os.environ['HOME']

bigFiles = []
# `with` guarantees the handle is closed (it previously leaked).
with open(MY_HOME_FOLDER + '/Downloads/dir.txt', 'r') as dirFile:
    for eachLine in dirFile:
        # An `ls -l` file entry has at least 9 whitespace-separated fields;
        # shorter lines (totals, blanks) are skipped below.
        # BUG FIX: was `entry = line.split()` — `line` was never defined,
        # so the first data line raised NameError.
        entry = eachLine.split()
        if len(entry) > 8:
            # Field [4] holds the size in bytes for regular entries; some
            # lines (e.g. device files) won't parse as an int.
            try:
                if int(entry[4]) > 10000000:
                    print(entry)
            except ValueError:  # non-numeric size field — skip it
                pass
| [
"marcus.jaiclin@bishops.com"
] | marcus.jaiclin@bishops.com |
d6ef0836fa61705a7de8944f9c6899860c6811c0 | eb1de8fe7b6f0d2d598317c4c893d4e31e62e0d3 | /events/migrations/0008_auto_20210701_2221.py | ba11161c573e4dff9370fbaad8ee00cdb84cb37f | [] | no_license | Festorah/partake | 91e0c0e3a6b23c6f70469da2fc5804e7f2c15a01 | fedb3e5cf84e0fee9559567baa7b7389e88a946c | refs/heads/main | 2023-06-16T01:10:05.889171 | 2021-07-15T17:37:32 | 2021-07-15T17:37:32 | 386,363,083 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 542 | py | # Generated by Django 3.2 on 2021-07-01 21:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow blank values on Event.start_date / end_date."""
    dependencies = [
        ('events', '0007_auto_20210525_1309'),
    ]
    operations = [
        # NOTE(review): blank=True without null=True on a DateTimeField only
        # relaxes form validation; the DB column still requires a value —
        # confirm that is the intended behavior.
        migrations.AlterField(
            model_name='event',
            name='end_date',
            field=models.DateTimeField(blank=True),
        ),
        migrations.AlterField(
            model_name='event',
            name='start_date',
            field=models.DateTimeField(blank=True),
        ),
    ]
| [
"festusoyebami@gmail.com"
] | festusoyebami@gmail.com |
c0aeb537a3746a1fd86d34cb9b84507d15349fce | 527fd39d3a1555800c2c32025fdd15fd86ba6672 | /make_Flexible_Finction/args.py | 0c2e15b007ee07cc6e3d18abdd7e1d7b7d6597f0 | [] | no_license | rohanwarange/Python-Tutorials | cfd39551f7ff62bd032946976ba3820474e42405 | 53d8fb226f94d027ae7999f9678697206d37d83a | refs/heads/master | 2023-06-18T10:45:36.884324 | 2021-07-07T17:44:22 | 2021-07-07T17:44:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 187 | py | def total(a,b):
return a+b
print(total(5,5))
# *args
def total(*args):
    """Demonstrate *args: echo the packed argument tuple, then return its sum."""
    print(args)
    return sum(args)
print(total(1,2,3,4,5)) | [
"rohanwarange24@gmail.com"
] | rohanwarange24@gmail.com |
9aeb88b29c688c5453db81457fcaf6a9eb9b895b | 8427c70565103f6e8d770d1a13242ce346172852 | /DataPreProcess/TopoConnection.py | c9ef4604f9150fa005856acedf815b6437231561 | [] | no_license | fengjiachen/TestPython | 9cda97f9c36a674b9a27c1675eb21879a72c46e6 | 73a8847343b076b1e21a20e000c8f3d2578f8bd3 | refs/heads/master | 2020-03-19T20:04:13.920460 | 2018-06-10T07:32:19 | 2018-06-10T07:32:19 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | # encoding=utf-8
from DataPreProcess.Function import *

# Count how often each inbound station appears in the 济南西 (Jinanxi) toll
# exit records, then print the tallies in descending order.
file = "C:\\Users\\wbl\\Desktop\\ETC\\StationOut\\济南西.txt"  # NOTE(review): hard-coded Windows path
stationMap = {}
# `with` guarantees the handle is closed (the file previously leaked).
with open(file, 'r', encoding='utf-8') as currentFile:
    for eachLine in currentFile:
        stationIn = eachLine.strip().split(',')[1]  # column 1 = entry station; assumes comma-separated rows — TODO confirm
        Function.insertRecords(stationIn, stationMap, 1)
stationItem = sorted(stationMap.items(), key=lambda k: k[1], reverse=True)
for item in stationItem:
    print(item[0] + '\t' + str(item[1]))
| [
"1046540692@qq.com"
] | 1046540692@qq.com |
149603acad9d00af9deaf8e518f47c9cfbceac66 | 3d909977c654dea4b7ea68499c9bb0f4c96a3253 | /p5/ud120-projects-master/outliers/outlier_cleaner.py | b4d4e69c611f9edafa2a0162fff4ccadb25919a9 | [] | no_license | shaojiewei/Udacity-Data-Analyst | 51e00acede8e282bc2cef05491136baca564927d | 694016349bba1aad155f02c0f14ff2062d7352d9 | refs/heads/master | 2020-05-30T10:47:33.391676 | 2017-08-08T03:46:55 | 2017-08-08T03:46:55 | 82,625,650 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 720 | py | #!/usr/bin/python
def outlierCleaner(predictions, ages, net_worths):
    """Drop the 10% of points with the largest squared residuals.

    Pairs each (age, net_worth) with its squared prediction error, keeps
    the 90% of tuples with the smallest error (stable sort, so ties keep
    their original order), and returns them as a list of
    (age, net_worth, error) tuples.
    """
    squared_errors = (net_worths - predictions) ** 2
    paired = sorted(
        zip(ages, net_worths, squared_errors),
        key=lambda record: record[2],
    )
    keep = int(len(paired) * 0.9)
    return paired[:keep]
| [
"1205873089@qq.com"
] | 1205873089@qq.com |
a1a00c10aec02bcae0e423f75aae53d303169aa1 | aee8cd408ae80e2059c84d2f83c7da8ee83990b4 | /Problems/0289-Game of Life.py | c19873d6c428c360bdb22e101f9635b001c85ab1 | [] | no_license | KirkGuo/LeetCode-Accepted-Code | 652a40a88888e8190e5b2912fa04008e82e32080 | ca445222ac0c513070123693977d8e1570c077f5 | refs/heads/master | 2021-06-25T03:47:25.586772 | 2021-03-09T02:39:20 | 2021-03-09T02:39:20 | 208,901,136 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,081 | py | class Solution:
def gameOfLife(self, board: List[List[int]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
def cntNeighbor(x, y, m, n):
type_cell = {1:1, 0:0, -1:0, -2:1}
ans = 0
for i in range(x-1, x+2):
for j in range(y-1, y+2):
if i<0 or i>=m or j<0 or j>=n or (i==x and j==y):
continue
ans += type_cell[board[i][j]]
return ans
m, n = len(board), len(board[0])
for x in range(m):
for y in range(n):
neighbor = cntNeighbor(x, y, m, n)
if board[x][y]==1 and (neighbor<2 or neighbor>3):
board[x][y] = -2
elif board[x][y]==0 and neighbor==3:
board[x][y] = -1
for x in range(m):
for y in range(n):
if board[x][y] == -1:
board[x][y] = 1
elif board[x][y] == -2:
board[x][y] = 0
| [
"noreply@github.com"
] | KirkGuo.noreply@github.com |
b99ba902cf48d891d8a7c0b595e6414a78b29f4f | d9443fb670ca99c98894b5f70c12dd165b9d8d99 | /view/appView.py | b3e8632c9e305f54903d80da4a2f6e75b2a5e900 | [] | no_license | PaarthB/Zendesk-Coding-Challenge-2017 | edb1e9a058f1d3d7da44b2cfe98dd71fecba183c | 56169e2f79fdc18fb38edd7b0e16f1d817b2ce81 | refs/heads/master | 2021-03-19T15:43:36.054674 | 2017-07-21T11:29:10 | 2017-07-21T11:29:10 | 96,493,417 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,869 | py | """
A Passive View in the view package for the MVC pattern. Its ticket data is what the controller passes to it.
It displays dynamic program messages & ticket information on the CLI screen.
"""
import math
class AppView:
    """Passive CLI view for the Zendesk ticket viewer (MVC view layer).

    Holds no business logic: the controller passes in ticket JSON and this
    class only prints prompts, menus and ticket summaries. Display methods
    return 0 for prompt-type output and 1 for error-type output; the
    pagination method returns the page number actually shown.
    """
    def __init__(self):
        self.page_limit = 25  # tickets shown per page
        self.errorCode = None  # HTTP error code; set externally by the controller
    def startMessage(self): # Displays Start message on CLI screen
        print("\n\n-------------------------WELCOME TO ZENDESK TICKET VIEWER-------------------------")
        print("This application lets you view tickets and their details on your zendesk account")
        print("Please enter a command, to view command options, type 'menu': ", end="")
        return 0
    def displayBadRequest(self, message): # Displays bad request message on CLI screen
        if self.errorCode is not None:
            print("\nBad request. Error getting data from API. Error Code:", self.errorCode)
        print(message)
        return 1
    def displayInputMessage(self, message, type):
        # `type` shadows the builtin; kept for interface compatibility
        print(message, end="")
        return type # Returns 0 on input prompt type messages, returns 1 on input error type messages
    def printMenu(self): # Displays Command Menu on CLI Screen
        print("\nCommand Options:")
        print("Enter 1 to display all tickets")
        print("Enter 2 to display single ticket")
        print("Enter q to exit application")
        print("Enter 'menu' to display Command Menu")
        print("\nEnter your choice: ", end="")
        return 0
    def quit(self): # Displays quit message and quits the App.
        print("\nExiting Zendesk Ticket Viewer. . . . . .")
        print("Exiting successful, see you soon.\n")
        return 0
    def fetchTickets(self, ticketID): # Displays loading tickets message on CLI screen
        if ticketID == "all":
            print("\nFetching tickets, please wait . . . . .")
        else:
            print("\nFetching ticket", ticketID + ",", "please wait . . . . .")
        return 0
    def displayTickets(self, ticketsJSON, pageNo): # Displays tickets details with pagination on CLI screen
        """Print one page of ticket summaries and return the page shown."""
        ticketsArr = ticketsJSON["tickets"]
        # rounding up ticket pages
        totalPages = math.ceil(float(len(ticketsArr)) / float(self.page_limit))
        # circular rotation of pages after limit or before start
        if pageNo > totalPages:
            pageNo = 1
        elif pageNo < 1:
            pageNo = totalPages
        pageTickets = 0  # tickets actually printed on this page
        ticketOffset = (pageNo - 1) * self.page_limit
        print("")
        for i in range(int(ticketOffset), int(self.page_limit + ticketOffset)):
            if i < len(ticketsArr):
                if ticketsArr[i]["id"] is None:
                    continue  # skip placeholder entries without an id
                else:
                    print("<" + ticketsArr[i]["status"] + ">", "Ticket", ticketsArr[i]["id"], "opened by",
                          ticketsArr[i]["requester_id"], "updated at", ticketsArr[i]["updated_at"])
                    pageTickets += 1
        print("\nDisplaying", pageTickets, "tickets on page", pageNo, "of", totalPages)
        print("\nEnter 'd' to go down, 'u' to go up, 'menu' for menu and 'q' for quit: ", end="")
        return pageNo # Current page no
    def displayTicket(self, ticketsJSON): # Displays one ticket details on CLI screen
        if "ticket" in ticketsJSON:
            print("\n" + "<" + ticketsJSON["ticket"]["status"] + ">", "Ticket", ticketsJSON["ticket"]["id"], "subject", "'" +
                  ticketsJSON["ticket"]["subject"] + "'", "opened by", ticketsJSON["ticket"]["requester_id"], "updated at",
                  ticketsJSON["ticket"]["updated_at"])
            print("\nPlease enter a command, to view command menu, type 'menu': ", end="")
            return 0
        else:
            return 1  # payload did not contain a ticket
| [
"noreply@github.com"
] | PaarthB.noreply@github.com |
26161a885802699bfcf493fb91b66b866e6dffcf | 5010892594ea4c8f968048252c7355068f546129 | /test/test_smoke_cal.py | 0556dd73b7d04a53f787127bfbc8a7bb149eeac0 | [] | no_license | swapnali2020/New-Rent-2020 | 6c5b572320b411a1e5f79e7595039e66695b30b8 | e4799e82dae2ab5189e5635dc21d93cdf8c3e78b | refs/heads/main | 2023-01-22T09:57:18.597471 | 2020-12-04T10:27:17 | 2020-12-04T10:27:17 | 318,479,647 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 334 | py | from Framework_RentCalculator_Project.src.app_logic import *
from Framework_RentCalculator_Project.test_data.test_data_file import *
import pytest
from time import sleep
def test_mortgage_calculator(cal_setup):
    """Smoke test: drive the mortgage calculator end-to-end through the UI.

    ``cal_setup`` is a pytest fixture (defined elsewhere, presumably in
    conftest.py) yielding the page-object wrapper for the browser session.
    """
    cal_setup.get_mortgage_link()
    sleep(5)  # fixed wait for page load -- TODO(review): prefer explicit waits
    cal_setup.get_calculate()
    sleep(5)  # fixed wait for the calculation result to render
    cal_setup.get_monthly_pay()
| [
"71609807+swapnali2020@users.noreply.github.com"
] | 71609807+swapnali2020@users.noreply.github.com |
a6f974bb74e58b7e2afa241d5f8112f0e4f35075 | b3d340408c88d2cbe7f9d0c9ed7cad53dedfcca9 | /Projects/project1- DataModellingWithPostgres/sql_queries.py | 80a75a53f446c9d0d0e23000aa461f863c8b4588 | [] | no_license | MyDataDevOps/DataEngineeringNanoDegree | 3c2e563b73f74cf9b9ac1c6fc0a1117ae9c14b40 | c061dbede550e18111de346e58dfb5f258e4c63f | refs/heads/master | 2023-03-15T10:11:17.745980 | 2020-03-15T19:10:09 | 2020-03-15T19:10:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,741 | py | # DROP TABLES
# ---------------------------------------------------------------------------
# DROP TABLES -- used to reset the star schema before re-creation.
# ---------------------------------------------------------------------------
songplay_table_drop = "DROP TABLE IF EXISTS songplays"
user_table_drop = "DROP TABLE IF EXISTS users"
song_table_drop = "DROP TABLE IF EXISTS songs"
artist_table_drop = "DROP TABLE IF EXISTS artists"
time_table_drop = "DROP TABLE IF EXISTS time"

# ---------------------------------------------------------------------------
# CREATE TABLES
# songplays is the fact table; users/songs/artists/time are dimensions.
# ---------------------------------------------------------------------------
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id SERIAL PRIMARY KEY,
start_time TIMESTAMP NOT NULL,
user_id INT REFERENCES users (user_id),
level VARCHAR,
song_id VARCHAR REFERENCES songs (song_id),
artist_id VARCHAR REFERENCES artists(artist_id),
session_id INT NOT NULL,
location VARCHAR,
user_agent VARCHAR
);
""")

user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (
user_id INT PRIMARY KEY,
first_name VARCHAR,
last_name VARCHAR,
gender CHAR(1) NOT NULL,
level VARCHAR NOT NULL
);
""")

song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id VARCHAR PRIMARY KEY,
title VARCHAR NOT NULL,
artist_id VARCHAR NOT NULL REFERENCES artists (artist_id),
year INT NOT NULL,
duration NUMERIC NOT NULL
);
""")

artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (
artist_id VARCHAR PRIMARY KEY,
name VARCHAR NOT NULL,
location VARCHAR,
latitude NUMERIC,
longitude NUMERIC
);
""")

time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (
start_time TIMESTAMP PRIMARY KEY,
hour INT NOT NULL,
day INT NOT NULL,
week INT NOT NULL,
month INT NOT NULL,
year INT NOT NULL,
weekday INT NOT NULL
);
""")

# ---------------------------------------------------------------------------
# INSERT RECORDS (parameterized with psycopg2 %s placeholders)
# ---------------------------------------------------------------------------
songplay_table_insert = ("""
INSERT INTO songplays
(start_time, user_id, level, song_id,
artist_id, session_id, location, user_agent)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s)
""")

# Upsert: a returning free/paid user keeps one row, its `level` refreshed.
user_table_insert = ("""
INSERT INTO users
(user_id, first_name, last_name, gender, level)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (user_id)
DO UPDATE
    SET level = EXCLUDED.level
""")

song_table_insert = ("""
INSERT INTO songs
(song_id, title, artist_id, year, duration)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (song_id)
DO NOTHING
""")

artist_table_insert = ("""
INSERT INTO artists
(artist_id, name, location, latitude, longitude)
VALUES (%s, %s, %s, %s, %s)
ON CONFLICT (artist_id)
DO NOTHING
""")

time_table_insert = ("""
INSERT INTO time
(start_time, hour, day, week, month, year, weekday)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT(start_time)
DO NOTHING
""")

# ---------------------------------------------------------------------------
# FIND SONGS -- resolve (title, artist name, duration) to (song_id, artist_id)
# ---------------------------------------------------------------------------
song_select = (""" SELECT s.song_id AS song_id, a.artist_id AS artist_id
                   FROM songs s JOIN artists a
                   ON s.artist_id = a.artist_id
                   WHERE s.title = %s AND
                         a.name = %s AND
                         s.duration = %s
""")

# QUERY LISTS
# Creation order respects foreign keys: dimensions first, fact table last.
create_table_queries = [user_table_create, time_table_create, artist_table_create, song_table_create,
                        songplay_table_create]
drop_table_queries = [songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
"naqeebasif@gmail.com"
] | naqeebasif@gmail.com |
b67a04763b8d49ece387f189e5fd7a74bfc988bd | 6ff5168abeaa017bda64513c503ffb137b878333 | /venv/Scripts/pip-script.py | cde7dc14c6c06e5587c1b997c109128e6d8ec5a0 | [] | no_license | SYC1123/PRRecommend | 0a106c472ad1f2ddaa7b03d31c631b5cbff06135 | 3ab56c1274755635775b23b82a8ee93c2d84810e | refs/heads/master | 2020-08-01T02:55:33.198258 | 2019-09-25T12:15:12 | 2019-09-25T12:15:12 | 210,836,643 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 402 | py | #!D:\Python_project\PRRecommend\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('pip==19.0.3', 'console_scripts', 'pip')()
)
| [
"874795069@qq.com"
] | 874795069@qq.com |
141b7d8f033b70aa4d95c4750c3fe9633c26195a | 30ccb1c915262bb56ab72de8284fdf98c1c97e0c | /flaskblog/__init__.py | 8ea7e77fe4d9f10d70a2b98e848b8a2e661a5c9a | [] | no_license | Shrav543/FlaskApp | 8f31aa888085d9e509931fc5292fafebf1f0943d | 75b0ffe4463e68e3350793395ad93545b3154d05 | refs/heads/master | 2022-11-06T11:38:50.554246 | 2020-06-25T05:19:15 | 2020-06-25T05:19:15 | 274,318,807 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 856 | py | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bcrypt import Bcrypt
from flask_login import LoginManager
app = Flask (__name__)
# instance of Flask class #__name__ is the name of the module. can be __main__ of if imported will be name of the file that imports.
# this is required while using forms
app.config['SECRET_KEY'] = 'df963732f50bff2f4051378117aa7f4b'
#we need to specify the path for the database
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///site.db'
db = SQLAlchemy(app)
bcrypt= Bcrypt(app)
login_manager = LoginManager(app)
login_manager.login_view= 'login' #this is used for @login_required decorator
login_manager.login_message_category = 'info'
# this needs to be after db for circular import
#also when we run the app
from flaskblog import routes | [
"er.gauravsharma543@gmail.com"
] | er.gauravsharma543@gmail.com |
bbe320a76e29addef7647323d72c9f06081f7e8f | 60b7b2976c6c786b31e4a3a5c2e0aa38e51354fd | /frontend/views.py | 30fea625929003bffc3b5ff23bd231843333072b | [] | no_license | jasonlimantoro/mysite | 781a2e6bc43171e2ff46304667991e4f303db8be | 88cfc269b37e9a0d0dda54955f4d2571f4aa6eb3 | refs/heads/master | 2022-11-23T17:51:11.526363 | 2019-09-03T10:41:15 | 2019-09-03T10:41:15 | 147,994,690 | 0 | 0 | null | 2022-11-22T02:57:30 | 2018-09-09T05:02:56 | Python | UTF-8 | Python | false | false | 2,287 | py | from django.contrib.auth import login
from django.db.models import Count, Q
from django.shortcuts import render, redirect
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from frontend.models import Category, Blog, Profile
from admin.forms import CommentForm
def home(request):
    """Home page: every blog, annotated with its count of visible comments.

    Honors an optional ``?query=`` parameter for title substring search.
    """
    blogs = Blog.objects.annotate(
        visible_comments=Count('comments', filter=Q(comments__is_hidden=False)))
    search_term = request.GET.get('query')
    if search_term:
        blogs = blogs.filter(title__icontains=search_term)
    return render(request, 'frontend/home.html', {
        'blogs': blogs,
    })
def show_category(request, id):
    """Category page: the category plus its blogs, optionally title-filtered."""
    category_to_show = Category.objects.get(pk=id)
    blogs = category_to_show.blogs.all()
    search_term = request.GET.get('query')
    if search_term:
        blogs = blogs.filter(title__icontains=search_term)
    context = {
        'category_to_show': category_to_show,
        'blogs': blogs,
    }
    return render(request, 'frontend/categories/show.html', context)
def show_blog(request, id):
    """Blog detail page: the post, its visible comments, the current user's
    like state, and an empty comment form."""
    blog = Blog.objects.get(pk=id)
    context = {
        'blog': blog,
        'is_liked': blog.is_liked_by(request.user),
        'visible_comments': blog.comments.filter(is_hidden=False),
        'form': CommentForm(),
    }
    return render(request, 'frontend/blogs/show.html', context)
def register(request):
    """Render the registration page with a blank user-creation form."""
    context = {'form': UserCreationForm()}
    return render(request, 'frontend/pages/registration.html', context)
def signup(request):
    """Handle the registration POST: create the user + profile and log in.

    Invalid submissions re-render the registration page with form errors.
    """
    form = UserCreationForm(request.POST)
    if not form.is_valid():
        return render(request, 'frontend/pages/registration.html', {'form': form})
    user = form.save()
    Profile.objects.create(user=user)
    login(request, user)
    messages.success(request, "Welcome to the administration board!")
    return redirect('admin:index')
def login_view(request):
    """Authenticate the POSTed credentials and log the user in.

    On success, honors the ``next`` parameter when present, otherwise goes
    to the admin index. On failure, silently returns to the home page.

    BUGFIX: the original called ``redirect(request.POST.get('next'))``
    without ``return``, discarding the response — the ``next`` redirect
    never happened and the user always landed on 'admin:index'.
    """
    form = AuthenticationForm(data=request.POST)
    if form.is_valid():
        user = form.get_user()
        login(request, user)
        messages.success(request, "You are logged in!")
        if 'next' in request.POST:
            # SECURITY NOTE(review): 'next' is user-controlled — consider
            # validating it with url_has_allowed_host_and_scheme() to avoid
            # an open redirect.
            return redirect(request.POST.get('next'))
        return redirect('admin:index')
    return redirect('frontend:home')
| [
"jasonlimantoro99@gmail.com"
] | jasonlimantoro99@gmail.com |
59b2e10cd431494b25e4e9394b9281d8646a26de | 847375892387090d1d6bdd1fd6aa24e7b5577d7a | /board.py | b79cf4112f350b9ba93024427a23bddcd5d366ee | [] | no_license | lostfile/messagepy | 9bb72e70bafa28275895ad662873a6f33ed5abe0 | 52946a277b51842c04435cac773020111b4e3727 | refs/heads/master | 2022-05-22T20:32:03.778756 | 2020-04-21T22:26:29 | 2020-04-21T22:26:29 | 257,305,173 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 85 | py |
# Simple message-board menu: list the options, then capture the selection.
for option in ("menu", "1. Post", "2. View", "3. Quit"):
    print(option)
goat = input(">.")
| [
"noreply@github.com"
] | lostfile.noreply@github.com |
2b982037556db5eda0ff9258c738a48e81f70691 | 10bc97eb2d3152e8235f8f91a4cadc6f446342c1 | /python_scripts/coned-wntr-tester.py | 984d213078ba50de5b10c49276059f917dafc698 | [] | no_license | sbw323/CEd-nx-wntr | 2ad0ecc87de93c052e7d40d4a0f292eff4ba1b9f | 267b822538894f9d326e1ad5d72ba34c80db326b | refs/heads/master | 2021-12-14T04:08:25.365347 | 2020-12-25T18:06:19 | 2020-12-25T18:06:19 | 240,288,886 | 0 | 0 | null | 2020-10-25T23:18:38 | 2020-02-13T15:08:03 | Jupyter Notebook | UTF-8 | Python | false | false | 2,465 | py | import wntr
import networkx as nx
import pandas as pd
from pandas import DataFrame as df
from collections import defaultdict
import matplotlib
import matplotlib.pyplot as plt
pnodes = "/Users/aya/Documents/NYU/progressfolders/10152019/NYU2-nodes.xls"
ppipes = "/Users/aya/Documents/NYU/progressfolders/10152019/NYU-pipes.xls"
pregulators = "/Users/aya/Documents/NYU/progressfolders/10152019/NYU3-regulators.xls"
pipesdf0 = pd.read_excel(ppipes, sheet_name='Sheet1')
nodesdf0 = pd.read_excel(pnodes, sheet_name='Sheet1')
regulatorsdf = pd.read_excel(pregulators, sheet_name='Sheet1')
pipesdf0.dropna(axis = 1, how = 'all', inplace = True)
nodesdf0.dropna(axis = 1, how = 'all', inplace = True)
regulatorsdf.dropna(axis = 1, how = 'all', inplace = True)
for i, name in enumerate(pipesdf0.columns):
print(i+1, name)
for i, name in enumerate(nodesdf0.columns):
print(i+1, name)
for i, name in enumerate(regulatorsdf.columns):
print(i+1, name)
pos_dict = defaultdict(list)
for i, j, k in zip(nodesdf0.NAME,nodesdf0.NodeXCoordinate,nodesdf0.NodeYCoordinate):
pos_dict[i].append(j)
pos_dict[i].append(k)
pos_dict0 = dict(pos_dict)
wn = wntr.network.WaterNetworkModel()
node_list = list(nodesdf0.NAME)
for i in node_list:
wn.add_junction(name = i, base_demand=10, demand_pattern='1', elevation=0, coordinates=pos_dict0[i])
for i, label in enumerate(pipesdf0['NAME']):
pname = label
pdest = pipesdf0['FacilityToNodeName'].iloc[i]
psource = pipesdf0['FacilityFromNodeName'].iloc[i]
plen = pipesdf0['PipeLength'].iloc[i]
pdia = pipesdf0['PipeDiameter'].iloc[i]
prough = pipesdf0['PipeRoughness'].iloc[i]
wn.add_pipe(name = pname, start_node_name=psource, end_node_name=pdest, length=plen, diameter=pdia, roughness=prough, minor_loss=0)
G = wn.get_graph()
hangingnodes = list(nx.isolates(G))
pipecheck_tolist = []
pipecheck_tolist.extend(pipesdf0['FacilityToNodeName'])
pipecheck_fromlist = []
pipecheck_fromlist.extend(pipesdf0['FacilityFromNodeName'])
to_temp = [x for x in hangingnodes if x in pipecheck_tolist]
from_temp = [x for x in hangingnodes if x in pipecheck_fromlist]
if len(to_temp) == 0:
print("error:no pipes connecting to regulators")
if len(from_temp) == 0:
print("error:no pipes connecting from regulators")
fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 30
fig_size[1] = 30
plt.rcParams["figure.figsize"] = fig_size
print("Current size:", fig_size)
wntr.graphics.plot_network(wn)
| [
"noreply@github.com"
] | sbw323.noreply@github.com |
396f5e3265ed88b447424af10f7085e05f91ff9a | a487b1e244084a6b10280fa12829d0b042359277 | /blog/views.py | d01eb515555078d0d853b56a0cefac70e8289f7c | [] | no_license | YouKnowBagu/musician_website | 5146e10a2a90b433bebb00885bca94482b9ebd92 | 17621388e47abd4ad855bec42f2a0fe2a30cc91c | refs/heads/master | 2020-07-09T20:48:09.677939 | 2017-06-14T01:38:20 | 2017-06-14T01:38:20 | 94,262,006 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,638 | py | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.views import generic
from django.utils import timezone
from .models import Post
class IndexView(generic.ListView):
    """Front page: the five most recently published posts."""

    template_name = 'blog/index.html'
    context_object_name = 'latest_post_list'

    def get_queryset(self):
        """Return the last five published posts (future-dated ones excluded)."""
        published = Post.objects.filter(pub_date__lte=timezone.now())
        return published.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Single-post page; hides posts that are not yet published."""

    model = Post
    template_name = 'blog/detail.html'

    def get_queryset(self):
        """Restrict lookups to posts whose publication date has passed."""
        now = timezone.now()
        return Post.objects.filter(pub_date__lte=now)
def vote(request, question_id):
    """Record a vote for one of a post's choices and redirect to results.

    BUGFIXES:
    - the body referenced an undefined name ``post_id`` (the URL kwarg is
      ``question_id``), which raised NameError on every request;
    - ``Choice`` was never imported here, so ``Choice.DoesNotExist`` in the
      except clause was itself a NameError — ``ObjectDoesNotExist`` (the
      base class Django raises from ``choice_set.get``) is caught instead.
    """
    post = get_object_or_404(Post, pk=question_id)
    try:
        selected_choice = post.choice_set.get(pk=request.POST['choice'])
    except (KeyError, ObjectDoesNotExist):
        # Redisplay the detail page with an error when no choice was picked.
        return render(request, 'blog/detail.html', {
            'post': post,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('blog:results', args=(post.id,)))
"sean.p.sampson@gmail.com"
] | sean.p.sampson@gmail.com |
12c45619b37366eb1f3468d52e536ab8a0e879c6 | f4c6e3c9e20a703dd67409230bb376eeda7fb5df | /py/get_sample.py | e63ab3c24e4d48ff082dd8a89c065cd75600b400 | [] | no_license | mykytyn/QuasarVariability | 11d097645bbf7f88cee535decfb186c8a56e1f01 | 3f0bd170101351d77f9ed4162106530b42e465ce | refs/heads/master | 2021-01-17T10:17:57.037046 | 2016-06-10T20:09:07 | 2016-06-10T20:09:07 | 11,738,278 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 338 | py | import numpy as np
import pyfits
import random
table = pyfits.open('quasar.fits')
data = table[1].data
print len(set(data['headobjid']))
print len(data['headobjid'])
assert False
random_sample = random.sample(data['headobjid'],256)
f = open('256sample.txt', 'w')
for samp in random_sample:
f.write('{}\n'.format(samp))
f.close()
| [
"dwm261@nyu.edu"
] | dwm261@nyu.edu |
4e0da433bcbd13746fa0f25cce2d056b859779a3 | 4d287c71a1136f386cb00147e2053d1d37a7ee62 | /python/live.py | 9c26ae6800b3c922a6762e96f7d23e3899f3be4a | [] | no_license | cpreinholtz/flyteensy | 02c7a689bd102bd98c9634fc7613bbb6b8bc7574 | a1834ba0c1b7dfb96e5446c598390f836be85707 | refs/heads/master | 2020-04-30T20:39:45.130222 | 2019-04-16T05:53:58 | 2019-04-16T05:53:58 | 177,073,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,655 | py | import serial
import time
import csv
import tkinter
from multiprocessing import Process
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
from matplotlib.widgets import Slider, Button, RadioButtons
#import matplotlib
#print(matplotlib.matplotlib_fname())
#import matplotlib.rcsetup as rcsetup
#print(rcsetup.all_backends)
##################################################################
# Serial
ser = serial.Serial('/dev/ttyACM0')  # Teensy USB-serial device

# Rolling window: 250 Hz sample rate * 5 seconds of history.
maxSamples = 250 * 5
refreshAt = 100  # redraw the plot after this many new samples
refresh = 0      # samples accumulated since the last redraw

# Rolling telemetry series parsed from the serial stream.
epoch = []
ep = []
ei = []
ed = []
result = []

# Last-known PID gains (also used as slider initial positions).
kp = 1.7
ki = 0.7
kd = 2.65

# Field positions in each comma-separated telemetry line.
epoch_index = 0
ep_index = 1
ei_index = 2
ed_index = 3
result_index = 4
kp_index = 5
ki_index = 6
kd_index = 7
total_len = 8

##################################################################
# Plotting
style.use('fivethirtyeight')
fig = plt.figure()
ax1 = fig.add_subplot(1, 1, 1)
# Leave room under the axes for the three gain sliders.
plt.subplots_adjust(left=0.1, bottom=0.25)
def updateKp(val):
    """Slider callback: send the new proportional gain to the controller."""
    global ser
    print("KP slider changed")
    print(val)
    # Protocol: 'p' prefix + gain with three decimals + CRLF terminator.
    command = "p" + "{:.3f}\r\n".format(val)
    ser.write(command.encode())
    print(command)
def updateKi(val):
    """Slider callback: send the new integral gain to the controller."""
    global ser
    print("KI slider changed")
    print(val)
    # Protocol: 'i' prefix + gain with three decimals + CRLF terminator.
    command = "i" + "{:.3f}\r\n".format(val)
    ser.write(command.encode())
    print(command)
def updateKd(val):
    """Slider callback: send the new derivative gain to the controller."""
    global ser
    print("KD slider changed")
    print(val)
    # Protocol: 'd' prefix + gain with three decimals + CRLF terminator.
    command = "d" + "{:.3f}\r\n".format(val)
    ser.write(command.encode())
    print(command)
def animate():
    """Redraw the PID telemetry plot from the current rolling buffers."""
    global ax1
    global epoch, ep, ei, ed, result, kp, ki, kd
    ax1.clear()
    # Plot each error component and the controller output against time.
    for series, label in ((ep, 'error.p'), (ei, 'error.i'),
                          (ed, 'error.d'), (result, 'result')):
        ax1.plot(epoch, series, label=label)
    ax1.legend()
    # Keep the x-range, clamp the y-range to a fixed window.
    x1, x2, y1, y2 = plt.axis()
    plt.axis((x1, x2, -50, 50))
    plt.pause(0.000001)
# Slider step size and shared background color.
delta_f = 0.05
axcolor = 'lightgoldenrodyellow'

# One axes strip per gain slider, stacked beneath the main plot.
kpax = plt.axes([0.1, 0.15, 0.8, 0.03], facecolor=axcolor)
kiax = plt.axes([0.1, 0.1, 0.8, 0.03], facecolor=axcolor)
kdax = plt.axes([0.1, 0.05, 0.8, 0.03], facecolor=axcolor)

kpslider = Slider(kpax, 'KP', 0.0, 6.0, valinit=kp, valstep=delta_f)
kislider = Slider(kiax, 'KI', 0.0, 6.0, valinit=ki, valstep=delta_f)
kdslider = Slider(kdax, 'KD', 0.0, 6.0, valinit=kd, valstep=delta_f)

# Push gain changes to the controller as soon as a slider moves.
kpslider.on_changed(updateKp)
kislider.on_changed(updateKi)
kdslider.on_changed(updateKd)

# Discard any partially received line before starting the read loop.
ser.flushInput()
ser_bytes = ser.readline()
ser.flushInput()

while True:
    try:
        ser_bytes = ser.readline()
        row = str(ser_bytes).split(",")
        # Probe: raises IndexError if the line is empty/unsplittable.
        row[epoch_index]
        if len(row) < total_len:
            print("str too small")
            continue
        else:
            # str(bytes) renders as "b'...'", so skip the leading b' pair.
            epoch.append(float(row[epoch_index][2:]))
            ep.append(float(row[ep_index]))
            ei.append(float(row[ei_index]))
            ed.append(float(row[ed_index]))
            result.append(float(row[result_index]))
            kp = float(row[kp_index])
            ki = float(row[ki_index])
            kd = float(row[kd_index])
            # Trim the rolling window to the last maxSamples points.
            if len(ep) > maxSamples:
                epoch.pop(0)
                ep.pop(0)
                ei.pop(0)
                ed.pop(0)
                result.pop(0)
            refresh = refresh + 1
            if refresh >= refreshAt:
                print("hit refresh")
                refresh = 0
                animate()
                print("refreshing")
    except KeyboardInterrupt:
        # Ctrl-C: report the final gains so they can be written back to firmware.
        print("quitting");
        print("kp: " + str(kp));
        print("ki: " + str(ki));
        print("kd: " + str(kd));
        exit()
    except TypeError: print("type error");
    except Exception as e: print(e)
| [
"cpr978@gmail.com"
] | cpr978@gmail.com |
9ad295ea46fd0a188b6e39582fa49e84e5500783 | cf8119f75ef5bbe0c2797f062b3c8fdafea9c6c9 | /meetings/migrations/0002_meetings_room.py | 1a074d0c924b0d28fc9e8f1997cf41c693d326d7 | [] | no_license | austinogiza/meeting-planner | c1369839c3cde2625f6e2dbd43ba65bd740506c9 | c774488d89207061d7a1e4550df544bdd9381d04 | refs/heads/master | 2022-06-02T17:12:29.298078 | 2020-05-03T08:14:05 | 2020-05-03T08:14:05 | 260,857,090 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 505 | py | # Generated by Django 3.0.5 on 2020-04-22 15:07
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):
    """Auto-generated: add a required ``room`` foreign key to ``Meetings``.

    ``default=1`` backfills existing rows with the Room of pk 1;
    ``preserve_default=False`` removes that default once the migration runs.
    """

    dependencies = [
        ('meetings', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='meetings',
            name='room',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='meetings.Room'),
            preserve_default=False,
        ),
    ]
| [
"44683018+austinogiza@users.noreply.github.com"
] | 44683018+austinogiza@users.noreply.github.com |
4cd3d45d04bbde67e61a64cad5efbb27ea26f331 | 8a41a7f9340cfa784cb36d35dca1ecb1630e4097 | /Programming/Python/dict_practice/Dictionaries_Ordered.py | daef72a4b09a83b3aa461282c1f773d366a4206e | [] | no_license | anishst/Learn | 02e6b6cce43cf21621d328ef0fc25168267a9a3d | a1aed8b78b19acdb23e20be57b67fb242e0aefc5 | refs/heads/master | 2022-05-13T10:17:40.293640 | 2022-03-30T12:44:21 | 2022-03-30T12:44:21 | 173,595,812 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 179 | py | from collections import OrderedDict
# Demonstrate that OrderedDict preserves insertion order.
d = OrderedDict()
for key, value in (('foo', 1), ('bar', 2), ('spam', 3), ('groom', 4)):
    d[key] = value

for key in d:
    print(key, d[key])

import json
# json.dumps keeps the same key order.
print(json.dumps(d))
"sebastian_anish@bah.com"
] | sebastian_anish@bah.com |
146e10df92cdd4a9d1a60a3f232cbc5c790e4ff5 | b47fdabb58d9db257abf7158041bec47e7d1024e | /release/scripts/UpdateGwasStudies.py | 99308a77eb92efb1b04d011581295a12b0e687d7 | [
"Apache-2.0"
] | permissive | PGScatalog/PGS_Catalog | 94c5fb50b1985b6c97d3e509acb3ee81e7bf1f16 | 0068762f601f08c9187bdd0c895793f0fee1b535 | refs/heads/master | 2023-09-01T18:21:47.335888 | 2023-08-31T10:32:39 | 2023-08-31T10:32:39 | 233,808,927 | 9 | 5 | Apache-2.0 | 2023-08-31T10:32:40 | 2020-01-14T09:50:03 | Python | UTF-8 | Python | false | false | 5,187 | py | import requests
from catalog.models import Sample, Score
from pgs_web import constants
class UpdateGwasStudies:
    """Update the GWAS Catalog sample information where it is missing.

    Samples imported from the GWAS Catalog that lack any sample-size data
    are re-fetched from the GWAS REST API; each discovery-stage ("initial")
    ancestry becomes a new Sample row that replaces the incomplete one.
    """

    country_sep = ', '  # separator for concatenated ancestry/country names
    default_val = ''    # seed value for the concatenated ancestry fields
    gwas_rest_url = 'https://www.ebi.ac.uk/gwas/rest/api/studies/'

    def __init__(self, verbose=None):
        """Collect GWAS-sourced samples with no number/cases/controls data."""
        self.samples = Sample.objects.filter(source_GWAS_catalog__isnull=False,sample_number__isnull=True,sample_cases__isnull=True,sample_controls__isnull=True)
        self.verbose = verbose

    def get_gwas_info(self, sample):
        """
        Get the GWAS Study information related to the PGS sample.
        Check that all the required data is available
        > Parameter:
            - sample: Sample whose `source_GWAS_catalog` holds the GWAS
              Study ID (e.g. GCST010127)
        > Return: list of dictionnaries (1 per initial-stage ancestry)
        """
        study_data = []
        gcst_id = sample.source_GWAS_catalog
        response = requests.get(f'{self.gwas_rest_url}{gcst_id}')
        print(f"\n# {gcst_id}:")
        if not response:
            # Covers HTTP error statuses (response is falsy on 4xx/5xx).
            print("\tNo response")
            return study_data
        response_data = response.json()
        if response_data:
            try:
                source_PMID = response_data['publicationInfo']['pubmedId']
                for ancestry in response_data['ancestries']:
                    # Only the discovery ("initial") stage is relevant.
                    if ancestry['type'] != 'initial':
                        continue
                    ancestry_data = { 'source_PMID': source_PMID }
                    ancestry_data['sample_number'] = ancestry['numberOfIndividuals']
                    # ancestry_broad: concatenated ancestral group names
                    for ancestralGroup in ancestry['ancestralGroups']:
                        if not 'ancestry_broad' in ancestry_data:
                            ancestry_data['ancestry_broad'] = self.default_val
                        else:
                            ancestry_data['ancestry_broad'] += self.country_sep
                        ancestry_data['ancestry_broad'] += ancestralGroup['ancestralGroup']
                    # ancestry_free: countries of origin (skipping "NR")
                    for countryOfOrigin in ancestry['countryOfOrigin']:
                        if countryOfOrigin['countryName'] != 'NR':
                            if not 'ancestry_free' in ancestry_data:
                                ancestry_data['ancestry_free'] = self.default_val
                            else:
                                ancestry_data['ancestry_free'] += self.country_sep
                            ancestry_data['ancestry_free'] += countryOfOrigin['countryName']
                    # ancestry_country: countries of recruitment (skipping "NR")
                    for countryOfRecruitment in ancestry['countryOfRecruitment']:
                        if countryOfRecruitment['countryName'] != 'NR':
                            if not 'ancestry_country' in ancestry_data:
                                ancestry_data['ancestry_country'] = self.default_val
                            else:
                                ancestry_data['ancestry_country'] += self.country_sep
                            ancestry_data['ancestry_country'] += countryOfRecruitment['countryName']
                    study_data.append(ancestry_data)
                if study_data:
                    print(f'\t{len(study_data)} distinct ancestries')
                    if self.verbose:
                        for anc in study_data:
                            print(f'\t{anc}')
                else:
                    print("\tNo ancestry")
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt /
                # SystemExit are no longer swallowed.
                print(f'Error: can\'t fetch GWAS results for {gcst_id}')
        else:
            print("\tNo data")
        return study_data

    def update_studies(self):
        """Replace each incomplete sample with one Sample per GWAS ancestry,
        re-pointing any Score that referenced the old sample."""
        for sample in self.samples:
            gwas_study = self.get_gwas_info(sample)
            new_samples = []
            for gwas_ancestry in gwas_study:
                new_sample = Sample()
                new_sample.source_GWAS_catalog = sample.source_GWAS_catalog
                for field, val in gwas_ancestry.items():
                    if isinstance(val, str):
                        val = val.strip()
                    setattr(new_sample, field, val)
                new_sample.save()
                # Cohorts - added after the first save(), once the Sample `id`
                # exists. (The previous `if sample.cohorts:` guard was always
                # truthy — it tested the M2M manager object — so iterating
                # directly is equivalent.)
                for cohort in sample.cohorts.all():
                    new_sample.cohorts.add(cohort)
                new_sample.save()
                new_samples.append(new_sample)
            if new_samples:
                # Update Score - swap the old sample for the new ancestry samples
                scores = Score.objects.filter(samples_variants__in=[sample])
                for score in scores:
                    print(f"\t>> SCORE updated: {score.id}")
                    score.samples_variants.remove(sample)
                    score.samples_variants.add(*new_samples)
                # Delete "old" sample
                sample.delete()
################################################################################
def run():
    """Script entry point: refresh all GWAS samples missing sample data."""
    updater = UpdateGwasStudies(verbose=True)
    updater.update_studies()
| [
"lg10@sanger.ac.uk"
] | lg10@sanger.ac.uk |
09c20c7229dd47b51b3939455359302e5b9693ca | 677824edf2d41e73e88032f29dcbac5d98e1cdf8 | /model/data_process.py | 3dfe21c4380fece238583f4979272edeccc3ec5e | [] | no_license | stephenliu0423/RGNN | be143faa0faf747d25684420668b179aa5e29516 | 7c033f74b67f57ade68dd1190b79129c4eda7b17 | refs/heads/main | 2023-07-04T14:32:15.700330 | 2021-08-02T05:39:35 | 2021-08-02T05:39:35 | 391,826,491 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 5,947 | py | import os
import numpy as np
import json
import pandas as pd
from nltk.corpus import stopwords
import pickle
import random
import sys
from nltk.stem import PorterStemmer
data_type = sys.argv[1]
ps = PorterStemmer()
# tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
stopWords = set(stopwords.words('english'))
tags = ['NN', 'NNS', 'NNP', 'NNPS', 'JJ', 'JJR', 'JJS',
'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'PRP']
def ensureDir(dir_path):
d = os.path.dirname(dir_path)
if not os.path.exists(d):
os.makedirs(d)
class data_process(object):
def __init__(self, data_dir):
self.data_dir = data_dir
self.u_text = dict()
self.i_text = dict()
def numb_id(self, data):
uid = []
iid = []
for x in data['users_id']:
uid.append(self.user2id[x])
for x in data['items_id']:
iid.append(self.item2id[x])
data['users_id'] = uid
data['items_id'] = iid
return data
def data_review(self, train_data):
user_rid = {}
item_rid = {}
user_reviews = {}
item_reviews = {}
for line in train_data.values:
if int(line[0]) in user_reviews:
user_reviews[int(line[0])].append(line[2])
user_rid[int(line[0])].append(int(line[1]))
else:
user_reviews[int(line[0])] = [line[2]]
user_rid[int(line[0])] = [int(line[1])]
if int(line[1]) in item_reviews:
item_reviews[int(line[1])].append(line[2])
item_rid[int(line[1])].append(int(line[0]))
else:
item_reviews[int(line[1])] = [line[2]]
item_rid[int(line[1])] = [int(line[0])]
return user_reviews, item_reviews, user_rid, item_rid
def data_load(self, data):
uid = data['users_id'].values
iid = data['items_id'].values
rate = data['rates'].values
return uid, iid, rate
def process_d(self):
def get_count(data, id):
data_groupby = data.groupby(id, as_index=False)
return data_groupby.size()
Data_file = os.path.join(self.data_dir + 'data.json')
f = open(Data_file)
users_id = []
items_id = []
reviews = []
rates = []
print('start extracting data...')
for line in f:
js = json.loads(line)
if str(js['reviewerID']) == 'unknow':
continue
if str(js['asin']) == 'unknow':
continue
users_id.append(str(js['reviewerID']))
items_id.append(str(js['asin']))
reviews.append(js['reviewText'])
rates.append(js['overall'])
data = pd.DataFrame({'users_id': users_id, 'items_id': items_id, 'reviews': reviews, 'rates': rates})[
['users_id', 'items_id', 'reviews', 'rates']]
print('number of interaction:', data.shape[0])
users_count = get_count(data, 'users_id')
items_count = get_count(data, 'items_id')
unique_users = users_count.index
unique_items = items_count.index
self.user2id = dict((x, i) for (i, x) in enumerate(unique_users))
self.item2id = dict((x, i) for (i, x) in enumerate(unique_items))
data = self.numb_id(data)
train_df = pd.DataFrame(
columns=['users_id', 'items_id', 'reviews', 'rates'])
for user in range(len(self.user2id)):
if user not in train_df['users_id'].values:
ddf = data[data.users_id.isin([user])].iloc[[0]]
train_df = train_df.append(ddf)
data.drop(ddf.index, inplace=True)
for item in range(len(self.item2id)):
if item not in train_df['items_id'].values:
ddf = data[data.items_id.isin([item])].iloc[[0]]
train_df = train_df.append(ddf)
data.drop(ddf.index, inplace=True)
print('start splitting dataset...')
# shuffle data and select train set,test set and validation set
data_len = data.shape[0]
index = np.random.permutation(data_len)
data = data.iloc[index]
train_data = data.head(int(data_len * 0.8) - train_df.shape[0])
train_data = pd.concat([train_data, train_df], axis=0)
tv_data = data.tail(int(data_len * 0.2))
valid_data = tv_data.head(int(data_len * 0.1))
# get reviews of each user and item
print('start collect reviews for users and items...')
user_reviews, item_reviews, user_rid, item_rid = self.data_review(
train_data)
assert len(user_reviews) == len(self.user2id)
assert len(item_reviews) == len(self.item2id)
print('start saving...')
train_data1 = train_data[['users_id', 'items_id', 'rates']]
test_data2 = tv_data[['users_id', 'items_id', 'rates']]
valid_data1 = valid_data[['users_id', 'items_id', 'rates']]
train_data1.to_csv(os.path.join(
self.data_dir, 'data_train.csv'), index=False, header=None)
test_data2.to_csv(os.path.join(
self.data_dir, 'data_test.csv'), index=False, header=None)
valid_data1.to_csv(os.path.join(
self.data_dir, 'data_valid.csv'), index=False, header=None)
pickle.dump(user_reviews, open(
os.path.join(self.data_dir, 'user_review'), 'wb'))
pickle.dump(item_reviews, open(
os.path.join(self.data_dir, 'item_review'), 'wb'))
pickle.dump(user_rid, open(os.path.join(
self.data_dir, 'user_rid'), 'wb'))
pickle.dump(item_rid, open(os.path.join(
self.data_dir, 'item_rid'), 'wb'))
print('done!')
if __name__ == '__main__':
np.random.seed(2020)
random.seed(2020)
path = '../data/' + data_type + '/pro_data/'
ensureDir(path)
Data_process = data_process(path)
Data_process.process_d()
| [
"noreply@github.com"
] | stephenliu0423.noreply@github.com |
52b7b2f5dcb464cd81400a0fb5ba7962dfdc5ca5 | a03a7935a191d63bee76fd3b85a61ee27f98904a | /src/visitpy/visit_utils/setup.py | 3e5361e0abaa404122a4e29ab1228fc6a4e762b9 | [] | no_license | cchriste/visit | 57091c4a512ab87efd17c64c7494aa4cf01b7e53 | c72c413f571e56b52fb7221955219f11f4ba19e3 | refs/heads/master | 2020-04-12T06:25:27.458132 | 2015-10-12T15:41:49 | 2015-10-12T15:41:49 | 10,111,791 | 5 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,130 | py | #*****************************************************************************
#
# Copyright (c) 2000 - 2015, Lawrence Livermore National Security, LLC
# Produced at the Lawrence Livermore National Laboratory
# LLNL-CODE-442911
# All rights reserved.
#
# This file is part of VisIt. For details, see https://visit.llnl.gov/. The
# full copyright notice is contained in the file COPYRIGHT located at the root
# of the VisIt distribution or at http://www.llnl.gov/visit/copyright.html.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# - Redistributions of source code must retain the above copyright notice,
# this list of conditions and the disclaimer below.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the disclaimer (as noted below) in the
# documentation and/or other materials provided with the distribution.
# - Neither the name of the LLNS/LLNL nor the names of its contributors may
# be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY,
# LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
# DAMAGE.
#*****************************************************************************
"""
file: setup.py
author: Cyrus Harrison <cyrush@llnl.gov>
distutils setup script for the 'visit_utils' module.
"""
import sys
# Disable .pyc generation while the setup script runs; the original
# interpreter setting is stashed so it could be restored by callers.
__system_bytecode_setting = sys.dont_write_bytecode
sys.dont_write_bytecode = True
from distutils.core import setup
import setup_tests
#
# Support running tests w/ visit's cli.
# (Fix: removed a duplicate `import sys` that appeared twice in this header.)
#
using_visit = False
try:
    # the command line string passed to cli
    # will confuse distutils, so modify
    # sys.argv to only have args passed after
    # '-s setup.py'
    # NOTE(review): Argv() exists only inside VisIt's cli; outside it this
    # raises NameError and the bare except leaves using_visit False.
    args = Argv()
    sys.argv = [__file__]
    sys.argv.extend(args)
    using_visit = True
except:
    pass
# distutils metadata for the visit_utils package; the custom 'test'
# command delegates to setup_tests.ExecuteTests.
setup(name='visit_utils',
    version='0.1',
    author = 'Cyrus Harrison',
    author_email = 'cyrush@llnl.gov',
    description='VisIt Utilties Module',
    package_dir = {'visit_utils':'src'},
    packages=['visit_utils','visit_utils.qannote'],
    cmdclass = { 'test': setup_tests.ExecuteTests})
if using_visit:
    # When driven from VisIt's cli, exit so the cli session does not continue.
    sys.exit(0)
| [
"bonnell@18c085ea-50e0-402c-830e-de6fd14e8384"
] | bonnell@18c085ea-50e0-402c-830e-de6fd14e8384 |
bf4b1c0574e58c58b1ad2cf9df06b80212dc895f | bb1e3ee4d305f302ac7f1ca68ed011808c030b79 | /Source/ICP-12/spamAnalysis.py | c979bdd3542f7b9ac5398b84b476e2e5b0b01a76 | [] | no_license | AsharGit/Python-ICP | 3c3f682c633bd5a6e6ce104696c32b1b158e65d3 | 129dcbbcf2f2e3c5da57752528b617f52c2dc2d2 | refs/heads/main | 2023-04-08T05:51:10.419451 | 2021-05-01T05:10:01 | 2021-05-01T05:10:01 | 332,126,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,965 | py | import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers import Dense, Embedding, LSTM, SpatialDropout1D
from matplotlib import pyplot
from sklearn.model_selection import train_test_split
from keras.utils.np_utils import to_categorical
import re
from sklearn.preprocessing import LabelEncoder
# Read csv file
data = pd.read_csv('spam.csv')
# Clean up text by applying lowercase and removing special characters from data
data['v2'] = data['v2'].apply(lambda x: x.lower())
# NOTE(review): the class 'a-zA-z' (lowercase final z) also matches the ASCII
# punctuation between 'Z' and 'a' ([\]^_`) -- presumably 'a-zA-Z' was meant.
data['v2'] = data['v2'].apply((lambda x: re.sub('[^a-zA-z0-9\s]', '', x)))
# Tokenize and pad the data
max_fatures = 2000  # vocabulary cap handed to the tokenizer
tokenizer = Tokenizer(num_words=max_fatures, split=' ')
tokenizer.fit_on_texts(data['v2'].values)
# Each message becomes a sequence of word indices, zero-padded to equal length.
X = tokenizer.texts_to_sequences(data['v2'].values)
X = pad_sequences(X)
embed_dim = 128  # embedding vector size
lstm_out = 196  # number of LSTM units
# Create the neural network model
def createmodel():
    """Build and compile the embedding + LSTM binary spam classifier."""
    # Assemble the layer stack first, then add each one to the container.
    net = Sequential()
    layer_stack = [
        Embedding(max_fatures, embed_dim, input_length=X.shape[1]),
        LSTM(lstm_out, dropout=0.2, recurrent_dropout=0.2),
        Dense(2, activation='sigmoid'),
    ]
    for layer in layer_stack:
        net.add(layer)
    net.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    return net
# print(model.summary())
# Label encoding to transform category into integers
labelencoder = LabelEncoder()
integer_encoded = labelencoder.fit_transform(data['v1'])
y = to_categorical(integer_encoded)
# Training and testing the data
X_train, X_test, Y_train, Y_test = train_test_split(X,y, test_size = 0.33, random_state = 42)
# Run the model
batch_size = 32
model = createmodel()
model.fit(X_train, Y_train, epochs = 1, batch_size=batch_size, verbose = 2)
score,acc = model.evaluate(X_test,Y_test,verbose=2,batch_size=batch_size)
# Print the loss and accuracy score
print(score)
print(acc)
print(model.metrics_names)
| [
"32973667+AsharGit@users.noreply.github.com"
] | 32973667+AsharGit@users.noreply.github.com |
7e0304df1c63e7b6ff672baadc49eb77f72e82a7 | 3373d726d553fbd9994df6c61ddf309125ee83bc | /create_time_period_tables.py | 77e5009201e7f9ce745a0ef7eec3266399fe8097 | [
"MIT"
] | permissive | XinyanXiang/Thermaldata | 7a7b614b974798a05c8ef3704ee6bc0d69794642 | 579c3ce8d0b1c815a5866cf9ceb789b81ee0c074 | refs/heads/main | 2023-09-02T10:10:29.312202 | 2021-11-06T03:39:17 | 2021-11-06T03:39:17 | 425,146,144 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 991 | py | import psycopg2
import os
from config import password
from config import database
from config import user
# Connect to the database
try:
    connection = psycopg2.connect(database=database, user=user, password=password)
except Exception as e:
    print(e)
    exit()
current_path = os.getcwd()
channel_folder_list = os.listdir(current_path)
# For every CSV in each geo* channel folder, create a matching two-column
# table named after the file.
try:
    cursor = connection.cursor()
    for folder in channel_folder_list:
        if folder.startswith('geo'):
            # os.path.join is portable; the old "/" concatenation was Unix-only.
            file_path = os.path.join(current_path, folder)
            for fname in os.listdir(file_path):
                if fname.endswith("csv"):
                    # Bug fix: '.format{fname}' was a SyntaxError (braces
                    # instead of a call).  Also strip the extension so the
                    # table name is a valid SQL identifier ("abc.csv" -> "abc").
                    table_name = os.path.splitext(fname)[0]
                    # NOTE(review): the name is interpolated straight into the
                    # SQL; acceptable for trusted local file names, but use
                    # psycopg2.sql.Identifier if names can be untrusted.
                    query = '''CREATE TABLE {0} (
                        id INTEGER,
                        channel_name TEXT)'''.format(table_name)
                    cursor.execute(query)
    # NOTE(review): there is no connection.commit(); without autocommit the
    # created tables are rolled back on exit -- confirm intent.
except Exception as e:
    print(e)
    exit()
| [
"noreply@github.com"
] | XinyanXiang.noreply@github.com |
fe14c9a96e8cc5ceb988566f3f44b607c74ee60f | ca7aa979e7059467e158830b76673f5b77a0f5a3 | /Python_codes/p03729/s464510871.py | 1871472a58b9eb44569fe01d1ae8219a23ea2c08 | [] | no_license | Aasthaengg/IBMdataset | 7abb6cbcc4fb03ef5ca68ac64ba460c4a64f8901 | f33f1c5c3b16d0ea8d1f5a7d479ad288bb3f48d8 | refs/heads/main | 2023-04-22T10:22:44.763102 | 2021-05-13T17:27:22 | 2021-05-13T17:27:22 | 367,112,348 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 206 | py | def main():
s1,s2,s3 = map(str,input().split())
ans = False
if s1[len(s1)-1] == s2[0]:
if s2[len(s2)-1]==s3[0]:
ans = True
print("YES" if ans else "NO")
if __name__ == '__main__':
main() | [
"66529651+Aastha2104@users.noreply.github.com"
] | 66529651+Aastha2104@users.noreply.github.com |
1a7ab04e6283e46ea8560a40926a99c1fe7711b0 | bded4a4facbd7eb29b000fcb59232c81a831ecc0 | /venv/bin/easy_install | 5c43d2e9240877b843d61bddd056b05db08fdb9d | [] | no_license | osmiss/Comiq | 0cb2b1d8f6bc30e10629f8a1ea2c8a1a2bc59def | 0adfd97c5022de35033f49b0e78a5a86f73cdee3 | refs/heads/master | 2020-04-01T17:20:28.743803 | 2018-10-20T16:08:22 | 2018-10-20T16:08:22 | 153,424,229 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | #!/Users/joni/PycharmProjects/Comiq/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
# Auto-generated console-script wrapper: resolves the 'easy_install' entry
# point from setuptools 39.1.0 and runs it, exiting with its return code.
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Strip a trailing '-script.py(w)'/'.exe' from argv[0] so the tool
    # sees its plain command name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
    )
| [
"joni@dyn232-217.eduroam.net.utu.fi"
] | joni@dyn232-217.eduroam.net.utu.fi | |
d551a8dbaf39b47916bf5373f1be88b1d25d8efc | 6e036feb781ffe840cc7faf0e2227e5ad85769e4 | /Coding/Python/Ron/Trials_and_Materials/123string_09->123string_10.py | ede1958176b569510878b03640ad5bc1ab16e75f | [] | no_license | Ron-Chang/MyNotebook | 13c49bde05d014a7533eda20291ba7873812631d | 7c95096a28b7aa4f38f63b1ecb50f028540b0f88 | refs/heads/master | 2021-06-18T01:17:52.195358 | 2021-02-04T03:15:02 | 2021-02-04T03:15:02 | 165,764,268 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,398 | py | def increment_string(strng):
if strng.isalpha() or strng == "":
return strng + "1"
elif strng.isdigit():
num = str(int(strng)+1)
return "0"*(len(strng)-len(num)) + num
else:
for index, s in enumerate(strng[::-1]):
i = len(strng) - (index+1)
# print(f"{i}: {index} - {s}")
if s.isdigit():
continue
else:
# " * "
# "asd123"
# "d" is including strng[i:]
# print(f"{i}: {index} - {s}")
num = str(int(strng[i+1:])+1)
if len(num) == len(strng[i+1:]):
return strng[:i+1] + num
else:
return strng[:i+1] + "0"*(len(strng[i+1:])-len(num)) + num
"""clever way
def increment_string(strng):
# 取頭 : 去掉右邊所有包含(0-9的數字)
head = strng.rstrip('0123456789')
# 取尾 ; 透過head的長度切片取得尾
tail = strng[len(head):]
如果字尾為空(純文字或是無文字)直接補 "1" ,回傳
if tail == "": return strng+"1"
頭 加 尾(數字+1(透過zfill補齊0(尾的長度)) ,回傳
return head + str(int(tail) + 1).zfill(len(tail))
# txt = "13xz"
# print(txt.zfill(10)) # resutl: 00000013xz
"""
txt = "13xz"
print(txt.zfill(10))
print(increment_string('893822851#`/09'))
| [
"ron.hsien.chang@gmail.com"
] | ron.hsien.chang@gmail.com |
cc0c913331abe5f50e3e501719d521e817e06232 | 915865db25d918a4b2c3296aaa702fedf784b042 | /experiments/amplitude/filters_and_envelope.py | 7fc2b2a7fdaa51ea790575b8d8a11459ab693256 | [] | no_license | nikolaims/pi_nfb | f456484683f31986d12f659ee2a9227a51d5edf4 | 789ad0e20fac7f8d0843b5c3af834e23dcc65e33 | refs/heads/master | 2020-04-05T08:09:23.416703 | 2017-07-25T08:55:32 | 2017-07-25T08:55:32 | 81,811,536 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,404 | py | import pylab as plt
from scipy.signal import *
from utils.data.loaders import get_ideal_signal, load_feedback, get_signal
from utils.filters import magic_filter_taps, min_phase_magic_filter
from utils.sinbase import get_base
from itertools import combinations
import numpy as np
n = 10000  # number of samples to analyse
n_components = 50
fs = 250  # sampling rate, Hz
band = (8, 12)  # alpha band of interest, Hz
time = np.arange(n) / fs
# load signal
# Channel 15 of the recording; first n samples only.
signal = get_signal()[:n, 15]
# IIR filt filt
# Zero-phase FIR band-pass (filtfilt) used as the "ideal" reference envelope.
w = 0.1
gain = [0, 0, 1, 1, 0, 0]
taps = firwin2(1000, [0 , band[0]-w, band[0], band[1], band[1]+w, fs/2], gain, nyq=fs/2)
ideal = filtfilt(taps, 1, signal)
plt.plot(np.abs(ideal), 'b', alpha=0.6)
plt.plot(np.abs(hilbert(ideal)), 'b')
# fft
# Brick-wall filtering in the frequency domain: zero everything outside 8-12 Hz.
from scipy.fftpack import rfft, irfft, fftfreq
# NOTE(review): d=1/fs*2 halves the frequency axis scale -- presumably chosen
# to match rfft's packed real layout; confirm against scipy.fftpack docs.
W = fftfreq(signal.size, d=1/fs*2)
f_signal = rfft(signal)
cut_f_signal = f_signal.copy()
cut_f_signal[(W<8) | (W>12)] = 0
cut_signal = irfft(cut_f_signal)
plt.plot(np.abs(cut_signal), 'k', alpha=0.6)
plt.plot(np.abs(hilbert(cut_signal)), 'k')
# Normalized mean-squared error between the FFT-filtered envelope and the ideal one.
print(np.mean((np.abs(hilbert(cut_signal)) - np.abs(hilbert(ideal)))**2)/np.var(np.abs(hilbert(cut_signal))))
# fir minphase
# Causal minimum-phase FIR; the first 28 samples are dropped as filter delay.
fir_signal = lfilter(min_phase_magic_filter(), 1, signal)[28:]
plt.plot(np.abs(fir_signal), 'g', alpha=0.6)
plt.plot(np.abs(hilbert(fir_signal)), 'g')
# iir fir
# Same comparison with the standard "magic" FIR taps.
fir_signal = lfilter(magic_filter_taps(), 1, signal)[28:]
plt.plot(np.abs(fir_signal), 'r', alpha=0.6)
plt.plot(np.abs(hilbert(fir_signal)), 'r')
plt.show() | [
"smtnm@ya.ru"
] | smtnm@ya.ru |
df9acff7102ba7093d1df918c8721d0bccd54c52 | 89ad82bfa5bb3aa3312c815f4199c88e12345974 | /test/test_ifc_checker_checkplan.py | 49cf0d81acc925c49a8f75cc27b0302c48ded0e7 | [] | no_license | lucaslmmanoel/python-api-client | 020f55251af5d86a895740d806618ba94f1863b0 | 49dbabfddb576d3b816c84d86f5c1f080f553704 | refs/heads/master | 2021-02-15T22:45:14.735020 | 2020-03-04T14:02:29 | 2020-03-04T14:02:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,110 | py | # coding: utf-8
"""
BIMData API
BIMData API is a tool to interact with your models stored on BIMData’s servers. Through the API, you can manage your projects, the clouds, upload your IFC files and manage them through endpoints. # noqa: E501
The version of the OpenAPI document: v1
Contact: contact@bimdata.io
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import unittest
import bimdata_api_client
from bimdata_api_client.models.ifc_checker_checkplan import IfcCheckerCheckplan # noqa: E501
from bimdata_api_client.rest import ApiException
class TestIfcCheckerCheckplan(unittest.TestCase):
    """IfcCheckerCheckplan unit test stubs"""
    # Generated by openapi-generator; the stub below should construct the
    # model with example values once mandatory attributes are chosen.
    def setUp(self):
        # No fixtures are needed for these stubs yet.
        pass
    def tearDown(self):
        # Nothing to clean up.
        pass
    def testIfcCheckerCheckplan(self):
        """Test IfcCheckerCheckplan"""
        # FIXME: construct object with mandatory attributes with example values
        # model = bimdata_api_client.models.ifc_checker_checkplan.IfcCheckerCheckplan() # noqa: E501
        pass
if __name__ == '__main__':
unittest.main()
| [
"infra@bimdata.io"
] | infra@bimdata.io |
13a66203e2cd3019fcdbfc8a7566574a418a9675 | de6f02399c4825e8ed48a740a34daea2efe90342 | /Answers.py | 7ab9cf58c250093bf828541d9a464b934913b179 | [] | no_license | ARWongQ/CodingBat-Python-Questions-and-Answers | 2693d3985dcc8d3cbe17f049f9d83adbef0f2684 | df396f5012bffb91f8991a74185627c85aa3998e | refs/heads/master | 2021-01-19T04:49:01.659174 | 2017-01-05T19:11:04 | 2017-01-05T19:11:04 | 76,129,421 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 16,983 | py | #1
'''
@parameters
a= int
b= int
negative = boolean
'''
def pos_neg(a, b, negative):
    """If negative is True: are both a and b strictly negative?
    Otherwise: is exactly one of them strictly negative and the other
    strictly positive?  (Zeros count as neither.)"""
    if negative:
        return a < 0 and b < 0
    # One strictly positive and one strictly negative.
    return (a > 0 and b < 0) or (a < 0 and b > 0)
#2
'''
@parameters
str = string
'''
def not_string(str):
    """Return str prefixed with "not ", unless it already starts with "not"."""
    return str if str.startswith("not") else "not " + str
#3
'''
@parameters
str = string
n = chracter to be removed
'''
def missing_char(str, n):
    """Return str with the character at index n removed.

    Bug fix: the old implementation used str.replace(), which deletes
    EVERY occurrence of that character -- missing_char("banana", 1)
    returned "bnn" instead of "bnana".  Slicing removes only position n.
    """
    # Everything before index n plus everything after it.
    return str[:n] + str[n + 1:]
#4
'''
@parameters
str = string
'''
def front_back(str):
#length of the string
str_length = len(str)
#if the string is empty of has only one letter (to not go off the array)
if(str_length <= 1):
return str
else:
#Get the first letter
first_letter= str[0]
#Get the last letter
last_letter = str[str_length - 1]
#Get the middle letters of the strings
middle = str[1:str_length -1]
#return the string with last and first letter flipped
return last_letter + middle + first_letter
#5
'''
@parameters
str = string
'''
def front3(str):
#length of the string
str_length = len(str)
#Check if the length less than 3
if(str_length < 3):
#Store the front of the str
first_letters = str[0:str_length]
#loop twice to add the front_letters
for i in range(0,2):
str += first_letters
return str
else:
#get the front of the letter
first_letter= str[0:3]
#variable to store the answer
new = ""
#loop three times to add the front_letters
for i in range(0,3):
new += first_letter
return new
#6
'''
@parameters
str = string
n = n copies
'''
def string_times(str, n):
#variable to store the new sring
new= ""
#if empty return empty string
if(n == 0):
return "";
else:
#loop n times to and add to thenew string
for i in range(0,n):
new += str
return new
#7
'''
@parameters
str = string
n = number of times to copy the front
'''
def front_times(str, n):
#variable to store the answer
new = ""
#length of the string
str_length = len(str)
#Check if the length is less than 3 (to not go off the array)
if(str_length < 3):
#loop n times to add front
for i in range(0,n):
new += str
else:
#get the first letters of the string
first_letters = str[0:3]
#loop n times and add the front
for i in range(0,n):
new += first_letters
return new
#8
'''
@parameters
str = sring
'''
def string_bits(str):
#variable to store the answer
new = ""
#length of the string
str_length = len(str)
#if the given sring is non-empty
if(str_length > 0):
#loop in counts of two and add the indexed letter
for i in range(0,str_length, 2):
new += str[i]
return new
#9
'''
@parameters
str = string
'''
def string_splosion(str):
    """Return every non-empty prefix of str concatenated, shortest first
    (e.g. "Code" -> "CCoCodCode")."""
    return "".join(str[:end] for end in range(1, len(str) + 1))
#10
'''
@parameters
nums = array of numbers
'''
def array_count9(nums):
    """Return how many times the value 9 appears in nums."""
    # list.count does the scan in one C-level pass.
    return nums.count(9)
#11
'''
@parameters
nums = array of numbers
'''
def array_front9(nums):
#flag if there is a 9 at the beginning
Flag_9 = False
#get the length of the array
length = len(nums)
#If the length is greater than 4
if(length > 4):
#change the number of times we need to iterate
length = 4
#loop 4 times to se if there is a 9 in the array
for i in range(length):
#if found return true
if(nums[i] == 9):
Flag_9 = True
return Flag_9
#if not found return false
return Flag_9
#12
'''
@parameters
nums = array of numbers
'''
def array123(nums):
#Get the length of the array
length = len(nums)
#loop length-2 times (to not get off the array)
for i in range(length-2):
#check if the sequence is in the array
if(nums[i] == 1 and nums[i+1] == 2 and nums[i+2] == 3):
return True
return False
#13
'''
@parameters
a = string
b = string
'''
def string_match(a, b):
    """Count the positions where a and b contain the same length-2 substring."""
    shortest = min(len(a), len(b))
    # Compare the two-character window starting at each shared position.
    return sum(1 for i in range(shortest - 1) if a[i:i + 2] == b[i:i + 2])
#14
'''
@parameters
name = string
'''
def hello_name(name):
    """Return a greeting of the form "Hello <name>!"."""
    return "Hello {}!".format(name)
#15
'''
@parameters
a = string
b = string
'''
def make_abba(a, b):
    """Concatenate the two strings in "abba" order: a, b, b, a."""
    return "".join((a, b, b, a))
#16
'''
@parameters
tag = string
word = string
'''
def make_tags(tag, word):
#Create the beginning tag
HTML_tag_1= "<" + tag + ">"
#Create the end tag
HTML_tag_2= "</" + tag + ">"
#Concatenate in proper order
HTML_string = HTML_tag_1 + word + HTML_tag_2
return HTML_string
#17
'''
@parameters
out = string
word = string
'''
def make_out_word(out, word):
#Break the out word in half
out_b = out[0:2]
out_e = out[2:4]
#Concatenate in proper order (sandwich)
combined= out_b + word + out_e
return combined
#18
'''
@parameters
str = string
'''
def extra_end(str):
#Get the proper lengths
length = len(str)
prev_len = length - 2
#Get the last 2 characters
last_c = str[prev_len:length]
#Variable to hold the answer
new = ""
#loop 3 times to concatenate the last characters
for i in range(3):
new += last_c
return new
#19
'''
@parameters
str = string
'''
def first_two(str):
    """Return the first two characters of str; shorter inputs (including
    "") are returned unchanged.

    Bug fix: the old guard assigned the length to an unused variable `n`
    (it meant `end`), leaving dead code.  Python slicing already clamps
    out-of-range bounds, so the guard is unnecessary either way.
    """
    return str[:2]
#20
'''
@parameters
str = string
'''
def first_half(str):
    """Return the first half of str (odd lengths round down).

    Bug fix: `length / 2` is a float under Python 3 and float slice
    indices raise TypeError; integer division restores the intended
    (Python 2 era) behaviour.
    """
    return str[:len(str) // 2]
#21
'''
@parameters
str = string
'''
def without_end(str):
#get the length of the string
length = len(str)
#variable to hold the answer
new= ""
#check if length is smaller than 2
if(length <= 2):
return new
else:
#save the middle characters of the given string
middle = str[1:length-1]
return middle
#22
'''
@parameters
a = string
b = string
'''
def combo_string(a, b):
#Get the lengths of the two given strings
length_a = len(a)
length_b = len(b)
#if a is bigger do bab
if(length_a >= length_b):
return b + a + b
else:
#if b is bigger do aba
return a + b + a
#23
'''
@parameters
a = string
b = string
'''
def non_start(a, b):
#Get the lengths of the two hiven strings
length_a = len(a)
length_b = len(b)
#remove the first characters of each string
new_a = a[1:length_a]
new_b = b[1:length_b]
#concatenate the words
combined = new_a + new_b
return combined
#24
'''
@parameters
str = string
'''
def left2(str):
#get the length of the string
length = len(str)
#Check if less than or equal to 2
if(length <= 2):
return str
else:
#Get the first two characters
beg = str[0:2]
#Get the remaining of the string
rem = str[2:length]
#Concatenate remaining and beginning
combined = rem + beg
return combined
#25
'''
@parameters
nums = array of numbers
'''
def first_last6(nums):
#Get the length of the array
length = len(nums)
#Check if the beginning or end of the array is 6
if(nums[0] == 6 or nums[length-1] == 6):
return True
return False
#26
'''
@parameters
nums = array of numbers
'''
def same_first_last(nums):
    """True when nums is non-empty and its first and last elements are equal."""
    # The emptiness check guards the indexing; [-1] is the last element.
    return bool(nums) and nums[0] == nums[-1]
#27
def make_pi():
#Creater and return the array
pi = [3,1,4]
return pi
#28
'''
@parameters
a = array of numbers
b = array of numbers
'''
def common_end(a, b):
#Get the lengths of the two given arrays
length_a = len(a)
length_b = len(b)
#check if the first or last elements are equal
if(a[0] == b[0] or a[length_a-1] == b[length_b-1]):
return True
return False
#29
'''
@parameters
nums = array of numbers
'''
def sum3(nums):
    """Return the sum of the numbers in nums (the kata passes length 3)."""
    return sum(nums)
#30
'''
@parameters
nums = array of numbers
'''
def rotate_left3(nums):
#Get the length of the array
length = len(nums)
#loop length - 1 (to not go off the array)
for i in range(length-1):
#save the current value
temp = nums[i]
#swap
nums[i] = nums[i+1]
#Insert temp
nums[i+1] = temp
return nums
#31
'''
@parameters
nums = array of numbers
'''
def reverse3(nums):
#get the length of the array
length = len(nums)
#save the current value
temp = nums[0]
#swap
nums[0] = nums[length-1]
#Insert temp
nums[length-1] = temp
return nums
#32
'''
@parameters
nums = array of numbers
'''
#This methodology works from beginning to middle and from end to middle swapping the values (middle won't swap)
def reverse_array(nums):
    """Reverse nums in place and return the same list object."""
    # list.reverse performs the end-to-middle swap loop internally.
    nums.reverse()
    return nums
#33
'''
@parameters
nums = array of numbers
'''
def max_end3(nums):
#Get the length
length = len(nums)
#Check which one is greater
max_value = max(nums[0],nums[length-1])
#loop to change all variables with the max_value
for i in range(length):
nums[i] = max_value
return nums
#34
'''
@parameters
nums = array of numbers
'''
def sum2(nums):
#Get the length
length = len(nums)
#Variable for iterations
loop = 2
sum = 0
#If length is smaller than 2, loop length times
if(length < 2):
loop = length
#loop and add the numbers
for i in range(loop):
sum += nums[i]
return sum
#35
'''
@parameters
a = array of numbers
b = array of numbers
'''
def middle_way(a, b):
#Get the lengths of a and b
length_a = len(a)
length_b = len(b)
#Get the middle int form a and b
mid_a = a[length_a // 2]
mid_b = b[length_b // 2]
#Set them in an array
mid_a_b = [mid_a,mid_b]
return mid_a_b
#36
'''
@parameters
nums = array of numbers
'''
def make_ends(nums):
#Get the length
length = len(nums)
#Get the variables from the beginning and end
beg = nums[0]
end = nums[length-1]
#Set them in an array
answer = [beg,end]
return answer
#37
'''
@parameters
nums = array of numbers
'''
def has23(nums):
    """True if nums contains a 2 or a 3."""
    return 2 in nums or 3 in nums
#38
'''
@parameters
cigars = number of cigars (int)
is_weekend = boolean
'''
def cigar_party(cigars, is_weekend):
#Check if the cigars are between 40 and 60 and is not a weekend
if(cigars >= 40 and cigars <= 60 and (not is_weekend)):
return True
#Check if it is a weekend and they have more than 40 cigars
elif(is_weekend and cigars >= 40):
return True
else:
return False
#39
'''
@parameters
you = your fashion (int)
date = your date's fahion (int)
'''
def date_fashion(you, date):
#Get the smaller number
smaller = min(you,date)
#Check if either is equal or greater than 8
if(you >= 8 or date >=8):
#Check if the smallest is smaller or equal than 2
if(smaller <= 2):
return 0
return 2
#Check if either is less than or equal to 2
elif(you <= 2 or date <= 2):
return 0
else:
return 1
#40
'''
@parameters
temp = current temperature (int)
is_summer = boolean
'''
def squirrel_play(temp, is_summer):
#Max temperature
max = 90
#Check if its summer and therefore increase the max temperature
if(is_summer == True):
max = 100
#Check if the temp is between 60 or max
if(temp >= 60 and temp <= max):
return True
else:
return False
#41
'''
@parameters
speed = yoru current driving speed (int)
is_birthday = boolean
'''
def caught_speeding(speed, is_birthday):
#Variable depending on your birthday
birth_d = 0
#Check if it is your birthday if so add 5 to all the speed constrains
if(is_birthday == True):
birth_d = 5
#Check if it is smaller 60 (+5 if birthday)
if(speed <= 60+birth_d):
return 0
#Check if 61 and 80 (+5 if birthday)
elif(speed >= 61+birth_d and speed <= 80+birth_d):
return 1
else:
return 2
#42
'''
@parameters
a = int
b = int
'''
def sorta_sum(a, b):
    """Return a+b, except totals in the "forbidden" range 10..19 yield 20."""
    total = a + b
    return 20 if 10 <= total <= 19 else total
#43
'''
@parameters
day = the day as (int)
vacation = boolean
'''
def alarm_clock(day, vacation):
#Variables for each clock time
alarm_7 = "7:00"
alarm_10 = "10:00"
alarm_off = "off"
#Check if it is a weekday and is not vacation
if(day >= 1 and day <= 5 and not vacation):
return alarm_7
#Check if it is a weekend and is vacation
elif( (day == 0 or day == 6) and vacation):
return alarm_off
else:
return alarm_10
#44
'''
@parameters
a = int
b = int
'''
def love6(a, b):
    """True if either value is 6, or their sum or absolute difference is 6."""
    return 6 in (a, b, a + b, abs(a - b))
#45
'''
@parameters
n = int
outside_mode = boolean
'''
def in1to10(n, outside_mode):
#Check Inside boundaries (False)
if(outside_mode == False):
if(n >= 1 and n <= 10):
return True
else:
return False
else:
#Check Outside boundaries (True)
if(n <= 1 or n >= 10):
return True
else:
return False
#46
'''
@parameters
num = int
'''
def near_ten(num):
    """True when num is within 2 of a multiple of 10."""
    # Python's % keeps the remainder in 0..9 even for negative num.
    return num % 10 <= 2 or num % 10 >= 8
#47
#O(n+m)
#Greedy algorithm
'''
@parameters
small = number of small bricks (int)
big = number of big bricks (int)
goal = desired length (int)
'''
def make_bricks(small,big,goal):
    """Return True when `goal` inches can be laid with at most `big`
    5-inch bricks and `small` 1-inch bricks.

    Fixes two issues with the old greedy loops:
    * goal == 0 now correctly returns True (the loops never ran, so the
      old code fell through to False);
    * runs in O(1) instead of iterating once per brick.
    """
    # Impossible if every brick together is still too short.
    if goal > big * 5 + small:
        return False
    # Big bricks cover multiples of 5; the remainder modulo 5 must be
    # covered by small bricks.
    return goal % 5 <= small
#47.2
'''
@parameters
small = number of small bricks (int)
big = number of big bricks (int)
goal = desired length (int)
'''
def make_bricks_2(small,big,goal):
#big bricks size
big_size = 5
#check if the goal is possible with all the given bricks
#check if you hve enough small blocks to make the desired length
if( (goal > big * big_size + small) or (goal % big_size > small)):
#not possible return False
return False
#if possible return True
return True
#48
'''
@parameters
a = int
b = int
c = int
'''
def lone_sum(a, b, c):
sum = a+b+c
if(a == b and a==c):
return 0
elif(a == b):
return c
elif(a == c):
return b
elif(b == c):
return a
return sum
#49
'''
@parameters
a = int
b = int
c = int
'''
def lucky_sum(a, b, c):
    """Sum a, b and c, except a 13 stops the sum: neither it nor any
    value to its right counts."""
    total = 0
    for value in (a, b, c):
        if value == 13:
            break
        total += value
    return total
| [
"noreply@github.com"
] | ARWongQ.noreply@github.com |
a05028f05c9000689f3179e5f50d874bea420b35 | 041e4b0f5b7734aafa02897b2f9771a7080ad482 | /0x03-python-data_structures/2-replace_in_list.py | f6c13f0960b49fea9a7a703b4f8efc62a33fa795 | [] | no_license | alishabelle/holbertonschool-higher_level_programming | fb79601d8c682bc4c4183a47b85a3ec4d3819579 | aeda412b3c386256b21563c20be0699fcce685fc | refs/heads/master | 2020-05-18T02:47:26.235041 | 2019-09-27T03:53:38 | 2019-09-27T03:53:38 | 184,125,736 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 173 | py | #!/usr/bin/python3
def replace_in_list(my_list, idx, element):
    """Replace my_list[idx] with element in place and return the list.

    Out-of-range indexes leave the list untouched.
    """
    if 0 <= idx < len(my_list):
        my_list[idx] = element
    return my_list
| [
"alishabelle98@gmail.com"
] | alishabelle98@gmail.com |
01c8ccbe9e6889846c28217f8e9266445fb7c747 | 4d0eae8dc3c7dabe2ff79fd8ad035be056423ac7 | /weixincl/weixincl/apps/weixin/urls.py | beb633d21134a141c05983baea470dfeeeaf34ee | [
"MIT"
] | permissive | lionsin/weixincl | 2cdb83e3e596c7fe168b31ed6f747715f7919024 | e02f342b00eaea2704a34ca889903747b0fbb167 | refs/heads/master | 2020-11-30T00:48:33.053979 | 2019-12-26T14:16:04 | 2019-12-26T14:16:04 | 230,254,861 | 1 | 2 | MIT | 2019-12-26T14:54:06 | 2019-12-26T11:45:49 | Python | UTF-8 | Python | false | false | 928 | py | from django.conf.urls import url
from weixin import views
# URL routes for the weixin app: search, collect-list maintenance,
# task management, task-article listing and search history.
urlpatterns = [
    # Articles/search?
    url(r"^pcnum/search$", views.PcSearchListView.as_view()),
    url(r"^collect_list$", views.AddCollectList.as_view()),
    url(r"^collect_list/clear$", views.ClearCollectList.as_view()),
    # NOTE(review): delete maps to the same view as clear -- confirm intended.
    url(r"^collect_list/delete$", views.ClearCollectList.as_view()),
    url(r"^tasks/add$", views.TaskaddAPIView.as_view()),
    url(r"^tasks/list$", views.TasklistAPIView.as_view()),
    url(r"^tasks/detail$", views.TaskShowDetailAPIView.as_view()),
    url(r"^tasks/delete$", views.TaskdeleteAPIView.as_view()),
    # List the articles of a task
    url(r"^task_arts/list$", views.ArticleShowDetailAPIView.as_view()),
    url(r"^history/add$", views.PcSearchHistoryView.as_view()),
    url(r"^history$", views.PcSearchHistoryView.as_view()),
    # Clear the collection-list page history
    url(r"^history/clear$", views.HistoryClearAPIView.as_view()),
]
| [
"="
] | = |
b7079bec6b5c0dbe81b7b5c606da867ae80b7305 | cabaf81ecca938c8c68828bca436e5fb6f89dab9 | /函数/04_列表加等于(面试题).py | 6429809aa6fbde9a79af7a903b92598db2946497 | [] | no_license | M-qiangZhu/review | beb6bd1d7582d5080f3f7d904b15263e85708ef9 | 91a54e0d4dae3763da4d4f09c62ff3c83381fb54 | refs/heads/master | 2023-03-31T13:27:51.401609 | 2021-03-24T10:17:30 | 2021-03-24T10:17:30 | 302,575,722 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | py | def demo(num, num_list):
print("函数开始")
# num = num + num
num += num
# 列表变量使用 += 不会做先相加在赋值的操作!!!
# 本质上是在调用列表的extend方法, 会修改原来列表的内容
num_list += num_list # 等价于 num_list.extend(num_list)
print(num)
print(num_list)
print("函数完成")
gl_num = 9
gl_list = [1, 2, 3]
demo(gl_num, gl_list)
print(gl_num)
print(gl_list)
| [
"905645916@qq.com"
] | 905645916@qq.com |
74401eaadb624a23ecdac79304dd476f09e3fd9d | b12cdab4b2b14643994343cfeb065c8935df8ccd | /metodos.py | b86b321f978d69b9599d4b5b20d9b0ddf81c8c19 | [] | no_license | paobit/Python | bcf120e37f263964d246aa7679b838968d3f4c8f | 79e1319b1c26b5bc89a53faa7d16ed561a7c817b | refs/heads/main | 2023-07-11T23:13:14.717541 | 2021-08-23T19:15:32 | 2021-08-23T19:15:32 | 363,276,822 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,665 | py |
    # NOTE(review): this whole region looks like a class body whose `class`
    # line is missing -- as a module it raises IndentationError.
    # NOTE(review): @classmethod on a method whose first parameter is `self`
    # is almost certainly unintended; the call signature will not match.
    @classmethod
    def setCurso(self, curso):
        self.__curso = curso
    def setFechaLimite(self, fecha):
        self.__fecha = fecha
    # NOTE(review): duplicate name -- this definition silently replaces the
    # setFechaLimite above; one of them was probably meant to be setNroLimite.
    def setFechaLimite(self, nro):
        self.__nro = nro
    def setInscripcion(self, inscripcion):
        self.__inscripcion = inscripcion
    # NOTE(review): writes single-underscore _total_curso ...
    def setTotalCurso(self, total_curso):
        self._total_curso= total_curso
    def setListaCurso(self, lista_cursos):
        self._lista_cursos= lista_cursos
    # NOTE(review): duplicate name (see below) and it returns __curso,
    # not a course list.
    def getListaCurso(self):
        return self.__curso
    def getFechaLimite(self):
        return self.__fecha
    def getNroLimite(self):
        return self.__nro
    def getInscripcion(self):
        return self.__inscripcion
    # NOTE(review): ... but reads name-mangled __total_costo, which no setter
    # ever assigns (note also curso vs costo) -- AttributeError at runtime.
    def getTotalCurso(self):
        return self.__total_costo
    # NOTE(review): second getListaCurso definition shadows the first.
    def getListaCurso(self):
        return self.__lista_cursos
    # NOTE(review): property() accepts at most fget, fset, fdel, doc;
    # passing ten accessors raises TypeError when the class is created.
    establecimiento= property(setCurso,getListaCurso, getFechaLimite,setFechaLimite, getInscripcion, setInscripcion, getTotalCurso,setTotalCurso, getListaCurso, setListaCurso)
    # NOTE(review): the format string has 2 placeholders but 5 arguments, and
    # attributes like self.curso / self.fecha_limita are never set anywhere.
    def __str__(self):
        return "{} informacion general {} cursos".format(self.curso, self.fecha_limita, self.inscripcion, self.total_cursos, self.lista_cursos)
class AgregarElemento(list):
    """A list subclass whose append() announces every element it adds."""

    def append(self, alumnos):
        # Report the new element, then delegate to the built-in list.append
        # via super() (no need to name the current class explicitly).
        print("Añadido el alumno", alumnos)
        super().append(alumnos)
n2 = AgregarElemento() # our logging list of students
n2.append ('Som') # add an element exactly as with a normal list
n2.append('Sam') # another element...
print(n2) # print the list to verify the students
# NOTE(review): this redefines AgregarElemento, replacing the class above --
# existing instances (n2) keep the old behaviour, new ones use this version.
class AgregarElemento(list): # list subclass whose append logs each addition
    def append(self, curso): # log the course, then delegate to list.append
        print ("Añadido el alumno", curso) # message still says "alumno" (student)
        super().append(curso) # super() without naming the current class
n1 = AgregarElemento() # our logging list of courses
n1.append ('danzas') # add an element exactly as with a normal list
n2.append('carpinteria') # NOTE(review): appends to n2 (the students list) right after creating n1 -- looks like a typo for n1
print(n2) # print the list to verify the courses (prints n2, not n1)
n2.modificacion = "danzas" # sets a new public attribute on the n2 instance
"noreply@github.com"
] | paobit.noreply@github.com |
50611f853f33e7ff9224ce3f2774eb527f541eab | 3f3457b5bb0d3cfb085ca2146b289642d4930399 | /inspect_checkpoint.py | 22d0ff7ac99d0cd3b77ad85c4070d6889598f815 | [] | no_license | innodatalabs/tf-seq2seq | b0cad91d2365145e172296f62f00a14ffdc703cd | 6e4978339147f1af81207784747c381632692133 | refs/heads/master | 2021-05-01T06:41:33.068655 | 2016-08-10T10:39:30 | 2016-08-10T10:39:30 | 65,317,568 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,232 | py | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple script for inspect checkpoint files."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import tensorflow as tf
# TF1-style command-line flags: which checkpoint file to read and,
# optionally, which single tensor to dump.
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("file_name", "", "Checkpoint filename")
tf.app.flags.DEFINE_string("tensor_name", "", "Name of the tensor to inspect")
def print_tensors_in_checkpoint_file(file_name, tensor_name):
  """Prints tensors in a checkpoint file.

  With an empty `tensor_name`, dumps every tensor's name and shape from the
  checkpoint; otherwise prints the content of that single tensor.

  Args:
    file_name: Name of the checkpoint file.
    tensor_name: Name of the tensor in the checkpoint file to print.
  """
  try:
    ckpt_reader = tf.train.NewCheckpointReader(file_name)
    if tensor_name:
      print("tensor_name: ", tensor_name)
      print(ckpt_reader.get_tensor(tensor_name))
    else:
      # No tensor requested: show the whole checkpoint's name/shape listing.
      print(ckpt_reader.debug_string().decode("utf-8"))
  except Exception as e:  # pylint: disable=broad-except
    # Surface the reader's error text; add a hint for SNAPPY-compressed files.
    print(str(e))
    if "corrupted compressed block contents" in str(e):
      print("It's likely that your checkpoint file has been compressed "
            "with SNAPPY.")
def main(unused_argv):
  """Entry point: dispatch to the checkpoint printer, or show usage."""
  if FLAGS.file_name:
    print_tensors_in_checkpoint_file(FLAGS.file_name, FLAGS.tensor_name)
  else:
    print("Usage: inspect_checkpoint --file_name=checkpoint_file_name "
          "[--tensor_name=tensor_to_print]")
    sys.exit(1)
if __name__ == "__main__":
  tf.app.run()  # parses the flags above, then invokes main(argv)
| [
"mkroutikov@INNCELPT00068.NEWJERSEY.INNODATA.NET"
] | mkroutikov@INNCELPT00068.NEWJERSEY.INNODATA.NET |
f85c12fb2141e0a77279dc13b68fe54489ab674f | a26554d068f564f2811e7774f5df44326dd6978b | /04. FUNCTIONS/04-02-07. Perfect Number.py | e62a4aef3a2b7574dae7bd4f64c0788746f07a0e | [] | no_license | emma-metodieva/SoftUni_Python_Fundamentals_202009 | 300c8323308af8a7be4efe42836dd6a7866a34b0 | 5f58a2d565dc4c3bf28330888aa6a2d3b1f8125f | refs/heads/master | 2023-01-31T15:28:04.360805 | 2020-12-12T20:39:17 | 2020-12-12T20:39:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 437 | py | # 04-02. FUNCTIONS [Exercise]
# 07. Perfect Number
def perfect_number(number):
    """Return True when *number* equals the sum of its proper divisors.

    Non-positive inputs are never perfect.
    """
    if number <= 0:
        return False
    divisor_total = sum(d for d in range(1, number) if number % d == 0)
    return divisor_total == number
# Read one integer from stdin and report whether it is a perfect number.
if perfect_number(int(input())):
    print('We have a perfect number!')
else:
    print('It\'s not so perfect.')
| [
"emma.hristova@gmail.com"
] | emma.hristova@gmail.com |
df79edc2603022b1ab1b3e93a989e4931eec5220 | 9642df3772dc0123d10a05fc4dcb2e076685be27 | /doyouconsider/apps/videos/admin.py | ad783d056d0b50480eae333e2326c28f7c15fa96 | [
"BSD-2-Clause-Views"
] | permissive | orzubalsky/mercury-13 | 4733f47454be0be6bed8bafb3f244a69b163d8f6 | 45745f919f3079ef595b8952e43ffb432dedbffb | refs/heads/master | 2021-01-01T08:21:13.173480 | 2013-07-03T00:44:46 | 2013-07-03T00:44:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 455 | py | from videos.models import *
from django.contrib import admin
class VideosInline(admin.TabularInline):
    # Inline table of Video rows shown on the Page admin form.
    model = Video
    extra = 2  # number of extra blank video forms rendered
    fields = ['page', 'code', 'author']
class VideoAdmin(admin.ModelAdmin):
    # Standalone admin form for Video; controls field order on the page.
    fields = ['status', 'page', 'code', 'author', 'message']
class PageAdmin(admin.ModelAdmin):
    # Page admin with its videos editable inline and a two-column list view.
    inlines = [VideosInline]
    list_display = ('number', 'videos')
admin.site.register(Page, PageAdmin)
admin.site.register(Video, VideoAdmin)
| [
"juviley@gmail.com"
] | juviley@gmail.com |
71a3aa6946feed2ef7063d12f939c4b2da4441d9 | 81efa90f461bfcb2569a570626fe69b59f3b221a | /Core/views/__init__.py | d9c48e0c52d0ce60e94590ca91bd600156d3e789 | [] | no_license | gonvazsua/SportManagement | 1760188f2ba54964a33805981eee191a14822baf | cdc0ed7a625ad1e2ea754074d2594d6a5b1a3b99 | refs/heads/master | 2021-04-09T16:10:29.550367 | 2016-11-27T10:51:52 | 2016-11-27T10:51:52 | 31,132,158 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 156 | py | # -*- encoding: utf-8 -*-
# Importación de todas las vistas
from .administracion import *
from .inicio import *
from .usuarios import *
from .util import * | [
"gonzalovazquezs@gmail.com"
] | gonzalovazquezs@gmail.com |
4d8545ec4c2396a029e4df3f841a6a2014442e80 | 4edbeb3e2d3263897810a358d8c95854a468c3ca | /python3/urllib/urllib1.py | 0a567f305976d5ebce0f80d94faea4c0b095cdf5 | [
"MIT"
] | permissive | jtraver/dev | f505d15d45b67a59d11306cc7252114c265f388b | 2197e3443c7619b856470558b737d85fe1f77a5a | refs/heads/master | 2023-08-06T02:17:58.601861 | 2023-08-01T16:58:44 | 2023-08-01T16:58:44 | 14,509,952 | 0 | 1 | MIT | 2020-10-14T18:32:48 | 2013-11-19T00:51:19 | Python | UTF-8 | Python | false | false | 479 | py | #!/usr/bin/env python3
#!/usr/bin/python
import urllib.request, urllib.parse, urllib.error
def main():
    """Fetch the Yahoo stock-screener page and print the raw response body."""
    # url = 'https://screener.finance.yahoo.com/stocks.html'
    url = 'https://screener.finance.yahoo.com/b?sc=&im=&prmin=0&prmax=&mcmin=&mcmax=&dvymin=0&dvymax=&betamin=&betamax=&remin=&remax=&pmmin=&pmmax=&pemin=&pemax=&pbmin=&pbmax=&psmin=&psmax=&pegmin=&pegmax=&gr=&grfy=&ar=&vw=1&db=stocks'
    # Use the response as a context manager so the HTTP connection is closed
    # deterministically (the original leaked the open response object).
    with urllib.request.urlopen(url) as html:
        print("%s" % html.read())
# Run the fetch on import/execution (this script has no __main__ guard).
main()
| [
"john@aeropsike.com"
] | john@aeropsike.com |
9d16853bc6bd8fea1576535c63f93840bda5f4b6 | e0c89e0d4374c307c50444bd57006c54468b7841 | /subs_cipher.py | 3afd1ec1b9f93760b9f71a61ce7753b26b76cfd9 | [] | no_license | xinli2/Map-Cipher-Unzip | c3e3a4355d0b7254675a3a1b52dd5a04852b28e2 | 2bdb76760fe8fa6781dbabc25f31492735aea8db | refs/heads/main | 2023-08-21T11:12:18.867271 | 2021-10-29T23:18:15 | 2021-10-29T23:18:15 | 422,735,460 | 13 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,037 | py | """
File: subs_cipher.py
Author: Xin Li
Purpose: This program actually does the encoding and
    decoding of the substitution cipher.
"""
'''from build_map import build_map
map = build_map(filename)
test = is_valid_map(map)
assert test == True
'''
def encode(map, msg):
    """Substitution-encode *msg*: each character found in *map* is replaced
    by its mapped value; all other characters pass through unchanged."""
    encoded_chars = [map[ch] if ch in map else ch for ch in msg]
    return ''.join(encoded_chars)
def decode(map,msg):
decode_map= {}
value_lst= []
key_lst = []
lst=[]
new_string= ''
for key in map:
value_lst.append(key)
key_lst.append(map[key])
for index in range(len(key_lst)):
decode_map[key_lst[index]] =value_lst[index]
for i in range (len(msg)):
if msg[i] in decode_map:
lst.append(decode_map[msg[i]])
else:
lst.append(msg[i])
for j in lst:
new_string += j
return new_string
| [
"noreply@github.com"
] | xinli2.noreply@github.com |
c870330d64726a144c7e06e901837ff160b600b7 | 233ab17b8a06587c9bda6c302c58ac37aec2bd2d | /benchmark.py | 068eb2a355ce0929bdf6513163585ba70747d6fb | [] | no_license | lyj1ng/sremto | d5e89d23eda65fa4c388464042dd4de33ec276ee | 8b7309f0299fe611896a9aa0543cec34fbb5d3e0 | refs/heads/master | 2020-07-22T18:00:35.742667 | 2019-09-09T13:20:25 | 2019-09-09T13:20:25 | 207,283,006 | 6 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,128 | py | from basic import Ackley, Griewank, Rastrigin, Rosenbrock, \
Schwefel, Sphere, Weierstrass
from task import Task
import scipy.io as sio
import os
def mat2python(filename, flags):
    """Load the task matrices for one benchmark pair from a .mat file.

    filename: path fragment (with its own leading separator) appended to
        this module's directory -- NOTE(review): callers pass Windows-style
        backslash paths, so this resolves on Windows only.
    flags: 4 entries aligned with ['GO_Task1', 'GO_Task2', 'Rotation_Task1',
        'Rotation_Task2']; a None entry means "skip that matrix".

    Returns a 4-element list of arrays, with None in the skipped slots.
    """
    base_dir = os.path.abspath(os.path.dirname(__file__))
    data = sio.loadmat(base_dir + filename)
    names = ['GO_Task1', 'GO_Task2', 'Rotation_Task1', 'Rotation_Task2']
    parameters = []
    for i, flag in enumerate(flags):
        if flag is not None:  # PEP 8: identity test for None, not `!= None`
            parameters.append(data[names[i]])
        else:
            parameters.append(None)
    return parameters
def CI_HS(filename = '\Tasks\CI_H.mat'):
    """CI+HS pair: rotated 50-D Griewank and rotated 50-D Rastrigin."""
    # Complete Intersection and High Similarity (CI+HS)
    flags = ['GO_Task1', 'GO_Task2', 'Rotation_Task1', 'Rotation_Task2']
    params = mat2python(filename, flags)
    Task1 = Griewank(M=params[2], opt=params[0])
    Task2 = Rastrigin(M=params[3], opt=params[1])
    tasks = [Task(50, Task1.fnc, -100, 100),
             Task(50, Task2.fnc, -50, 50)]
    return tasks
def CI_MS(filename = '\Tasks\CI_M.mat'):
    """CI+MS pair: rotated 50-D Ackley and rotated 50-D Rastrigin."""
    # Complete Intersection and Medium Similarity (CI+MS)
    flags = ['GO_Task1', 'GO_Task2', 'Rotation_Task1', 'Rotation_Task2']
    params = mat2python(filename, flags)
    Task1 = Ackley(M=params[2], opt=params[0])
    Task2 = Rastrigin(M=params[3], opt=params[1])
    tasks = [Task(50, Task1.fnc, -50, 50),
             Task(50, Task2.fnc, -50, 50)]
    return tasks
def CI_LS(filename = '\Tasks\CI_L.mat'):
    """CI+LS pair: rotated 50-D Ackley and unrotated 50-D Schwefel."""
    # Complete Intersection and Low Similarity
    flags = ['GO_Task1', None, 'Rotation_Task1',None]
    params = mat2python(filename, flags)
    Task1 = Ackley(M=params[2], opt=params[0])
    Task2 = Schwefel()
    tasks = [Task(50, Task1.fnc, -50, 50),
             Task(50, Task2.fnc, -500, 500)]
    return tasks
def PI_HS(filename = '\Tasks\PI_H.mat'):
    """PI+HS pair: rotated 50-D Rastrigin and shifted 50-D Sphere."""
    # Partial Intersection and High Similarity (PI+HS)
    flags = ['GO_Task1', 'GO_Task2', 'Rotation_Task1', None]
    params = mat2python(filename, flags)
    Task1 = Rastrigin(M=params[2], opt=params[0])
    Task2 = Sphere(opt= params[1])
    tasks = [Task(50, Task1.fnc, -50, 50),
             Task(50, Task2.fnc, -100, 100)]
    return tasks
def PI_MS(filename = '\Tasks\PI_M.mat'):
    """PI+MS pair: rotated 50-D Ackley and unrotated 50-D Rosenbrock."""
    # Partial Intersection and Medium Similarity (PI+MS)
    flags = ['GO_Task1',None, 'Rotation_Task1', None]
    params = mat2python(filename, flags)
    Task1 = Ackley(M=params[2], opt=params[0])
    Task2 = Rosenbrock()
    tasks = [Task(50, Task1.fnc, -50, 50),
             Task(50, Task2.fnc, -50, 50)]
    return tasks
def PI_LS(filename = '\Tasks\PI_L.mat'):
    """PI+LS pair: rotated 50-D Ackley and rotated 25-D Weierstrass."""
    # Partial Intersection and Low Similarity (PI+LS)
    flags = ['GO_Task1', 'GO_Task2', 'Rotation_Task1', 'Rotation_Task2']
    params = mat2python(filename, flags)
    Task1 = Ackley(M=params[2], opt=params[0])
    Task2 = Weierstrass(M=params[3], opt=params[1])
    tasks = [Task(50, Task1.fnc, -50, 50),
             Task(25, Task2.fnc, -0.5, 0.5)]
    return tasks
def NI_HS(filename = r'\Tasks\NI_H.mat'):
    """NI+HS pair: unrotated 50-D Rosenbrock and rotated 50-D Rastrigin.

    Raw-string default: '\\N' would otherwise be a malformed escape.
    """
    # No Intersection and High Similarity
    flags = [None, 'GO_Task2', None, 'Rotation_Task2']
    params = mat2python(filename, flags)
    Task1 = Rosenbrock()
    Task2 = Rastrigin(M=params[3], opt=params[1])
    tasks = [Task(50, Task1.fnc, -50, 50),
             Task(50, Task2.fnc, -50, 50)]
    return tasks
def NI_MS(filename = r'\Tasks\NI_M.mat'):
    """NI+MS pair: rotated 50-D Griewank and rotated 50-D Weierstrass."""
    # No Intersection and Medium Similarity (NI+MS)
    flags = ['GO_Task1', 'GO_Task2', 'Rotation_Task1', 'Rotation_Task2']
    params = mat2python(filename, flags)
    Task1 = Griewank(M=params[2], opt=params[0])
    Task2 = Weierstrass(M=params[3], opt=params[1])
    tasks = [Task(50, Task1.fnc, -100, 100),
             Task(50, Task2.fnc, -0.5, 0.5)]
    return tasks
def NI_LS(filename = r'\Tasks\NI_L.mat'):
    """NI+LS pair: rotated 50-D Rastrigin and unrotated 50-D Schwefel."""
    # No Intersection and Low Similarity (NI+LS)
    flags = ['GO_Task1',None, 'Rotation_Task1',None]
    params = mat2python(filename, flags)
    Task1 = Rastrigin(M=params[2], opt=params[0])
    Task2 = Schwefel()
    tasks = [Task(50, Task1.fnc, -50, 50),
             Task(50, Task2.fnc, -500, 500)]
    return tasks
"yjli98@qq.com"
] | yjli98@qq.com |
18efe61bb835a1f0cb38b34b7dc3c7e9e2c5e906 | 26c1bb5e850c673f49e6d72b83cf59da697a711b | /csaw_ctf_2014/psifer school/crypto200.py | f1c4bfdd8770138442493667168e5613105bae24 | [] | no_license | ispoleet/ctf-writeups | 80007e14d0b99b6acea7d00901d21a1acf864247 | c965cf15920c99b8ff769626d628c667faedbcb1 | refs/heads/master | 2023-03-18T06:00:22.461509 | 2023-03-14T19:53:54 | 2023-03-14T19:53:54 | 63,794,288 | 17 | 1 | null | null | null | null | UTF-8 | Python | false | false | 4,807 | py | # --------------------------------------------------------------------------------------------------
import socket
import sys
# --------------------------------------------------------------------------------------------------
# simple ROT-n decryption
def rotn(s, off):
    """Caesar-shift the lowercase letters of *s* by *off* positions.

    Characters outside a-z (uppercase, digits, punctuation) pass through
    unchanged, matching the original behaviour.
    """
    chars = "abcdefghijklmnopqrstuvwxyz"
    trans = chars[off:] + chars[:off]
    # Build the mapping once: dict lookup is O(1) per character, whereas the
    # original lambda called chars.find (a linear scan) for every character.
    # (Kept dict-based rather than str.maketrans so the file stays
    # Python-2 compatible, like its surrounding print statements.)
    table = dict(zip(chars, trans))
    return ''.join(table.get(c, c) for c in s)
# --------------------------------------------------------------------------------------------------
def transposition(message, key):
    """Columnar transposition: read *message* column-by-column on a grid
    that is *key* characters wide, and concatenate the columns."""
    # Column `col` holds the characters at positions col, col+key, col+2*key,
    # ... which is exactly the extended slice message[col::key].
    columns = [message[col::key] for col in range(key)]
    return ''.join(columns)
# --------------------------------------------------------------------------------------------------
if __name__ == "__main__":
    # Python 2 script: connects to the CTF service and solves three stages
    # (ROT-n, columnar transposition, Vigenere) interactively.
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('54.209.5.48', 12345))
    print s.recv(1024)
    cipher1 = s.recv(1024)
    print cipher1
    print '-------------------------------------'
    print 'extract cipher:', cipher1[102:]
    # Stage 1: brute-force the ROT offset until the plaintext starts with "the".
    for n in range(0,25):  # for each possible n
        p1 = rotn(cipher1[102:], n)
        if p1[0:3] == 'the':
            print 'plaintext 1:', p1[28:]
            break
    s.send(p1[28:] )
    print s.recv(1024)
    cipher2 = s.recv(1024)
    print cipher2
    # Stage 2: try transposition widths until the text starts "I hope you".
    for i in range(1,30):
        p2 = transposition(cipher2[120:], i)
        if p2[0:10] == 'I hope you':
            print 'i:', i, ' --- ', p2
            break
        # NOTE(review): i never reaches 100 inside range(1,30), so this
        # "not found" branch is dead code; `exit` also lacks the call parens.
        if i == 100:
            print 'plaintext 2 not found'
            exit
    # Extract the quoted phrase from the recovered plaintext.
    flag = "";
    start = 0;
    for i in range(0, len(p2)):
        if p2[i] == '"':
            start = start + 1;
        if start == 1 and p2[i]!='"':
            flag = flag + p2[i];
    flag = flag + '\n';
    print 'plaintext 2: ', flag
    # NOTE(review): `stdin` is not defined anywhere (only `sys` is imported),
    # so this raises NameError -- it presumably should be sys.stdin.readline().
    s.send( stdin.readline() )
    #s.send(flag)
    cipher3 = s.recv(1024)
    print cipher3
    cipher3 = cipher3[91:]
    print cipher3
    # Known plaintext for stage 3 (Vigenere): the static banner text.
    plain3 = 'THIST IMEWE WILLG IVEYO UMORE PLAIN TEXTT OWORK WITHY OUWIL LPROB ABLYF INDTH ATHAV INGEX TRACO NTENT THATI SASCI IMAKE STHIS ONEMO RESOL VABLE ITWOU LDBES OLVAB LEWIT HOUTT HATBU TWEWI LLMAK ESURE TOGIV ELOTS OFTEX TJUST TOMAK ESURE THATW ECANH ANDLE ITIWO NDERH OWMUC HWILL BEREQ UIRED LETSP UTTHE MAGIC PHRAS EFORT HENEX TLEVE LINTH EMIDD LERIG HTHER EBLAH LAHOK NOWMO RETEX TTOMA KESUR ETHAT ITISS OLVAB LEISH OULDP ROBAB LYJUS TPUTI NSOME NURSE RYRHY MEORS OMETH INGMA RYHAD ALITT LELAM BLITT LELAM BLITT LELAM BMARY HADAL ITTLE LAMBW HOSEF LEEZE WASWH ITEAS SNOWI DONTW ANTTO MAKET HISHA RDERT HANIT NEEDS TOBEI FYOUV ESOLV EDALO TOFSI MPLEC RYPTO CHALL ENGES YOUPR OBABL YALRE ADYHA VETHE CODEA NDWIL LBREE ZERIG HTTHR OUGHI TIFIT HELPS MOSTO FTHEP LAINT EXTIS STATI CATEA CHOFT HELEV ELSIM NOTAM ASOCH ISTTH EFUNN YTHIN GISTH ATDEP ENDIN GONWH ICHRA NDOMK EYYOU GETTH ATPOE MMIGH TBEEX ACTLY THERI GHTOF FSETT OSUCC ESSFU LLY'
    # Recover the key stream: k[i] = (cipher - plain) mod 26 over 32 chars.
    k = ''
    for i in range(0, 32):
        if cipher3[i] != ' ':
            c = abs(ord(cipher3[i])-ord('A'))
            p = abs(ord(plain3[i])-ord('A'))
            k = k + chr(ord('A') + (26 + c - p) % 26);
    # print cipher3[i],plain3[i], k
    print k
    # Find the key length: smallest period l where the stream repeats.
    for l in range(3,16):  # for all possible key lengths
        for i in range(0,16-l):
            if k[i] != k[i+l]:  # wrong length
                break;
        if i >= 4:
            print 'length found. Key:', k[:l]
            break;
    k = k[:l]
    #decrypt c
    p3 = ''
    z = 0
    print 'l:', l
    # Decrypt the full message with the recovered repeating key.
    for i in range(0, len(cipher3)):
        if cipher3[i] != ' ':
            c = abs(ord(cipher3[i])-ord('A'))
            k3 = abs(ord(k[z])-ord('A'))
            p3 = p3 + chr(ord('A') + ((26 + c - k3) % 26));
            z = (z + 1) % l
    print p3
    # The magic phrase sits between the RIGHTHERE and OKNOWMORE markers.
    plaintext3 = p3[p3.find('RIGHTHERE')+9: p3.find('OKNOWMORE')]
    print 'plaintext 3: ', plaintext3
    #s.send(plaintext3)
    # NOTE(review): same undefined `stdin` as above (NameError).
    s.send( stdin.readline() )
    #s.send(flag)
    cipher4 = s.recv(1024)
    print cipher4
    cipher4 = s.recv(1024)
    print cipher4
    s.close()
# --------------------------------------------------------------------------------------------------
| [
"ispoleet@gmail.com"
] | ispoleet@gmail.com |
cd61eafbb1618f7bbfa0bd57fc0f5fe2b5f8b6b9 | 1589193f99d6f16b833ada18024e279c16537699 | /stock_quotes_app/views.py | 25763b7a2285d7969ffd721e7b23021f84ce5ba2 | [] | no_license | thomasjohnbeeson/djangostocks | 255932548e5249c4dc843ff2a0b0bbb9ad6a9e21 | b6c4d5894dc26c6d9c9d51aadcc18040b34a0577 | refs/heads/master | 2022-07-30T17:45:29.739960 | 2020-05-16T20:05:20 | 2020-05-16T20:05:20 | 264,517,761 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,564 | py | from django.shortcuts import render, redirect
from .models import Stock
from django.contrib import messages
from .forms import StockForm
from django import forms
def home(request):
    """Render the home page; on POST, fetch an IEX quote for the ticker.

    GET renders the empty form; POST looks up the submitted symbol and
    renders the quote JSON (or an error string) into the same template.
    """
    if request.method == "POST":
        ticker = request.POST["ticker"]  # symbol submitted by the form
        import requests, json
        # NOTE(review): the API token is hard-coded here; it should live in
        # settings/environment rather than source control.
        url = "https://cloud.iexapis.com/stable/stock/" + ticker + "/quote?token=pk_45817a824f534dc1856c3be2a7319283"
        api_req = requests.get(url)
        try:
            api_content = json.loads(api_req.content)
        except Exception:
            api_content = "Error..."
        return render(request, "home.html", {"api_content": api_content})
    else:
        return render(request, "home.html", {"ticker": "enter a ticker symbol above..."})
    # The original function had a second, hard-coded AAPL lookup after this
    # point; both branches above return, so that code was unreachable and
    # has been removed.
def about(request):
    """Render the static about page with an empty context."""
    return render(request, "about.html", {})
def add_stock(request):
    """Handle the watch-list page: add a stock on POST, list quotes on GET.

    GET fetches one IEX quote per saved ticker (sequentially -- one HTTP
    request per row) and renders them with the ticker list.
    """
    if request.method == "POST":
        form = StockForm(request.POST or None)
        if form.is_valid():
            form.save()
            messages.success(request, ("Stock has been added successfully!"))
            return redirect('add_stock')
        else:
            # NOTE(review): an invalid form is reported via messages.success
            # (should be messages.error) and redirects to "home" rather than
            # back to this page; confirm that is intended.
            messages.success(request, ("There was an error..."))
            return redirect("home")
    else:
        ticker = Stock.objects.all()
        import requests, json
        output = []
        for ticker_item in ticker:
            url = "https://cloud.iexapis.com/stable/stock/" + str(ticker_item).lower() + "/quote?token=pk_45817a824f534dc1856c3be2a7319283"
            api_req = requests.get(url)
            try:
                api_content = json.loads(api_req.content)
                api_content.update({"ticker": ticker_item})
                output.append(api_content)
            except Exception as e:
                # Failed lookups are silently skipped (api_content is
                # overwritten and never appended); `e` is unused.
                api_content = "Error..."
        return render(request, "add_stock.html", {"ticker": ticker, "output":output})
def delete(request,stock_id):
    """Delete the Stock row with primary key *stock_id*, then return to the list."""
    item = Stock.objects.get(pk=stock_id)  # raises Stock.DoesNotExist if absent
    item.delete()
    messages.success(request, ("Stock has been deleted successfully!"))
    return redirect("add_stock")
| [
"thomas.john.beeson@gmail.com"
] | thomas.john.beeson@gmail.com |
b68667ed36210901bf49748ac43baa309ff86635 | d59f33a0ac62da5de6631623acca44ea3f000fb1 | /find a way of maze/cs561hw1.py | 413f9498b79309fd57bd6112b4909c049e1eb68e | [] | no_license | DamnPhD/ArtificialIntelligence.github.io | 8681d22f65e2ed892577bbb4e607a5b306ebb045 | aeef46515457a112ed3b254a2c9a88f749f781a8 | refs/heads/main | 2023-05-03T05:52:05.916875 | 2021-05-24T23:57:59 | 2021-05-24T23:57:59 | 370,513,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 8,341 | py | # 2020 Fall CSCI 561 HW1 by Jun
import heapq
input_file = "input6.txt"
output_file = "output_trial6.txt"
#read in data
search_alg = ""
maze_size = []
maze_in = []
maze_out = []
num_grids = 0
arr_grids = []
# Input layout: line 1 = algorithm name (BFS / UCS / A*), line 2 = maze size,
# line 3 = entrance coords, line 4 = exit coords, line 5 = number of grid
# points, then one "x y z act1 act2 ..." line per grid point.
with open(input_file, 'r') as file:
    search_alg = file.readline().replace('\n', '')
    maze_size = [int(x) for x in next(file).split()]
    maze_in = [int(x) for x in next(file).split()]
    maze_out = [int(x) for x in next(file).split()]
    num_grids = int(next(file))
    for line in file:
        arr_grids.append([int(x) for x in line.split()])
#print(arr_grids)
# operation of the point
def action(in_point, act_num):
    """Return the coordinates reached by applying move *act_num* to *in_point*.

    Codes 1-6 are the six axis-aligned unit moves (+x, -x, +y, -y, +z, -z);
    codes 7-18 are the twelve diagonal moves that change two coordinates at
    once.  Unknown codes return the point unchanged.

    Fix: the original assigned `out_point = in_point` and mutated the caller's
    list in place; this version returns a new list and leaves the argument
    untouched (callers only compare the returned coordinates, so behaviour
    for them is unchanged).
    """
    # (dx, dy, dz) displacement for each move code.
    moves = {
        1: (1, 0, 0), 2: (-1, 0, 0),
        3: (0, 1, 0), 4: (0, -1, 0),
        5: (0, 0, 1), 6: (0, 0, -1),
        7: (1, 1, 0), 8: (1, -1, 0),
        9: (-1, 1, 0), 10: (-1, -1, 0),
        11: (1, 0, 1), 12: (1, 0, -1),
        13: (-1, 0, 1), 14: (-1, 0, -1),
        15: (0, 1, 1), 16: (0, 1, -1),
        17: (0, -1, 1), 18: (0, -1, -1),
    }
    dx, dy, dz = moves.get(act_num, (0, 0, 0))
    return [in_point[0] + dx, in_point[1] + dy, in_point[2] + dz]
def graph(num_grids, arr_grids):
    """Build the unweighted adjacency structure of the maze.

    Returns (graph_dic, point_dic): point_dic maps the grid index (as a
    string) to its [x, y, z] coordinates; graph_dic maps each index to the
    list of indices reachable via that grid point's allowed actions.
    """
    # Name every grid point "0", "1", ... and remember its coordinates.
    point_dic = {str(i): arr_grids[i][0:3] for i in range(num_grids)}
    graph_dic = {}
    for i in range(num_grids):
        neighbours = []
        # Columns 3+ of each row are the action codes allowed at this point.
        for act in arr_grids[i][3:]:
            target = action(arr_grids[i][0:3], act)
            # Linear scan for the grid point sitting at the target coords.
            for name, coords in point_dic.items():
                if coords == target:
                    neighbours.append(str(name))
        graph_dic[str(i)] = neighbours
    return graph_dic, point_dic
def graph_ucs(num_grids, arr_grids):
    """Build the weighted adjacency structure of the maze for UCS.

    Like graph(), but each neighbour entry is [index, edge_cost] where the
    cost is 10 for an axis-aligned move (codes 1-6) and 14 for a diagonal
    move (codes 7-18).
    """
    point_dic = {str(i): arr_grids[i][0:3] for i in range(num_grids)}
    graph_dic_ucs = {}
    for i in range(num_grids):
        neighbours = []
        for act in arr_grids[i][3:]:
            target = action(arr_grids[i][0:3], act)
            edge_cost = 14 if act > 6 else 10
            for name, coords in point_dic.items():
                if coords == target:
                    neighbours.append([str(name), edge_cost])
        graph_dic_ucs[str(i)] = neighbours
    return graph_dic_ucs, point_dic
def bfs(maze_size, maze_in, maze_out, graph,point_in):
    """Breadth-first search from maze_in to maze_out.

    Returns (path, length) where path is the list of node names from start
    to goal.  maze_size is unused; num_step/arr_step below are dead locals.
    Assumes both the entrance and exit coordinates exist in point_in.
    """
    num_step = 0
    arr_step = []
    explored = []
    queue = []
    # Translate the entrance/exit coordinates into node names.
    for point, values in point_in.items():
        if values == maze_in:
            start = point
        if values == maze_out:
            goal = point
    queue.append([start])
    # NOTE(review): when start == goal this only prints and then keeps
    # searching instead of returning the trivial path.
    if start == goal:
        print("start == goal")
    while queue:
        path = queue.pop(0)  # FIFO pop -> breadth-first order
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            #print(neighbours)
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # Goal test on generation (fine for BFS on unit edges).
                if neighbour == goal:
                    return new_path, len(new_path)
            explored.append(node)
def ucs(maze_size, maze_in, maze_out, graph_in, point_in):
    """Intended uniform-cost search from maze_in to maze_out.

    Returns (path, cost) on success, or [] when start == goal.

    NOTE(review): several defects worth confirming/fixing:
      * frontier.pop(0) takes the oldest entry, not the cheapest -- the
        frontier is a plain list, never kept as a priority queue (heapq is
        imported at the top of the file but unused), so this behaves like
        BFS with cost bookkeeping rather than true UCS.
      * frontier[i][1][0] compares against the *first* node of the stored
        path (always the start node), not its last node.
      * in_frontier is reset to False by the `else` on every non-matching
        frontier entry, so only the last entry scanned decides the flag.
    """
    path = []
    explored_nodes =list()
    in_frontier = False
    # Translate the entrance/exit coordinates into node names.
    for point, values in point_in.items():
        if values == maze_in:
            start = point
        if values == maze_out:
            goal = point
    if start == goal:
        return path
    path.append(start)
    path_cost = 0
    frontier = [(path_cost, path)]
    #print(frontier)
    while len(frontier) > 0:
        path_cost_now, path_now = frontier.pop(0)
        #print(path_cost_now)
        #print(path_now)
        current_node = path_now[-1]
        explored_nodes.append(current_node)
        if current_node == goal:
            return path_now, path_cost_now
        neighbours = graph_in[current_node]
        # Expand neighbours in ascending numeric order of their names.
        neighbours_list_int = [int(neighbour[0]) for neighbour in neighbours]
        neighbours_list_int.sort(reverse=False)
        neighbours_list_str = [str(neighbour) for neighbour in neighbours_list_int]
        #print(neighbours)
        #print(neighbours_list_str)
        for neighbour in neighbours_list_str:
            path_to_neigbhour = path_now.copy()
            path_to_neigbhour.append(neighbour)
            # Look up this neighbour's edge cost in the [name, cost] pairs.
            for node, cost in neighbours:
                if node == neighbour:
                    extra_cost = cost
            neighbour_cost = extra_cost + path_cost_now
            new_element = (neighbour_cost, path_to_neigbhour)
            for i in range(len(frontier)):
                if frontier[i][1][0] == neighbour:
                    in_frontier = True
                    neighbour_index = i
                    neighbour_old_cost = frontier[i][0]
                else:
                    in_frontier = False
            if (neighbour not in explored_nodes) and not in_frontier:
                frontier.append(new_element)
            elif in_frontier:
                # Replace the stored entry when we found a cheaper path.
                if neighbour_old_cost > neighbour_cost:
                    frontier.pop(neighbour_index)
                    frontier.append(new_element)
def output_write(point_in, path, num_step):
    """Write the BFS result file: total cost, step count, then one
    "x y z cost" line per step (cost 0 for the start, 1 for each move;
    the final line has no trailing newline)."""
    with open(output_file, 'w') as file:
        file.write(str(num_step-1)+'\n')   # total path cost (unit edges)
        file.write(str(num_step) + '\n')   # number of nodes on the path
        for i in range(num_step):
            if i == 0:
                for j in range(3):
                    file.write(str(point_in.get(path[i])[j]) + ' ')
                file.write('0'+'\n')
            elif i == num_step-1:
                for j in range(3):
                    file.write(str(point_in.get(path[i])[j]) + ' ')
                file.write('1')
            else:
                for j in range(3):
                    file.write(str(point_in.get(path[i])[j]) + ' ')
                file.write('1' + '\n')
def output_write_ucs(point_in, path, cost, cost_step):
    """Write the UCS result file: total cost, step count, then one
    "x y z step_cost" line per node (0 for the start; cost_step[i-1] for
    each subsequent node; the final line has no trailing newline)."""
    with open(output_file, 'w') as file:
        file.write(str(cost)+'\n')          # total path cost from ucs()
        file.write(str(len(path)) + '\n')   # number of nodes on the path
        for i in range(len(path)):
            if i == 0:
                for j in range(3):
                    file.write(str(point_in.get(path[i])[j]) + ' ')
                file.write('0'+'\n')
            elif i == len(path)-1:
                for j in range(3):
                    file.write(str(point_in.get(path[i])[j]) + ' ')
                file.write(str(cost_step[-1]))
            else:
                for j in range(3):
                    file.write(str(point_in.get(path[i])[j]) + ' ')
                file.write(str(cost_step[i-1]) + '\n')
# Dispatch on the algorithm named in the input file and write the result.
if search_alg == "BFS":
    graph_in, point_in = graph(num_grids, arr_grids)
    path, num_step = bfs(maze_size, maze_in, maze_out, graph_in, point_in)
    output_write(point_in, path, num_step)
elif search_alg == "UCS":
    graph_in, point_in = graph_ucs(num_grids, arr_grids)
    path, cost = ucs(maze_size, maze_in, maze_out, graph_in, point_in)
    # Recompute the per-step edge costs by looking up consecutive path
    # nodes in the weighted adjacency lists.
    cost_step = []
    for i in range(len(path)-1):
        neighbours = graph_in[path[i]]
        for node, value in neighbours:
            if path[i+1] == node:
                cost_step.append(value)
    output_write_ucs(point_in, path, cost, cost_step)
elif search_alg == "A*":
    # NOTE(review): a_star is not defined anywhere in this file as shown --
    # selecting "A*" in the input raises NameError.
    a_star()
| [
"noreply@github.com"
] | DamnPhD.noreply@github.com |
3da70a6e83ec1919aa9a643a5881a7315ec05d95 | 28715977c0764b5885f1847fbd4100703a67b682 | /manage.py | ac6f80a59bbbb06075218de6193190d75f53db90 | [
"Apache-2.0"
] | permissive | junaiddesai/timemanagementappdjango | 0ccf678ac152276be40374c31ac7b5ea911e0a71 | 3977f90ac3cef1b36f112ca86a3a9d9e1f730692 | refs/heads/master | 2021-01-20T22:11:53.600775 | 2016-06-10T12:17:05 | 2016-06-10T12:17:05 | 60,844,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 349 | py | #!/usr/bin/env python
"""
Command-line utility for administrative tasks.
"""
import os
import sys
if __name__ == "__main__":
    # Point Django at this project's settings unless already configured.
    os.environ.setdefault(
        "DJANGO_SETTINGS_MODULE",
        "HyperionApp.settings"
    )
    # Imported here (standard manage.py pattern) so a missing Django
    # install fails after the settings module is set.
    from django.core.management import execute_from_command_line
    execute_from_command_line(sys.argv)
| [
"noreply@github.com"
] | junaiddesai.noreply@github.com |
e1450556c70c9b5607a7ff05bc84adab3fea1d72 | 732b0b3e2ae0e6c498cfd2ed893de60b9fc22a32 | /tests/integration/actions/collections/test_direct_interactive_ee.py | efb5c28f74c6641d5927e5003b6c20613e8003f6 | [
"Apache-2.0"
] | permissive | didib/ansible-navigator | eb7b77c1df30b2e90b663383f0f76b6224e92c02 | 62fdbd05f25fb2d79133b3ab207f53ac2f2d6d36 | refs/heads/main | 2023-08-30T06:43:42.876079 | 2021-10-14T18:42:17 | 2021-10-14T18:42:17 | 425,540,819 | 0 | 0 | Apache-2.0 | 2021-11-07T15:27:54 | 2021-11-07T15:27:53 | null | UTF-8 | Python | false | false | 1,161 | py | """ collections direct from cli interactive with ee
"""
import pytest
from .base import BaseClass
# Launch command for the first step; inside an execution environment.
CLI = "ansible-navigator collections --execution-environment true"
# Each tuple is (step index, keys sent to the TUI, expected window comment).
testdata = [
    (0, CLI, "ansible-navigator collections browse window"),
    (1, ":0", "Browse testorg.coll_1 plugins window"),
    (2, ":0", "lookup_1 plugin docs window"),
    (3, ":back", "Back to browse testorg.coll_1 plugins window"),
    (4, ":1", "mod_1 plugin docs window"),
    (5, ":back", "Back to browse testorg.coll_1 plugins window"),
    (6, ":back", "Back to ansible-navigator collections browse window"),
    (7, ":1", "Browse testorg.coll_2 plugins window"),
    (8, ":0", "lookup_2 plugin docs window"),
    (9, ":back", "Back to browse testorg.coll_2 plugins window"),
    (10, ":1", "mod_2 plugin docs window"),
    (11, ":back", "Back to browse testorg.coll_2 plugins window"),
    (12, ":back", "Back to ansible-navigator collections browse window"),
]
@pytest.mark.parametrize("index, user_input, comment", testdata)
class Test(BaseClass):
    """Run the collections-browse TUI steps defined in testdata."""
    # Knobs consumed by BaseClass: interactive (TUI) mode, inside an
    # execution environment, without regenerating the fixture files.
    TEST_FOR_MODE = "interactive"
    EXECUTION_ENVIRONMENT_TEST = True
    UPDATE_FIXTURES = False
| [
"noreply@github.com"
] | didib.noreply@github.com |
9157e37ceaf9530fc7ba4bf94a177313c95e4aa7 | 37fd4d53f383d99d6983e9037177d227866be70c | /build/lib/GA/mlr_eval.py | ebff833dbb64c6180496b0d81d505b3305a7ae21 | [] | no_license | lisabang/deapVS | b77df45b4931447707ea2e5b732d136dd64d6e4e | 34c5d57a03044b8743cbfe062fc71c4fb8146c84 | refs/heads/master | 2020-05-23T19:37:49.830906 | 2019-11-14T23:15:34 | 2019-11-14T23:15:34 | 186,910,365 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,517 | py | import numpy as np
import sklearn.model_selection as skms
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
def mlr_r2(X, y):
    """Fit an ordinary least-squares model of y on X and return
    (R^2, adjusted R^2).

    Adjusted R^2 uses p = X.shape[1] predictors:
    1 - (1 - R^2) * (n - 1) / (n - p - 1).
    """
    model = LinearRegression()
    model.fit(X, y)
    # compute with formulas from the theory
    yhat = model.predict(X)
    SS_Residual = sum((y - yhat) ** 2)
    SS_Total = sum((y - np.mean(y)) ** 2)
    r_squared = 1 - (float(SS_Residual)) / SS_Total
    adjusted_r_squared = 1 - (1 - r_squared) * (len(y) - 1) / (len(y) - X.shape[1] - 1)
    return r_squared, adjusted_r_squared
def mlr_RMSE(x, y):
    """Root-mean-square error of the least-squares fit of y on x.

    x: pandas DataFrame of descriptors (x.values is used as the design
       matrix, with an intercept column of ones appended).
    y: 1-D numeric array-like response (must support .size and .var()).

    Returns sqrt(1 - R^2) * std(y) as a length-1 ndarray (lstsq packs the
    residual sum of squares into an array; the result is empty when lstsq
    reports no residuals, e.g. a rank-deficient design).

    Cleanup: removed dead locals from the original (columnnames, degfreedom
    and the unused adjusted-R^2) -- the returned value is unchanged.
    """
    npones = np.ones(len(y), float)
    A = np.column_stack([x.values, npones])
    # lstsq returns (solution, residual_SS, rank, singular_values).
    lstsq, residuals, rank, something = np.linalg.lstsq(A, y, rcond=-1)
    r2 = 1 - residuals / (y.size * y.var())
    RMSE = np.sqrt(1 - r2) * np.std(y)
    return RMSE
def kfoldmlr(xi, yi, **kwargs):
    """Collect per-fold MLR residuals over a k-fold split (q2-style CV).

    xi, yi: pandas objects (their .values arrays are split).
    kwargs: requires "nfolds" (number of folds) and "mean" (True -> return
        the scalar mean of all residual arrays, else the stacked ndarray).

    NOTE(review): mlrr() is not defined in this module as shown -- calling
    this raises NameError unless mlrr is provided elsewhere; it is also
    called twice per fold with identical arguments.
    """
    x = xi.values
    y = yi.values
    nfolds = kwargs["nfolds"]
    mean = kwargs["mean"]
    kf = skms.KFold(n_splits=nfolds)  # shuffle/random_state left at defaults
    y_hats = []
    print(kf)
    for train_index, test_index in kf.split(x):
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        coefficients = mlrr(x_train, y_train)[0]
        resids = mlrr(x_train, y_train)[1]
        y_hats.append(resids)
    # for e in y_hats:
    #     cleanyhats.append(float(e))
    stack = np.asarray(y_hats)
    if mean == True:
        return np.mean(stack)
    else:
        return stack
def kfoldmlrplot(xi, yi, **kwargs):
    """Plot the per-fold MLR fit for each training split of a k-fold CV.

    For every fold, scatters the training points and overlays the fitted
    line, then shows the figure (one blocking ``plt.show()`` per fold).

    Parameters (required, via **kwargs):
        nfolds: number of folds passed to ``KFold``.

    NOTE(review): relies on a module-level ``mlrr`` helper that is not
    defined in this file -- presumably returning (coefficients, residuals);
    confirm. The line-plot expression assumes a single feature with exactly
    two coefficients (slope, intercept) -- verify for multi-feature x.
    """
    # xi/yi are assumed to be pandas objects (``.values`` accessor).
    x = xi.values
    y = yi.values
    nfolds = kwargs["nfolds"]
    kf = skms.KFold(n_splits=nfolds)  # indices=None, shuffle=False, random_state=None)
    y_hats = []
    print(kf)
    for train_index, test_index in kf.split(x):
        x_train, x_test = x[train_index], x[test_index]
        y_train, y_test = y[train_index], y[test_index]
        # NOTE(review): mlrr is invoked twice per fold with identical
        # arguments; ``resids`` is computed but never used here.
        coefficients = mlrr(x_train, y_train)[0]
        resids = mlrr(x_train, y_train)[1]
        plt.plot(x_train, y_train, "o", label="Original data", markersize=5)
        plt.plot(
            x_train,
            coefficients[0] * x_train + coefficients[1],
            "r",
            label="Fitted line",
        )
        plt.legend()
        plt.show()
| [
"lisagbang@gmail.com"
] | lisagbang@gmail.com |
70ab2066e67e40a28a1319413f9475df390744ed | 3d56d74cb06f20a8621ba47666ebe9a8317dd2d8 | /cogs/entertaiment.py | 25f3ebe2cc8f191333711c4058ce5e062aa1659e | [] | no_license | JesusCrespo2823/queta-bot | 7ae73cd6a4a8e6a51d19f5a975a53a1ab25e6530 | f9cdbe67abbe3910cdc9dd05cf36e638f9a70662 | refs/heads/main | 2023-02-11T08:58:35.173392 | 2021-01-10T18:58:18 | 2021-01-10T18:58:18 | 328,455,584 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 548 | py | import discord
import time
import random
from discord.ext import commands
class Entertaiment(commands.Cog):
    """Entertainment commands (8ball, dice roll) for the bot.

    (Class name keeps its original spelling so existing loaders keep working.)
    """

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['8ball'])
    async def _8ball(self, ctx, *args):
        # Placeholder reply -- the actual 8-ball answers are not implemented.
        await ctx.send('8ball Command')

    @commands.command()
    async def roll(self, ctx):
        m = await ctx.send(':game_die: Lanzando...')
        # time.sleep(2) would freeze the whole event loop (every other
        # command stalls for 2 s); asyncio.sleep yields control instead.
        await asyncio.sleep(2)
        # randrange(7) produced 0-6; a six-sided die rolls 1-6 inclusive.
        await m.edit(content=f':game_die: Haz sacado {random.randint(1, 6)}')
def setup(bot):
    """Entry point used by discord.py's extension loader."""
    bot.add_cog(Entertaiment(bot))
| [
"jdcv2823@gmail.com"
] | jdcv2823@gmail.com |
1365899c4c23dcfbd172f2ce7c9f9b1f9dd1de0b | a8dd547a89ab90ea7a9a75bfddd0aec1b1b92565 | /shixun/jing/jing/spiders/dong.py | 0b8d34ab300d4fb38bea07f3f02a7427be61aeaf | [] | no_license | guangbao123456/asd | feacfd501e1f8dc8fff6cb65caaf39b85d325815 | 61d524078a141d857960c287f5ab90f73c1fe2b5 | refs/heads/master | 2020-04-09T01:29:20.251047 | 2018-12-07T01:34:00 | 2018-12-07T01:34:00 | 159,906,642 | 0 | 0 | null | 2018-12-07T01:34:01 | 2018-12-01T03:52:05 | null | UTF-8 | Python | false | false | 2,341 | py | # -*- coding: utf-8 -*-
import scrapy
import io,sys,time
import re
class DongSpider(scrapy.Spider):
    """Scrapy spider for JD.com phone search results.

    Scrapes the 30 statically-rendered listings from the search page, then
    requests the AJAX endpoint that serves the remaining lazily-loaded items,
    and finally follows each product link to its detail page.
    """
    name = 'dong'
    allowed_domains = []
    start_urls = ['https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&pvid=d74d569c033a409d92b02ea481435d0e']
    headers={'user-agent':'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36'}
    def parse(self, response):
        """Parse the static half of the search results, then request the AJAX half."""
        # Re-wrap stdout so Chinese product titles print on a GBK console.
        sys.stdout=io.TextIOWrapper(sys.stdout.buffer,encoding='gb18030')
        #print(response.text)
        # The first 30 items are rendered statically and can be scraped with XPath.
        all=response.xpath("//ul[@class='gl-warp clearfix']/li/div")
        for a in all:
            title=a.xpath("//div[@class='p-name p-name-type-2']/a/em/text()").extract()[0]
            price=a.xpath("./div[@class='p-price']/strong/i/text()").extract()[0]
            print(price)
        print(len(all))
        # The remaining 30 items load via an AJAX endpoint whose URL embeds a timestamp.
        t=time.time()
        # The timestamp must keep exactly 5 decimal places.
        tt='%.5f'%t
        url='https://search.jd.com/s_new.php?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&qrst=1&rt=1&stop=1&vt=2&cid2=653&cid3=655&page=2&s=28&scrolling=y&log_id='+str(tt)+'&tpl=3_M'#&show_items=100001172674,5089273,7694047,7437788,7357933,5089235,7081550,8024543,100000349372,6946605,7049459,5089225,6733024,7643003,7283905,8240587,7299782,8058010,7293056,8261721,7437786,5821455,8735304,100000651175,7437564,100000177756,6600258,8033419,100000287145,7651927
        yield scrapy.Request(url,callback=self.parse1,headers=self.headers)
    def parse1(self,response):
        """Parse the lazily-loaded items and follow each product detail link."""
        all = response.xpath("//li[@class='gl-item']/div")
        for b in all:
            title = b.xpath(".//div[@class='p-name p-name-type-2']/a/em/text()").extract()[0]
            href ='https:'+ b.xpath(".//div[@class='p-name p-name-type-2']/a/@href").extract()[0]
            price = b.xpath("./div[@class='p-price']/strong/i/text()").extract()[0]
            print(title,href)
            yield scrapy.Request(href,callback=self.parse2)
    def parse2(self,response):
        """Parse the product detail page: extract the product id and shop name."""
        print(response.url)
        # Product id is the path segment before ".html"; extracted but not yielded yet.
        key=re.findall('com/(.*?)\.html',response.url)[0]
        shop=response.xpath("//div[@class='popbox-inner']/div[@class='mt']/h3/a/text()").extract()[0]
        print(shop)
        #yield
"2451947362@qq.com"
] | 2451947362@qq.com |
a2c4acf973dbdfe06b5f9c458d116d1aec810c49 | f8fd9ef26cb69b7223436cc36576d95a6f23014e | /ex066.py | 6292c4017ce406a57c494e235de221b08a9d831d | [] | no_license | danilosp1/Exercicios-curso-em-video-python | dcc26452bc55354eb946a001a40d56f1902eab84 | f3e9e48a664aa892ccaf62f1c4be4bf1728bc716 | refs/heads/master | 2023-08-05T18:03:47.542057 | 2021-10-01T16:48:50 | 2021-10-01T16:48:50 | 412,544,994 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 192 | py | s = 0
val = 0
while True:
n = int(input('Digite um número [999 finaliza]: '))
if(n == 999):
break
val += 1
s += n
print(f'A soma dos {val} números digitados foi {s}') | [
"danilo.sp03@gmail.com"
] | danilo.sp03@gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.