repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
sonir/vsyn_model | refs/heads/master | from sonilab import timed_interpolation
class Shape:
    """A drawable primitive addressed by an OSC-style type string.

    To instantiate, pass two arguments:
      * type -- the shape type, also used as the address for the OSC message
        (e.g. /circle, /triangle, /square, /rect, /line, /arc, /wave).
      * name -- a unique name for each shape object; uniqueness must be
        guaranteed by the caller.
    """

    # Parameters stored as plain attributes (no timed interpolation).
    _PLAIN_ATTRS = ("uid", "active", "fill", "name", "type")

    def __init__(self, type, name):  # 'type' kept for API compatibility
        self.uid = 0
        self.type = type
        self.name = name
        self.active = 0
        # Positional parameters start centred at 0.5; all others at 0.137.
        self._x1 = self._make_param(0.5)
        self._y1 = self._make_param(0.5)
        self._x2 = self._make_param(0.5)
        self._y2 = self._make_param(0.5)
        self._size = self._make_param(0.137)
        self._height = self._make_param(0.137)
        self._angle = self._make_param(0.137)
        self._freq = self._make_param(0.137)
        self._amp = self._make_param(0.137)
        self._phase = self._make_param(0.137)
        self._thick = self._make_param(0.137)
        self.fill = 1

    @staticmethod
    def _make_param(initial):
        """Create a TimedInterpolation pre-set to *initial* with no transition."""
        param = timed_interpolation.TimedInterpolation()
        param.set(initial, 0.0)
        return param

    def get_primitive(self):
        """Return (type, params) with the current (interpolated) parameter values.

        The parameter layout depends on the shape type; an unknown type yields
        an empty parameter list.
        """
        if self.type == "/circle":
            params = [self._x1.update(), self._y1.update(), self._size.update(), self.fill]
        elif self.type == "/triangle":
            params = [self._x1.update(), self._y1.update(), self._size.update(), self._angle.update(), self.fill]
        elif self.type == "/square":
            params = [self._x1.update(), self._y1.update(), self._size.update(), self._angle.update(), self.fill]
        elif self.type == "/rect":
            params = [self._x1.update(), self._y1.update(), self._x2.update(), self._y2.update(), self._angle.update(), self.fill]
        elif self.type == "/line":
            params = [self._x1.update(), self._y1.update(), self._x2.update(), self._y2.update(), self._thick.update()]
        elif self.type == "/arc":
            params = [self._x1.update(), self._y1.update(), self._x2.update(), self._y2.update(), self._height.update()]
        elif self.type == "/wave":
            params = [self._x1.update(), self._y1.update(), self._x2.update(), self._y2.update(), self._freq.update(), self._amp.update(), self._phase.update(), self._thick.update()]
        else:
            # BUGFIX: 'params' used to be left unbound here, so the return
            # below raised NameError for unknown types.
            print("---- Shape.send() :: Unknown type was set !!")
            params = []
        return (self.type, params)

    def get(self, variable):
        """Return a parameter value.

        Plain attributes (uid, active, fill, name, type) are returned
        directly; any other name is read from its interpolator via .update().
        Replaces the original exec()-based string dispatch with getattr.
        """
        if variable in self._PLAIN_ATTRS:
            return getattr(self, variable)
        return getattr(self, "_" + variable).update()

    def set(self, variable, *args):
        """Set parameter *variable* to args[0].

        For interpolated parameters an optional args[1] gives the transition
        duration in seconds (default 0.0 = immediate). Does nothing when no
        value is given. Replaces the original exec()-based assignment, which
        also broke on values whose str() is not a valid Python literal.
        """
        if not args:
            return
        val = args[0]
        if variable in self._PLAIN_ATTRS:
            setattr(self, variable, val)
            return
        duration = args[1] if len(args) == 2 else 0.0
        getattr(self, "_" + variable).set(val, duration)
| Python | 108 | 38.388889 | 182 | /shape.py | 0.536928 | 0.51442 |
sonir/vsyn_model | refs/heads/master | from sonilab import event
def run(array):
    """Fire a /send event for every (address, params) element of *array*."""
    for elm in array:
        event.bang("/send", elm[0], elm[1])
| Python | 7 | 20.714285 | 41 | /send_all.py | 0.548387 | 0.535484 |
sonir/vsyn_model | refs/heads/master | from sonilab import event
import send_all
def send (adr, params):
    """Debug handler for the /send event: print adr and params on one line."""
    # Python 2 print statements; the trailing commas suppress the newline.
    print adr , " : " ,
    for elm in params :
        print elm ,
    print " /// "
# Register the handler for /send, then push three test messages through
# send_all.run(); each element is an (address, params) tuple.
event.add("/send" , send)
array = []
array.append( ("/test1",[1,'a']) )
array.append( ("/test2",[2,'b']) )
array.append( ("/test3",[3,'c']) )
send_all.run(array)
| Python | 18 | 16.722221 | 34 | /ut_send_all.py | 0.54321 | 0.524691 |
sonir/vsyn_model | refs/heads/master | import shape
from sonilab import sl_metro
# Unit-test / demo script for shape.Shape (Python 2: bare `print` statements).
# Exercises every shape type, the get/set API and timed interpolation.
metro = sl_metro.Metro(1.0)
shape.Shape.__doc__
obj = shape.Shape("/circle" , "foo")
# obj.type = "SQUARE"
obj.active = True
obj.set("x1" , 0.1)
obj.set("y1" , 0.2)
obj.set("y1" , 0.2)
obj.set("x2" , 0.3)
obj.set("y2" , 4.0)
obj.set("size" , 0.131)
obj.set("height" , 0.132)
obj.set("angle" , 0.133)
obj.set("freq" , 0.134)
obj.set("amp" , 0.135)
obj.set("phase" , 0.136)
obj.set("thick" , 0.139)
obj.fill = False
#check all parameters with get method (True == 1 and False == 0 in Python)
assert obj.get("type") == "/circle"
assert obj.get("name") == "foo"
assert obj.get("active") == 1
assert obj.get("x1") == 0.1
assert obj.get("y1") == 0.2
assert obj.get("x2") == 0.3
assert obj.get("y2") == 4.0
assert obj.get("size") == 0.131
assert obj.get("height") == 0.132
assert obj.get("angle") == 0.133
assert obj.get("freq") == 0.134
assert obj.get("amp") == 0.135
assert obj.get("phase") == 0.136
assert obj.get("thick") == 0.139
assert obj.get("fill") == 0
#Test parameter managements
obj.set("type" , "/circle") #Test set parameter with set method
rt = obj.get_primitive()
assert rt[0] == "/circle"
params = rt[1]
assert params[0] == 0.1
assert params[1] == 0.2
assert params[2] == 0.131
assert params[3] == 0
#Triangle Test
obj.set("type" , "/triangle")
rt = obj.get_primitive()
assert rt[0] == "/triangle"
params = rt[1]
assert params[0] == 0.1
assert params[1] == 0.2
assert params[2] == 0.131
assert params[3] == 0.133
assert params[4] == 0
#Square Test
obj.set("type" , "/square")
rt = obj.get_primitive()
assert rt[0] == "/square"
params = rt[1]
assert params[0] == 0.1
assert params[1] == 0.2
assert params[2] == 0.131
assert params[3] == 0.133
assert params[4] == 0
#Rect Test
obj.set("type" , "/rect")
rt = obj.get_primitive()
assert rt[0] == "/rect"
params = rt[1]
assert params[0] == 0.1
assert params[1] == 0.2
assert params[2] == 0.3
assert params[3] == 4.0
assert params[4] == 0.133
assert params[5] == 0
#Line Test
obj.set("type" , "/line")
rt = obj.get_primitive()
assert rt[0] == "/line"
params = rt[1]
assert params[0] == 0.1
assert params[1] == 0.2
assert params[2] == 0.3
assert params[3] == 4.0
assert params[4] == 0.139
#ARC Test
obj.set("type" , "/arc")
rt = obj.get_primitive()
assert rt[0] == "/arc"
params = rt[1]
assert params[0] == 0.1
assert params[1] == 0.2
assert params[2] == 0.3
assert params[3] == 4.0
assert params[4] == 0.132
#WAVE Test
obj.set("type" , "/wave")
rt = obj.get_primitive()
assert rt[0] == "/wave"
params = rt[1]
assert params[0] == 0.1
assert params[1] == 0.2
assert params[2] == 0.3
assert params[3] == 4.0
assert params[4] == 0.134
assert params[5] == 0.135
assert params[6] == 0.136
assert params[7] == 0.139
#TEST .set method with int
obj.set("uid" , 137)
assert obj.uid == 137
obj.set("active" , 138)
assert obj.active == 138
obj.set("fill" , 139)
assert obj.fill == 139
# TEST .set method with string
obj.set("type" , "str_test_for_type")
assert obj.type == "str_test_for_type"
obj.set("name" , "str_test_for_name")
assert obj.name == "str_test_for_name"
#restore the shape type
obj.set("type" , "/wave")
obj.set("x1" , 0.0)
print "Basically, you should use setter and getter methods."
print "ex obj.set(\"X1\", 2.0)\n"
#interpolation demo: poll once per second until x1 reaches its target
print "If you set variables with second as second argment then the parameter thanged with interpolation."
print "ex. obj.set(\"x1\" , 10.0, 10.0) # <- means make x1 value change to 10.0 with 10.0 seconds"
obj.set("x1" , 10.0, 10.0)
while True:
    if metro.update():
        tmp = obj.get_primitive()
        params = tmp[1]
        print params[0]
        if params[0]==10.0:
            break
print "OK"
| Python | 160 | 21.68125 | 105 | /ut_shape.py | 0.62965 | 0.55084 |
sonir/vsyn_model | refs/heads/master | import time
import shapes, shape
# Unit-test / demo script for the shapes registry module (Python 2 prints).
circle1 = shape.Shape("/circle" , "circle1")
rect1 = shape.Shape("/rect" , "rect1")
shapes.add(circle1.name, circle1)
shapes.add(rect1.name, rect1)
shapes.print_all()
#Check set UID: shapes.add assigns sequential uids, reported as params[0]
tupple_adr_and_params1 = shapes.get_primitive(circle1.name)
tupple_adr_and_params2 = shapes.get_primitive(rect1.name)
assert tupple_adr_and_params1[1][0] == 0
assert tupple_adr_and_params2[1][0] == 1
#check get_all
all_obj = shapes.get_all()
for elm in all_obj:
    obj = elm[1]
    print elm[0], ":" , obj[0], "," , obj[1], "," , obj[2], "," , obj[3]
#How to write and read each shape
shapes.set("circle1" , "x1", 777.0) #You can set plural parameters with set method
circle1 = shapes.get("circle1") #You can each shape objects with get method
assert circle1.get("x1") == 777.0
#You can set param with time transition
shapes.set("circle1" , "x1", 700.0 , 2.0) #You can set plural parameters with set method
circle1._x1.print_params()
while circle1.get("x1") != 700.0:
    print circle1.get("x1") #print the transition
    time.sleep(0.1)
#You can see all objects and the parameters with print_all()
shapes.print_all()
print "OK"
| Python | 44 | 25.34091 | 88 | /ut_shapes.py | 0.686799 | 0.63503 |
darkrsw/inference | refs/heads/master | """
A checker for mlperf inference submissions
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import collections
import json
import logging
import os
import re
import sys
import time
# pylint: disable=missing-docstring
# Module logger used by every check below.
logging.basicConfig(level=logging.INFO)
log = logging.getLogger("main")
# Directory names accepted under results/<system>/<model>.
VALID_MODELS = ["ssd-small", "ssd-large", "mobilenet", "resnet", "gnmt"]
VALID_DIVISIONS = ["open", "closed"]
# Files every performance run directory must contain; accuracy runs add accuracy.txt.
REQUIRED_PERF_FILES = ["mlperf_log_accuracy.json", "mlperf_log_summary.txt", "mlperf_log_detail.txt"]
REQUIRED_ACC_FILES = REQUIRED_PERF_FILES + ["accuracy.txt"]
REQUIRED_MEASURE_FILES = ["mlperf.conf", "user.conf", "README.md"]
# Minimum performance_sample_count per model (alias names listed alongside).
PERFORMANCE_SAMPLE_COUNT = {
    "mobilenet": 1024,
    "resnet50": 1024,
    "resnet": 1024,
    "ssd-mobilenet": 256,
    "ssd-small": 256,
    "ssd-resnet34": 64,
    "ssd-large": 64,
    "gnmt": 3903900,
}
def get_args():
    """Parse the command line for the submission checker."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--input", required=True, help="submission directory")
    arg_parser.add_argument("--submitter", help="filter to submitter")
    return arg_parser.parse_args()
def list_dir(*path):
    """Return the names of the sub-directories directly under *path*."""
    base = os.path.join(*path)
    return [entry for entry in os.listdir(base)
            if os.path.isdir(os.path.join(base, entry))]
def list_files(*path):
    """Return the names of the regular files directly under *path*."""
    base = os.path.join(*path)
    return [entry for entry in os.listdir(base)
            if os.path.isfile(os.path.join(base, entry))]
def split_path(m):
    """Split a path into components, accepting both / and \\ separators."""
    normalized = m.replace("\\", "/")
    return normalized.split("/")
def check_accuracy_dir(model, dir):
    """Return True when dir/accuracy.txt reports an accuracy, mAP or BLEU score.

    Also warns (without failing the run) when mlperf_log_detail.txt contains
    ERROR lines.
    """
    # look for: accuracy=... or mAP=... or BLEU: ...
    score_patterns = (
        r"^accuracy=([\d\.]+).*",
        r"^mAP=([\d\.]+).*",
        r"^BLEU\:\s*([\d\.]+).*",
    )
    is_valid = False
    with open(os.path.join(dir, "accuracy.txt"), "r") as f:
        for line in f:
            if any(re.match(pattern, line) for pattern in score_patterns):
                is_valid = True
                break
    # check if there are any errors in the detailed log
    fname = os.path.join(dir, "mlperf_log_detail.txt")
    with open(fname, "r") as f:
        for line in f:
            if "ERROR" in line:
                # TODO: should this be a failed run?
                log.warning("{} contains errors".format(fname))
    return is_valid
def check_performance_dir(model, dir):
    """Return True when *dir* holds a VALID performance run for *model*.

    Parses mlperf_log_summary.txt for "Result is: VALID" and for key/value
    pairs, and checks performance_sample_count against the per-model minimum.
    ERROR lines in mlperf_log_detail.txt are logged but do not fail the run.
    """
    is_valid = False
    rt = {}
    # look for: Result is: VALID
    fname = os.path.join(dir, "mlperf_log_summary.txt")
    with open(fname, "r") as f:
        for line in f:
            m = re.match(r"^Result\s+is\s*\:\s+VALID", line)
            if m:
                is_valid = True
            m = re.match(r"^\s*([\w\s.\(\)\/]+)\s*\:\s*([\d\.]+).*", line)
            if m:
                rt[m.group(1).strip()] = m.group(2).strip()
    # BUGFIX: the summary may not contain performance_sample_count at all;
    # treat a missing entry as 0 instead of raising KeyError.
    if int(rt.get("performance_sample_count", 0)) < PERFORMANCE_SAMPLE_COUNT[model]:
        log.error("{} performance_sample_count should be {}".format(fname, PERFORMANCE_SAMPLE_COUNT[model]))
        is_valid = False
    # check if there are any errors in the detailed log
    fname = os.path.join(dir, "mlperf_log_detail.txt")
    with open(fname, "r") as f:
        for line in f:
            if "ERROR" in line:
                # TODO: does this make the run fail?
                log.warning("{} contains errors".format(fname))
    return is_valid
def files_diff(list1, list2):
    """Return a list of files that are missing or added.

    Optional artifacts (mlperf_log_trace.json, results.json) are ignored on
    the actual-files side. The larger list is diffed against the smaller one.
    BUGFIX: the original mutated the caller's *list1* in place and swallowed
    all exceptions with a bare except; this version works on a filtered copy.
    """
    if list1 and list2:
        optional = ("mlperf_log_trace.json", "results.json")
        list1 = [f for f in list1 if f not in optional]
        if len(list1) > len(list2):
            return list(set(list1) - set(list2))
        return list(set(list2) - set(list1))
    return []
def check_results_dir(dir, filter_submitter):
    """Walk <division>/<submitter>/results and validate every model/scenario.

    Returns (good_submissions, bad_submissions) where good_submissions is a
    list of result paths and bad_submissions maps a path to its error string.
    NOTE(review): the *dir* parameter is unused -- the walk starts at "." and
    relies on the caller having os.chdir()'d into the submission tree first.
    """
    good_submissions = []
    bad_submissions = {}
    for division in list_dir("."):
        for submitter in list_dir(division):
            if filter_submitter and submitter != filter_submitter:
                continue
            results_path = os.path.join(division, submitter, "results")
            for system_desc in list_dir(results_path):
                # check if system_id is good. Report failure for each model/scenario.
                system_id_json = os.path.join(division, submitter, "systems", system_desc + ".json")
                device_bad = not os.path.exists(system_id_json)
                for model in list_dir(results_path, system_desc):
                    if model not in VALID_MODELS:
                        bad_submissions[os.path.join(system_desc, model)] = \
                            "{} has an invalid model name {}".format(os.path.join(results_path, system_desc), model)
                        log.error("{} has an invalid model name {}".format(os.path.join(results_path, system_desc), model))
                        continue
                    for scenario in list_dir(results_path, system_desc, model):
                        name = os.path.join(results_path, system_desc, model, scenario)
                        acc_path = os.path.join(name, "accuracy")
                        if not os.path.exists(os.path.join(acc_path, "accuracy.txt")):
                            log.error("{} has no accuracy.txt. Generate it with accuracy-imagenet.py or accuracy-coco.py or "
                                      "process_accuracy.py".format(acc_path))
                        diff = files_diff(list_files(acc_path), REQUIRED_ACC_FILES)
                        if diff:
                            bad_submissions[name] = "{} has file list mismatch ({})".format(acc_path, diff)
                            continue
                        if not check_accuracy_dir(model, acc_path):
                            bad_submissions[name] = "{} has issues".format(acc_path)
                            continue
                        # Server scenarios require five performance runs; everything else one.
                        n = ["1"]
                        if scenario in ["Server"]:
                            n = ["1", "2", "3", "4", "5"]
                        for i in n:
                            perf_path = os.path.join(name, "performance", "run_" + str(i))
                            diff = files_diff(list_files(perf_path), REQUIRED_PERF_FILES)
                            if diff:
                                bad_submissions[name] = "{} has file list mismatch ({})".format(perf_path, diff)
                                continue
                            if not check_performance_dir(model, perf_path):
                                bad_submissions[name] = "{} has issues".format(perf_path)
                                continue
                        # A missing systems/<id>.json marks every result under it bad.
                        if device_bad:
                            bad_submissions[name] = "{}: no such system id {}".format(name, system_desc)
                        else:
                            good_submissions.append(name)
    for k, v in bad_submissions.items():
        log.error(v)
    for name in good_submissions:
        log.info("{} OK".format(name))
    return good_submissions, bad_submissions
def compare_json(fname, template, errors):
    """Validate the JSON file *fname* against *template*.

    *template* maps field names to "required" (or anything else for optional).
    Problems are appended to *errors*; returns True when no new errors were
    added by this call.
    """
    error_count = len(errors)
    try:
        with open(fname, "r") as f:
            j = json.load(f)
        # make sure all required sections/fields are there
        for k, v in template.items():
            sz = j.get(k)
            if sz is None and v == "required":
                errors.append("{} field {} missing".format(fname, k))
        # make sure no undefined sections/fields are in the meta data
        for k in j:
            if template.get(k) is None:
                # BUGFIX: typo "unknwon" -> "unknown" in the reported message.
                errors.append("{} has unknown field {}".format(fname, k))
    except Exception as ex:
        # Deliberately broad: any IO/parse failure is recorded as an error.
        errors.append("{} unexpected error {}".format(fname, ex))
    return error_count == len(errors)
def check_system_desc_id(good_submissions, systems_json):
    """Validate the systems/<system_desc>.json metadata of each good submission.

    Checks the division directory name, the JSON schema (via compare_json) and
    that the submitter/division fields match the directory layout. Returns the
    list of error strings (each is also logged).
    """
    errors = []
    checked = set()
    for submission in good_submissions:
        parts = split_path(submission)
        system_desc = parts[3]
        submitter = parts[1]
        division = parts[0]
        if division not in VALID_DIVISIONS:
            # BUGFIX: the original referenced j["submitter"] here before j was
            # loaded, raising NameError whenever the division was invalid.
            errors.append("{} has invalid division {}".format(submission, division))
            continue
        fname = os.path.join(parts[0], parts[1], "systems", system_desc + ".json")
        # Each systems json is validated only once even if shared by many results.
        if fname not in checked:
            checked.add(fname)
            if not compare_json(fname, systems_json, errors):
                continue
            with open(fname, "r") as f:
                j = json.load(f)
            if j["submitter"] != submitter:
                errors.append("{} has submitter {}, directory has {}".format(fname, j["submitter"], submitter))
                continue
            if j["division"] != division:
                errors.append("{} has division {}, division has {}".format(fname, j["division"], division))
                continue
        log.info("{} OK".format(fname))
    if errors:
        for i in errors:
            log.error(i)
    return errors
def check_measurement_dir(good_submissions, systems_imp_json):
    """Validate measurements/<system>/<model>/<scenario> for each submission.

    Checks the required measurement files, locates the per-implementation
    <system_desc>_<impl>[_<scenario>].json file, validates it against
    *systems_imp_json* and verifies the matching code/<model>/<impl> dir
    exists. Returns the list of error strings (each is also logged).
    """
    errors = []
    for submission in good_submissions:
        parts = split_path(submission)
        system_desc = parts[3]
        measurement_dir = os.path.join(parts[0], parts[1], "measurements", system_desc)
        if not os.path.exists(measurement_dir):
            errors.append("{} directory missing".format(measurement_dir))
            continue
        model = parts[4]
        scenario = parts[5]
        fname = os.path.join(measurement_dir, model, scenario)
        files = list_files(fname)
        system_file = None
        for i in REQUIRED_MEASURE_FILES:
            if i not in files:
                errors.append("{} is missing {}".format(fname, i))
        # Prefer the scenario-specific json; fall back to the generic one.
        # 'end' records the suffix length so the impl name can be sliced out.
        for i in files:
            if i.startswith(system_desc) and i.endswith("_" + scenario + ".json"):
                system_file = i
                end = len("_" + scenario + ".json")
                break
            elif i.startswith(system_desc) and i.endswith(".json"):
                system_file = i
                end = len(".json")
                break
        if system_file:
            compare_json(os.path.join(fname, system_file), systems_imp_json, errors)
            # impl = what sits between "<system_desc>_" and the suffix.
            impl = system_file[len(system_desc) + 1:-end]
            code_dir = os.path.join(parts[0], parts[1], "code", model, impl)
            if not os.path.exists(code_dir):
                errors.append("{} is missing".format(code_dir))
            else:
                log.info("{} OK".format(fname))
        else:
            errors.append("{} is missing {}*.json".format(fname, system_desc))
    if errors:
        for i in errors:
            log.error(i)
    return errors
def main():
    """Entry point: run all submission checks.

    Loads the two schema templates next to this script, chdirs into the
    submission tree and runs the three check passes. Returns the process
    exit code (0 = OK, 1 = errors found).
    """
    args = get_args()
    script_path = os.path.dirname(sys.argv[0])
    with open(os.path.join(script_path, "system_desc_id.json"), "r") as f:
        systems_json = json.load(f)
    with open(os.path.join(script_path, "system_desc_id_imp.json"), "r") as f:
        systems_imp_json = json.load(f)
    os.chdir(args.input)
    # 1. check results directory
    good_submissions, bad_submissions = check_results_dir(args.input, args.submitter)
    # 2. check the meta data under systems
    meta_errors = check_system_desc_id(good_submissions, systems_json)
    # 3. check measurement and code dir
    measurement_errors = check_measurement_dir(good_submissions, systems_imp_json)
    if bad_submissions or meta_errors or measurement_errors:
        # BUGFIX: typo "errros" -> "errors" in the summary message.
        log.error("SUMMARY: there are errors in the submission")
        return 1
    else:
        log.info("SUMMARY: submission looks OK")
        return 0
# Script entry point: exit status 1 signals a failed submission check.
if __name__ == "__main__":
    sys.exit(main())
| Python | 317 | 36.602524 | 125 | /v0.5/tools/submission/submission-checker.py | 0.540856 | 0.534899 |
znc-sistemas/django-bootstrap-form | refs/heads/master | import re
from math import floor
from django import forms
from django.template import Context
from django.template.loader import get_template
from django import template
from bootstrapform import config
register = template.Library()
@register.filter
def bootstrap(element):
    """Render *element* (form, formset or bound field) with default Bootstrap markup."""
    return render(element, {'label': '', 'value': '', 'single_value': ''})
@register.filter
def bootstrap_inline(element):
    """Render *element* for an inline form: labels get the sr-only class."""
    classes = {'label': 'sr-only', 'value': '', 'single_value': ''}
    return render(element, classes)
@register.filter
def bootstrap_horizontal(element, label_cols=''):
    """Render *element* as a Bootstrap horizontal form.

    *label_cols* is a space-separated list of column classes for the label
    (default 'col-sm-2 col-lg-2'); matching value/offset classes are derived
    from it using config.BOOTSTRAP_COLUMN_COUNT.
    BUGFIX: the default used to be a mutable dict ({}) although the value is
    used as a string; '' keeps the falsy default without the type confusion.
    """
    if not label_cols:
        label_cols = 'col-sm-2 col-lg-2'
    markup_classes = {
        'label': label_cols,
        'value': '',
        'single_value': ''
    }
    for cl in label_cols.split(' '):
        splited_class = cl.split('-')
        try:
            value_nb_cols = int(splited_class[-1])
        except ValueError:
            value_nb_cols = config.BOOTSTRAP_COLUMN_COUNT
        if value_nb_cols >= config.BOOTSTRAP_COLUMN_COUNT:
            # BUGFIX: '-'.join below requires strings; the original stored the
            # raw int here, which made the join raise TypeError.
            splited_class[-1] = str(config.BOOTSTRAP_COLUMN_COUNT)
        else:
            offset_class = cl.split('-')
            offset_class[-1] = 'offset-' + str(value_nb_cols)
            splited_class[-1] = str(config.BOOTSTRAP_COLUMN_COUNT - value_nb_cols)
            markup_classes['single_value'] += ' ' + '-'.join(offset_class)
        markup_classes['single_value'] += ' ' + '-'.join(splited_class)
        markup_classes['value'] += ' ' + '-'.join(splited_class)
    return render(element, markup_classes)
def add_input_classes(field):
    """Append 'form-control' to the widget class of ordinary input fields.

    Checkbox, multi-checkbox, radio and file widgets are left untouched.
    """
    if is_checkbox(field) or is_multiple_checkbox(field) or is_radio(field) or is_file(field):
        return
    attrs = field.field.widget.attrs
    attrs['class'] = attrs.get('class', '') + ' form-control'
def render(element, markup_classes):
    """Render a bound field, a formset or a form with the matching template.

    Dispatch: BoundField -> field.html; objects with a management_form
    attribute (formsets) -> formset.html; anything else -> form.html.
    """
    element_type = element.__class__.__name__.lower()
    if element_type == 'boundfield':
        add_input_classes(element)
        # NOTE: this local 'template' shadows the imported django.template
        # module for the rest of the function.
        template = get_template("bootstrapform/field.html")
        context = Context({'field': element, 'classes': markup_classes})
    else:
        has_management = getattr(element, 'management_form', None)
        if has_management:
            for form in element.forms:
                for field in form.visible_fields():
                    add_input_classes(field)
            template = get_template("bootstrapform/formset.html")
            context = Context({'formset': element, 'classes': markup_classes})
        else:
            for field in element.visible_fields():
                add_input_classes(field)
            template = get_template("bootstrapform/form.html")
            context = Context({'form': element, 'classes': markup_classes})
    return template.render(context)
@register.filter
def is_checkbox(field):
    """True when the field renders with a CheckboxInput widget."""
    widget = field.field.widget
    return isinstance(widget, forms.CheckboxInput)
@register.filter
def is_multiple_checkbox(field):
    """True when the field renders with a CheckboxSelectMultiple widget."""
    widget = field.field.widget
    return isinstance(widget, forms.CheckboxSelectMultiple)
@register.filter
def is_radio(field):
    """True when the field renders with a RadioSelect widget."""
    widget = field.field.widget
    return isinstance(widget, forms.RadioSelect)
@register.filter
def is_file(field):
    """True when the field renders with a FileInput widget."""
    widget = field.field.widget
    return isinstance(widget, forms.FileInput)
@register.filter
def pagination(page, pages_to_show=11):
    """
    Generate Bootstrap pagination links from a page object
    """
    template_obj = get_template("bootstrapform/pagination.html")
    page_context = get_pagination_context(page, pages_to_show)
    return template_obj.render(Context(page_context))
@register.inclusion_tag("bootstrapform/pagination.html")
def bootstrap_pagination(page, **kwargs):
    """
    Render pagination for a page
    """
    # dict(kwargs, page=page) copies kwargs and (over)writes 'page', matching
    # the original copy-then-assign behaviour.
    return get_pagination_context(**dict(kwargs, page=page))
def get_pagination_context(page, pages_to_show=11, url=None, size=None, align=None, extra=None):
    """
    Generate Bootstrap pagination context from a page object

    Computes a window of page numbers centred near the current page, plus
    optional fast-back/fast-forward targets, URL handling and CSS classes.
    NOTE(review): uses the Python 2-only builtin `unicode` below; this
    function would need changes to run on Python 3.
    """
    pages_to_show = int(pages_to_show)
    if pages_to_show < 1:
        raise ValueError("Pagination pages_to_show should be a positive integer, you specified %s" % pages_to_show)
    num_pages = page.paginator.num_pages
    current_page = page.number
    # Half the window (minus one), clamped at zero for tiny windows.
    half_page_num = int(floor(pages_to_show / 2)) - 1
    if half_page_num < 0:
        half_page_num = 0
    first_page = current_page - half_page_num
    if first_page <= 1:
        first_page = 1
    # pages_back is the "jump back" target, only offered when the window
    # does not already start at page 1.
    if first_page > 1:
        pages_back = first_page - half_page_num
        if pages_back < 1:
            pages_back = 1
    else:
        pages_back = None
    last_page = first_page + pages_to_show - 1
    if pages_back is None:
        last_page += 1
    if last_page > num_pages:
        last_page = num_pages
    # pages_forward is the symmetric "jump ahead" target.
    if last_page < num_pages:
        pages_forward = last_page + half_page_num
        if pages_forward > num_pages:
            pages_forward = num_pages
    else:
        pages_forward = None
    if first_page > 1:
        # pages_back is not None here because first_page > 1 set it above.
        first_page -= 1
        if pages_back > 1:
            pages_back -= 1
        else:
            pages_back = None
    pages_shown = []
    for i in range(first_page, last_page + 1):
        pages_shown.append(i)
    # Append proper character to url
    if url:
        # Remove existing page GET parameters
        url = unicode(url)
        url = re.sub(r'\?page\=[^\&]+', u'?', url)
        url = re.sub(r'\&page\=[^\&]+', u'', url)
        # Append proper separator
        if u'?' in url:
            url += u'&'
        else:
            url += u'?'
    # Append extra string to url
    if extra:
        if not url:
            url = u'?'
        url += unicode(extra) + u'&'
    if url:
        url = url.replace(u'?&', u'?')
    # Set CSS classes, see http://twitter.github.io/bootstrap/components.html#pagination
    pagination_css_classes = ['pagination']
    if size in ['small', 'large', 'mini']:
        pagination_css_classes.append('pagination-%s' % size)
    if align == 'center':
        pagination_css_classes.append('pagination-centered')
    elif align == 'right':
        pagination_css_classes.append('pagination-right')
    # Build context object
    return {
        'bootstrap_pagination_url': url,
        'num_pages': num_pages,
        'current_page': current_page,
        'first_page': first_page,
        'last_page': last_page,
        'pages_shown': pages_shown,
        'pages_back': pages_back,
        'pages_forward': pages_forward,
        'pagination_css_classes': ' '.join(pagination_css_classes),
    }
| Python | 209 | 30.607655 | 115 | /bootstrapform/templatetags/bootstrap.py | 0.610657 | 0.60657 |
Jinho1011/Wesing | refs/heads/master | from django.urls import reverse
from django.shortcuts import render, redirect
from django.forms import modelformset_factory
from django.views.generic import *
from .models import *
class IndexView(ListView):
    """List all Song objects using song/song_list.html."""
    model = Song
    template_name = 'song/song_list.html'

    def get_context_data(self, **kwargs):
        context = super(IndexView, self).get_context_data(**kwargs)
        context.update({
            'navbar_title': 'AAC로 노래해요',
            'navbar_subtitle': 'AAC로 노래해요',
        })
        return context
class DetailView(DetailView):
    # NOTE(review): this class shadows the django.views.generic DetailView it
    # inherits from (pulled in via "from django.views.generic import *"); it
    # works because the base name is resolved before the class statement binds,
    # but renaming would break any urls.py referencing this class, so it is
    # left as-is.
    model = Song
    template_name = 'song/song_detail.html'

    def get_context_data(self, **kwargs):
        # NOTE(review): this queryset is *all* Image rows (with their related
        # song pre-joined), not only images belonging to the displayed song --
        # confirm whether a filter on the current song was intended.
        image = Image.objects.select_related('song')
        context = super(DetailView, self).get_context_data(**kwargs)
        context['navbar_title'] = 'AAC로 노래해요'
        context['navbar_subtitle'] = 'AAC로 노래해요'
        context['images'] = image
        return context
| Python | 29 | 30.310345 | 68 | /song/views.py | 0.667401 | 0.667401 |
Frozen/jinja2-precompiler | refs/heads/master | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from optparse import OptionParser
import logging
import os
import re
import sys
import jinja2
def option_parse():
    """Build the option parser, parse sys.argv and return (parser, options, args)."""
    parser = OptionParser()
    option_specs = [
        ("-a", "--all", dict(action="store_true", dest="all_files", help="all files")),
        ("-b", "--base", dict(dest="base", default="", help="base dir name", metavar="DIR")),
        ("-c", "--pyc", dict(action="store_true", dest="pyc", help="byte compile")),
        ("-d", "--debug", dict(action="store_true", dest="debug", help="debug")),
        ("-e", "--ext", dict(dest="extensions", default="html,xhtml", help="list of extension [default: %default]", metavar="EXT[,...]")),
        ("-m", "--modulename", dict(action="store_true", dest="modulename", help="return compiled module file name")),
        ("-q", "--quit", dict(action="store_true", dest="quit", help="no message")),
        ("-v", "--verbose", dict(action="store_true", dest="verbose", help="more messages")),
    ]
    for short_opt, long_opt, kwargs in option_specs:
        parser.add_option(short_opt, long_opt, **kwargs)
    options, args = parser.parse_args()
    return parser, options, args
def get_module_filename(filename, py_compile=False):
    """Map a template name to its compiled module filename.

    With py_compile=True the byte-compiled name (trailing 'c') is returned.
    """
    module_filename = jinja2.ModuleLoader.get_module_filename(filename)
    return module_filename + "c" if py_compile else module_filename
def make_filter_func(target, env, extensions=None, all_files=False):
    """Build a predicate deciding whether a template must be (re)compiled.

    Templates are skipped when their extension is not in *extensions*;
    otherwise they are selected when all_files is set, when no compiled
    module (or .pyc) exists under *target*, or when the source is newer
    than the compiled module.
    """
    def filter_func(tpl):
        if extensions is not None and os.path.splitext(tpl)[1][1:] not in extensions:
            return False
        if all_files:
            return True
        _content, filename, _update = env.loader.get_source(env, tpl)
        module_filename = os.path.join(target, get_module_filename(tpl))
        if not os.path.isfile(module_filename):
            pyc_candidate = module_filename + "c"
            if not os.path.isfile(pyc_candidate):
                return True
            module_filename = pyc_candidate
        return os.path.getmtime(filename) > os.path.getmtime(module_filename)
    return filter_func
def _jinja2_version_tuple():
    """Return (major, minor) of the installed jinja2 as integers."""
    numbers = []
    for part in jinja2.__version__.split(".")[:2]:
        m = re.match(r"\d+", part)
        numbers.append(int(m.group()) if m else 0)
    return tuple(numbers)


# BUGFIX: the original compared version *strings* (jinja2.__version__[:3] >=
# "2.8"), which misclassifies releases like 2.10/2.11 ("2.1" < "2.8") and
# silently used the backport. Compare numeric tuples instead.
if _jinja2_version_tuple() >= (2, 8):
    """
    jinja2 2.8 supports walking symlink directories.
    see: https://github.com/mitsuhiko/jinja2/issues/71
    """
    from jinja2 import FileSystemLoader
else:
    class FileSystemLoader(jinja2.FileSystemLoader):
        """Backport of the jinja2>=2.8 loader with followlinks support."""

        def __init__(self, searchpath, encoding='utf-8', followlinks=False):
            super(FileSystemLoader, self).__init__(searchpath, encoding)
            self.followlinks = followlinks

        def list_templates(self):
            """Return sorted template names under every search path."""
            found = set()
            for searchpath in self.searchpath:
                walk_dir = os.walk(searchpath, followlinks=self.followlinks)
                for dirpath, dirnames, filenames in walk_dir:
                    for filename in filenames:
                        # Strip the search path prefix and normalize to '/'.
                        template = os.path.join(dirpath, filename) \
                            [len(searchpath):].strip(os.path.sep) \
                            .replace(os.path.sep, '/')
                        if template[:2] == './':
                            template = template[2:]
                        if template not in found:
                            found.add(template)
            return sorted(found)
def main():
    """Command-line driver: compile the templates under the given path."""
    # log_function handed to compile_templates; replaced by None in -q mode.
    def logger(msg):
        sys.stderr.write("%s\n" % msg)
    parser, options, args = option_parse()
    if options.debug:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbose:
        logging.getLogger().setLevel(logging.INFO)
    elif options.quit:
        logging.getLogger().setLevel(logging.CRITICAL)
        logger = None
    logging.debug("parse_options: options %s" % options)
    logging.debug("parse_options: args %s" % args)
    for i in args:
        if not os.path.exists(i):
            logging.warning("No such directory: '%s'" % i)
            sys.exit(1)
    # -m: only print the compiled module filenames, then exit.
    if options.modulename:
        basedir = re.compile(options.base)
        results = list()
        for i in args:
            results.append(os.path.join(options.base, get_module_filename(basedir.sub("", i).lstrip("/"), py_compile=options.pyc)))
        print(" ".join(results))
        sys.exit(0)
    if len(args) != 1:
        parser.print_help()
        sys.exit(1)
    logging.info("Compiling bundled templates...")
    arg = args[0]
    if not arg.endswith(os.path.sep):
        arg = "".join((arg, os.path.sep))
    env = jinja2.Environment(loader=FileSystemLoader([os.path.dirname(arg)], followlinks=True))
    # Directory argument: compile everything that passes the filter;
    # file argument: compile just that one template.
    if os.path.isdir(arg):
        if options.extensions is not None:
            extensions = options.extensions.split(",")
        else:
            extensions = None
        filter_func = make_filter_func(arg, env, extensions, options.all_files)
        target = arg
        logging.info("Now compiling templates in %s." % arg)
    else:
        basename = os.path.basename(arg)
        filter_func = lambda x: x == basename
        target = os.path.dirname(arg)
        logging.info("Now compiling a template: %s." % arg)
    env.compile_templates(target, extensions=None,
                          filter_func=filter_func, zip=None, log_function=logger,
                          ignore_errors=False, py_compile=options.pyc)
    logging.info("Finished compiling bundled templates...")
# Default to WARNING until main() applies the verbosity options.
if __name__== "__main__":
    logging.getLogger().setLevel(logging.WARNING)
    main()
| Python | 137 | 36.40876 | 142 | /jinja2precompiler.py | 0.630634 | 0.625561 |
Frozen/jinja2-precompiler | refs/heads/master | # -*- coding: utf-8 -*-
import jinja2
import pytest
import jinja2precompiler
def test_IndexError():
    """Extension filtering must not IndexError on names without an extension."""
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(["."]))
    accepts = jinja2precompiler.make_filter_func("", env, extensions=["html"], all_files=True)
    assert accepts("test.html") == True
    assert accepts("test.xml") == False
    assert accepts("html") == False
limkokholefork/Answerable | refs/heads/main | """Spider Tool for Answerable
This file contains the functions used to wrapp requests following
respecful practices, taking into account robots.txt, conditional
gets, caching contente, etc.
"""
import json
import requests
# from random import random as rnd
from time import sleep
from datetime import timedelta as td
import feedparser
from urllib.robotparser import RobotFileParser
from urllib.parse import urlparse
from tools import cache
from tools.displayer import fg, bold, green, yellow, red
from tools.log import log, abort
_rp = {} # robots.txt memory
class _FalseResponse:
"""Object with the required fields to simulate a HTTP response"""
def __init__(self, code, content):
self.status_code = code
self.content = content
def ask_robots(url: str, useragent: str) -> bool:
    """Check whether *useragent* may fetch *url* per the site's robots.txt.

    Parsers are cached per netloc in the module-level _rp dict, so each
    site's robots.txt is fetched and parsed at most once.
    """
    url_struct = urlparse(url)
    host = url_struct.netloc
    if host not in _rp:
        parser = RobotFileParser()
        _rp[host] = parser
        parser.set_url(url_struct.scheme + "://" + host + "/robots.txt")
        parser.read()
    return _rp[host].can_fetch(useragent, url)
def get(url, delay=2, use_cache=True, max_delta=td(hours=12)):
    """Respectful wrapper around requests.get.

    Honors robots.txt, sleeps `delay` seconds before hitting the network,
    and caches the decoded body under the "spider" cache category.

    Parameters:
        url: Address to fetch.
        delay: Seconds to wait before issuing the request.
        use_cache: Read and update the response cache when True.
        max_delta: Maximum age for a cached response to be reused.

    Returns:
        A requests.Response, or a _FalseResponse with status 200 (cache
        hit, content is a str) or 403 (forbidden by robots.txt).
    """
    useragent = "Answerable v0.1"
    # If a cached answer exists and is acceptable, then return the cached one.
    cache_file = url.replace("/", "-")
    if use_cache:
        log("Checking cache before petition {}", fg(url, yellow))
        hit, path = cache.check("spider", cache_file, max_delta)
        if hit:
            with open(path, "r") as fh:
                res = fh.read().replace("\\r\\n", "")
            return _FalseResponse(200, res)
    # If the robots.txt doesn't allow the scraping, return forbidden status
    if not ask_robots(url, useragent):
        log(fg("robots.txt forbids {}", red), url)
        return _FalseResponse(403, "robots.txt forbids it")
    # Make the request after the specified delay
    log("Waiting to ask for {}", fg(url, yellow))
    log(" in {:4.2f} seconds", delay)
    sleep(delay)
    headers = {"User-Agent": useragent}
    log("Requesting")
    res = requests.get(url, timeout=10, headers=headers)
    # Exit the program if the scraping was penalized
    if res.status_code == 429:  # too many requests
        abort("Too many requests")
    # Cache the response if allowed by user
    if use_cache:
        # BUG FIX: res.encoding can be None (no charset in Content-Type),
        # and bytes.decode(None) raises TypeError. Fall back to UTF-8.
        cache.update(
            "spider",
            cache_file,
            res.content.decode(res.encoding or "utf-8"),
            json_format=False,
        )
    return res
def get_feed(url, force_reload=False):
    """Get RSS feed and optionally remember to reduce bandwith

    Stores the etag/last-modified headers (cache category "spider.rss")
    so the next call can issue a conditional GET; unchanged feeds then
    come back with status 304.
    """
    useragent = "Answerable RSS v0.1"
    log("Requesting feed {}", fg(url, yellow))
    cache_file = url.replace("/", "_")
    # Get the conditions for the GET bandwith reduction
    etag = None
    modified = None
    if not force_reload:
        # td(days=999): the stored headers never expire locally; staleness
        # is delegated to the server via the conditional GET itself.
        hit, path = cache.check("spider.rss", cache_file, td(days=999))
        if hit:
            with open(path, "r") as fh:
                headers = json.load(fh)
            etag = headers["etag"]
            modified = headers["modified"]
            log("with {}: {}", bold("etag"), fg(etag, yellow))
            log("with {}: {}", bold("modified"), fg(modified, yellow))
    # Get the feed
    feed = feedparser.parse(url, agent=useragent, etag=etag, modified=modified)
    # Store the etag and/or modified headers
    # NOTE(review): feed.status is absent when parsing fails before an HTTP
    # response is obtained (e.g. network error); that would raise
    # AttributeError here -- confirm callers handle it.
    if feed.status != 304:
        etag = feed.etag if "etag" in feed else None
        modified = feed.modified if "modified" in feed else None
        new_headers = {
            "etag": etag,
            "modified": modified,
        }
        cache.update("spider.rss", cache_file, new_headers)
        log("Stored new {}: {}", bold("etag"), fg(etag, green))
        log("Stored new {}: {}", bold("modified"), fg(modified, green))
    return feed
| Python | 127 | 31.047245 | 85 | /tools/spider.py | 0.624079 | 0.617199 |
limkokholefork/Answerable | refs/heads/main | """Recommender Tool for Answerable
This file contains the recommendation algorithm.
"""
from bs4 import BeautifulSoup as bs
from sklearn.feature_extraction.text import TfidfVectorizer, CountVectorizer
from sklearn.metrics.pairwise import linear_kernel
def recommend(user_qa, feed):
    """Rank feed questions by similarity to the user's answered questions.

    user_qa: list of (question, answer) pairs; feed: list of question
    dicts. Returns (ordered_feed_indices, None) -- this model produces no
    per-entry explanation, hence the None second element.
    """
    # Text corpus of answered questions: title + body stripped of HTML.
    answered = [
        x[0]["title"] + " " + bs(x[0]["body"], "html.parser").getText(" ", strip=True)
        for x in user_qa
    ]
    tags_ans = [" ".join(x[0]["tags"]) for x in user_qa]
    # NOTE(review): no " " separator between title and body here, unlike the
    # answered corpus above -- the boundary words get fused; confirm intended.
    questions = [x["title"] + x["body"] for x in feed]
    tags_unans = [" ".join(x["tags"]) for x in feed]
    nans = len(answered)
    nunans = len(questions)
    """
    The following code is an adapted version of the Content-Based recommmender
    described in this tutorial:
    https://www.datacamp.com/community/tutorials/recommender-systems-python
    """
    tfidf = TfidfVectorizer(stop_words="english")
    count = CountVectorizer(stop_words="english")
    # list of vectorized body and tags
    tfidf_matrix = tfidf.fit_transform(answered + questions)
    count_matrix = count.fit_transform(tags_ans + tags_unans)
    # similarity matrices: without and with tags
    cosine_sim_body = linear_kernel(tfidf_matrix, tfidf_matrix)
    cosine_sim_tags = linear_kernel(count_matrix, count_matrix) + cosine_sim_body
    # rows: unanswered, cols: answered
    unans_similarity_body = cosine_sim_body[nans:, :nans]
    unans_similarity_tags = cosine_sim_tags[nans:, :nans]
    # form of the following lists: [(feed index, value)]
    sum_sim_body = enumerate([sum(r) for r in unans_similarity_body])
    max_sim_body = enumerate([max(r) for r in unans_similarity_body])
    sum_sim_tags = enumerate([sum(r) for r in unans_similarity_tags])
    max_sim_tags = enumerate([max(r) for r in unans_similarity_tags])
    # sort the indices by the value
    sort_sum_sim_body = sorted(sum_sim_body, key=lambda x: x[1], reverse=True)
    sort_max_sim_body = sorted(max_sim_body, key=lambda x: x[1], reverse=True)
    sort_sum_sim_tags = sorted(sum_sim_tags, key=lambda x: x[1], reverse=True)
    sort_max_sim_tags = sorted(max_sim_tags, key=lambda x: x[1], reverse=True)
    # map each index to its classifications (feed index -> rank position)
    by_sum_body = {x[0]: i for i, x in enumerate(sort_sum_sim_body)}
    by_max_body = {x[0]: i for i, x in enumerate(sort_max_sim_body)}
    by_sum_tags = {x[0]: i for i, x in enumerate(sort_sum_sim_tags)}
    by_max_tags = {x[0]: i for i, x in enumerate(sort_max_sim_tags)}
    # compute the mean classification (average rank across 4 orderings)
    mean_index = []
    for i in range(nunans):
        mean = (by_sum_body[i] + by_sum_tags[i] + by_max_body[i] + by_max_tags[i]) / 4
        mean_index.append((mean, i))
    # build the final recommended feed order (best average rank first)
    by_mean = [x[1] for x in sorted(mean_index)]
    return by_mean, None
| Python | 74 | 37.135136 | 86 | /models/content_based_0.py | 0.663714 | 0.658753 |
limkokholefork/Answerable | refs/heads/main | """Cache Tool for Answerable
This file contains the functions to access and modify cached content.
It may be used by different modules, so each function requires a category argument
to avoid collisions.
As every function is intended to serve a secondary role in extern functions, the
logs have an extra level of indentation.
"""
import json
import pathlib
from datetime import datetime as dt
from datetime import timedelta as td
from tools.log import log
from tools.displayer import fg, green, magenta
__cache_dir = ".cache"
def check(category: str, _file: str, max_delta: td) -> (bool, pathlib.Path):
    """Return if a file is cached and where it is located.

    Returns:
        (B, P) where
        - B is true if the content is cached and usable
        - P is the path where the cached content is/should be.

    Parameters:
        category: Folder inside the cache.
        _file: File name to look for.
        max_delta: Timedelta used as threshold to consider a file too old.
    """
    # Prepare the path to the cached file
    subpath = pathlib.Path(category) / _file
    path = pathlib.Path.cwd() / __cache_dir / subpath
    path.parent.mkdir(parents=True, exist_ok=True)
    try:
        if not path.exists():
            log(" Miss {}", fg(subpath, magenta))
            return False, path
        else:
            # Check if the file is too old
            log(" Hit {}", fg(subpath, green))
            # Freshness is judged from the file's modification time on disk.
            modified = dt.fromtimestamp(path.stat().st_mtime)
            now = dt.now()
            delta = now - modified
            log(" Time passed since last fetch: {}", delta)
            valid = delta < max_delta
            if valid:
                log(fg(" Recent enough", green))
            else:
                log(fg(" Too old", magenta))
            return valid, path
    except OSError as err:
        # Filesystem error (stat/permissions): treat as a cache miss.
        log(" {}: {}", err, fg(subpath, magenta))
        return False, path
def update(category: str, _file: str, obj, json_format=True):
    """Update or create a file in the cache

    Parameters:
        category: Folder inside the cache.
        _file: File name to store in.
        obj: Content to store; serialized as JSON unless json_format is
            False, in which case it must already be a str.
        json_format: Whether to serialize obj as JSON.

    Returns:
        (B, P) where B is True on success and P is the file path. The
        original only returned a value on failure; returning on both
        branches makes the result consistent with check() (callers that
        ignored the return value are unaffected).
    """
    subpath = pathlib.Path(category) / _file
    path = pathlib.Path.cwd() / __cache_dir / subpath
    path.parent.mkdir(parents=True, exist_ok=True)
    try:
        with open(path, "w") as fh:
            if json_format:
                json.dump(obj, fh, indent=2)
            else:
                fh.write(obj)
        log(" Cache updated: {}", fg(subpath, green))
        return True, path
    except OSError as err:
        # Write failure: report it but do not propagate -- caching is
        # best-effort by design.
        log(" {}: {}", err, fg(subpath, magenta))
        return False, path
| Python | 85 | 29.788235 | 82 | /tools/cache.py | 0.604722 | 0.604341 |
limkokholefork/Answerable | refs/heads/main | """Fetcher Tool for Answerable
This file contains the high level functions in charge of data retrieval.
It provides a interface between the spider/crawler and another level of
cacheable information.
"""
import math
import json
from datetime import timedelta as td
from bs4 import BeautifulSoup
from tools import spider, cache
from tools.log import log, abort
from tools.displayer import fg, magenta, green, bold
cache_where = "fetcher"
cache_threshold = td(hours=12)
def get_questions(question_ids):
    """Retrieve questions from Stack Overflow

    - question_ids: list of question IDs (as strings)

    Returns a list of objects with the following attributes:
    {
        "tags": [string],
        "answers": [ {"owner": {"user_id": int}} ],
        "score": int,
        "creation_date": timestamp,
        "question_id": int,
        "link": string,
        "title": string,
        "body": string (html)
    }
    """
    # about this request: https://api.stackexchange.com/docs/questions-by-ids#page=1&pagesize=100&order=desc&sort=creation&ids=67519195&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA&site=stackoverflow
    api_request_f = "https://api.stackexchange.com//2.2/questions/{}?page={}&pagesize=100&order=desc&sort=creation&site=stackoverflow&filter=!)So8N7tfWBeyaWUex((*Ndu7tpA"
    max_ids = 100  # no more than 100 ids allowed at once
    # Split the ids in batches of max_ids; each batch may still paginate.
    k = math.ceil(len(question_ids) / max_ids)
    log(f"{len(question_ids)} questions, {k} batches")
    questions = []
    for i in range(k):
        log(f"batch {i+1}")
        batch_begin = i * max_ids
        batch_end = i * max_ids + max_ids
        subset = ";".join(question_ids[batch_begin:batch_end])
        page = 1
        while True:
            api_request = api_request_f.format(subset, page)
            response = spider.get(
                api_request, delay=0.5, use_cache=False
            )  # urls too long to cache
            if response.status_code != 200:
                abort(response)
            result = json.loads(response.content)
            questions += result["items"]
            # "has_more" drives pagination within the current batch.
            if not result["has_more"]:
                break
            page += 1
    return questions
def get_user_answers(user_id, force_reload=False, max_page=math.inf):
    """Retrieve answers from a Stack Overflow user

    - user_id: user ID

    Returns a list of objects with the following attributes:
    {
        "is_accepted": bool,
        "score": int,
        "question_id": int,
        "link": string,
        "title": string,
        "body": string (html),
    }
    """
    api_request_f = "https://api.stackexchange.com/2.2/users/{}/answers?page={}&pagesize=100&order=desc&sort=activity&site=stackoverflow&filter=!37n)Y*a2Ut6eDilfH4XoIior(X(b8nm7Z-g)Tgl*A4Qdfe8Mcn-Luu"
    page = 1
    answers = []
    while page <= max_page:
        api_request = api_request_f.format(user_id, page)
        # force_reload: a zero timedelta makes any cached copy "too old",
        # so the spider refetches.
        response = spider.get(
            api_request, delay=0.5, max_delta=td() if force_reload else td(hours=12)
        )
        if response.status_code != 200:
            abort(response)
        result = json.loads(response.content)
        answers += result["items"]
        if not result["has_more"]:
            break
        page += 1
    return answers
def get_QA(user_id, force_reload=False, max_page=5):
    """Retrieve information about the questions answered by the user

    Return
        [
            (Question_1, Answer_1),
            (Question_2, Answer_2),
            ...
        ]
    Answers are annotated with their question's tags. Questions listed in
    an optional include.txt are appended with a None answer.

    See
        get_questions, get_user_answers
    """
    log(bold("Fetching user information"))
    if force_reload:
        log(fg("Force reload", magenta))
    cache_file = str(user_id) + ".json"
    # Check cache
    if not force_reload:
        hit, fpath = cache.check(cache_where, cache_file, cache_threshold)
        if hit:
            with open(fpath) as fh:
                stored = json.load(fh)
            return stored
    # Get the answers
    answers = get_user_answers(user_id, force_reload, max_page)
    # Get the questions
    q_ids = [str(a["question_id"]) for a in answers]
    questions = get_questions(q_ids)
    # Join answers and questions
    user_qa = [
        (q, a)
        for q in questions
        for a in answers
        if q["question_id"] == a["question_id"]
    ]
    # BUG FIX: annotate the answers with their question's tags BEFORE
    # caching. Previously the tags were added after cache.update(), so a
    # later cache hit returned answers without "tags" and consumers such
    # as the statistics module crashed with KeyError.
    for q, a in user_qa:
        a["tags"] = q["tags"]
    cache.update(cache_where, cache_file, user_qa)
    ## Include questions specified by user
    # NOTE(review): extra questions are re-read on every cache miss and are
    # not part of the cached payload (include.txt may change between runs);
    # a cache hit therefore returns only the answered pairs. Confirm this
    # asymmetry is acceptable.
    try:
        with open("include.txt", "r") as f:
            extra_q_ids = f.read().split()
            log("Aditional training: " + str(extra_q_ids))
            extra_questions = get_questions(extra_q_ids)
    except FileNotFoundError:
        extra_questions = []
        log("No additional training specified by user")
    user_qa += [(q, None) for q in extra_questions]
    return user_qa
def get_question_feed(url, force_reload=False):
    """Retrieve the last questions of the feed

    Returns a structure with the following format:
    [Question_1, Question_2, ...]
    where Question_n has the following keys:
        link: str
        title: str
        body: str (plain text, HTML stripped)
        tags: list of str
    """
    log(bold("Fetching question feed"))
    if force_reload:
        log(fg("Force reload", magenta))
    feed = spider.get_feed(url, force_reload=force_reload)
    # NOTE(review): feed.status may be absent when feedparser fails before
    # getting an HTTP response; see the same note in spider.get_feed.
    if feed.status == 304:  # Not Modified
        log(fg("Feed not modified since last retrieval (status 304)", magenta))
        return []
    log("Number of entries in feed: {}", fg(len(feed.entries), green))
    questions = []
    for entry in feed.entries:
        # The RSS summary is HTML; flatten it to plain text for the models.
        soup = BeautifulSoup(entry.summary, "html.parser")
        q = {
            "link": entry.link,
            "title": entry.title,
            "body": soup.getText(" ", strip=True),
            "tags": [x["term"] for x in entry.tags],
        }
        questions.append(q)
    return questions
def get_user_tags(filename):
    """Parse the tags file and return the user followed and ignored tags

    `filename` is the saved HTML source of the user's tag-preferences
    page; followed/ignored tags are read from the elements with ids
    "watching-1" and "ignored-1" respectively.
    Aborts the program if the file does not exist.
    """
    try:
        with open(filename, "r") as fh:
            bs = BeautifulSoup(fh.read(), "html.parser")
            return {
                "followed": [
                    x.getText(" ", strip=True)
                    for x in bs.find(id="watching-1").find_all("a", class_="post-tag")
                ],
                "ignored": [
                    x.getText(" ", strip=True)
                    for x in bs.find(id="ignored-1").find_all("a", class_="post-tag")
                ],
            }
    except FileNotFoundError:
        abort("File not found: {}", filename)
| Python | 207 | 30.492754 | 200 | /tools/fetcher.py | 0.58644 | 0.574475 |
limkokholefork/Answerable | refs/heads/main | """Recommender Tool for Answerable
This file contains the recommendation algorithm.
"""
import tools.displayer
from bs4 import BeautifulSoup as bs
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import linear_kernel
import numpy as np
import re
def preprocessed_text_from_html(html):
    """Flatten HTML to lowercase text, dropping <code> blocks and digits."""
    soup = bs(html, "html.parser")
    # Remove <code> elements entirely: identifiers and snippets add noise.
    for code_tag in soup.findAll(name="code"):
        code_tag.decompose()
    stripped = soup.getText(" ", strip=True)
    stripped = re.sub(r"\d+", "", stripped)
    tokens = re.findall(r"[\w+_]+", stripped)
    return " ".join(tokens).lower()
def recommend(user_qa, feed):
    """Rank feed questions by TF-IDF similarity to the user's answered ones.

    Returns (sorted_index, info): feed indices ordered best-first, and a
    list of displayable explanations aligned with the ORIGINAL feed order
    (the caller reindexes it with sorted_index).
    """
    # One document per question: tags + lowercase title + cleaned body.
    answered = [
        " ".join(x["tags"])
        + " "
        + x["title"].lower()
        + " "
        + preprocessed_text_from_html(x["body"])
        for [x, _] in user_qa
    ]
    unanswered = [
        " ".join(x["tags"])
        + " "
        + x["title"].lower()
        + " "
        + preprocessed_text_from_html(x["body"])
        for x in feed
    ]
    nans = len(answered)
    tfidf = TfidfVectorizer(stop_words="english")
    # list of vectorized text
    tfidf_matrix = tfidf.fit_transform(answered + unanswered)
    # similarity matrix of each answer with the rest
    cosine_sim = linear_kernel(tfidf_matrix, tfidf_matrix)
    # rows: unanswered, cols: answered
    unans_similarity = cosine_sim[nans:, :nans]
    # index: unanswered. values: max similarity, text size and score
    max_sim = list(enumerate([max(r) for r in unans_similarity]))
    unans_sizes = [len(u.split()) for u in unanswered]
    # Score = similarity^2 * word count: favors similar AND substantial posts.
    score = [x * x * unans_sizes[i] for i, x in max_sim]
    # sort the indices by the value
    by_score = sorted(list(enumerate(score)), key=lambda x: x[1], reverse=True)
    # relation between index in feed and index of closest answered
    # (recovered by locating the max value back in the similarity row)
    closest = [
        (i, np.where(np.isclose(unans_similarity[i], v))[0][0]) for i, v in max_sim
    ]
    # store displayable information
    b = tools.displayer.bold
    info_f = "{}: {{}}\n{}:{{}} {}: {{}} {}: {{}}".format(
        b("Closest"),
        b("Text size"),
        b("Similarity"),
        b("Score"),
    )
    info = []
    for unans, ans in closest:
        info.append(
            info_f.format(
                user_qa[ans][0]["title"],
                unans_sizes[unans],
                f"{100*max_sim[unans][1]:.2f}%",
                f"{score[unans]:.2f}",
            )
        )
    # get the indexes, now sorted
    sorted_index = [x[0] for x in by_score]
    return sorted_index, info
| Python | 92 | 26.27174 | 83 | /models/content_based_1.py | 0.57234 | 0.567557 |
limkokholefork/Answerable | refs/heads/main | import re
import json
import argparse
import datetime
import textwrap
import importlib
from urllib.error import URLError
from tools import fetcher, displayer, log, spider
_current_version = "v1.1"
def latest_version():
    """Return the latest release tag from GitHub, or None on failure.

    Queries the GitHub releases API (no delay, cached by spider.get) and
    extracts a "vX.Y..." token from the release name.
    """
    try:
        res = spider.get(
            "https://api.github.com/repos/MiguelMJ/Answerable/releases/latest", 0
        )
        if res.status_code != 200:
            log.warn("Unable to get information from latest version")
            return None
        match = re.search(r"v[\d.]+.?", json.loads(res.content)["name"])
        if match is None:
            # BUG FIX: previously `re.search(...)[0]` raised an uncaught
            # TypeError when the release name had no version-like token.
            log.warn("Unable to parse latest version name")
            return None
        return match[0]
    except URLError:
        log.warn("Unable to get information from latest version")
        return None
_config_file = ".config"
def get_user_tags(args):
    """Return the parsed tags when a tags file was given, else None.

    The -t option points at the saved HTML of the user's tag preferences;
    without it there is nothing to parse.
    """
    if args.tags is None:
        log.log("No tags file provided.")
        return None
    return fetcher.get_user_tags(args.tags)
def load_config(args) -> dict:
    """Return the user configuration

    Merged from three layers, lowest priority first: built-in defaults,
    the _config_file contents (if readable), and the CLI options -u/-t/-m
    (only the ones actually provided).
    Aborts when no user id is available from any layer.
    """
    try:
        with open(_config_file) as fh:
            file_config = json.load(fh)
    except IOError:
        # Missing/unreadable config file: rely on defaults + CLI only.
        file_config = {}
    # Merge logic moved out of the original `finally:` block -- a `return`
    # inside finally silently swallows in-flight exceptions.
    default_config = {"model": "content_based_1"}
    cli_config = {"user": args.user, "tags": args.tags, "model": args.model}
    cli_config = {k: v for k, v in cli_config.items() if v is not None}
    config = {**default_config, **file_config, **cli_config}
    # BUG FIX: use .get() -- "user" is absent entirely when neither the
    # file nor -u supplied it, and config["user"] raised KeyError instead
    # of the intended friendly abort.
    if config.get("user") is None:
        log.abort(".config not found: provide user id with -u option")
    return config
def save_config(args):
    """Store the user configuration

    Create or overwrite the configuration file with the configuration
    extracted from the options -u, -t and -m (model defaults to
    "content_based_1" when -m is omitted).
    """
    with open(_config_file, "w") as fh:
        # Parse the tags file now so the config stores the parsed tags,
        # not the file path.
        tags = get_user_tags(args)
        json.dump(
            {"user": args.user, "tags": tags, "model": args.model or "content_based_1"},
            fh,
            indent=2,
        )
    log.log("Configuration saved in {}", _config_file)
def summary(args):
    """Display a summary of the answered questions"""
    config = load_config(args)
    qa = fetcher.get_QA(config["user"], force_reload=args.f)
    # Entries from include.txt carry a None answer; drop them here since
    # the statistics only make sense for actually answered questions.
    qa = [(q, a) for q, a in qa if a is not None]
    displayer.disp_statistics(qa)
def recommend(args):
    """Recommend questions from the latest unanswered

    Loads the configured model, fetches the user corpus and the question
    feed, filters closed/duplicate/ignored entries, and displays the
    model's ranking.
    """
    # Counters updated by valid_entry() for the discard report below.
    filtered = {"hidden": 0, "closed": 0, "duplicate": 0}

    def valid_entry(entry):
        """Check if a entry should be taken into account"""
        # NOTE: reads `hide_tags`, which is assigned later in the enclosing
        # function body -- safe because valid_entry is only called after
        # that assignment (late-binding closure).
        if len(set(entry["tags"]) & hide_tags) > 0:
            filtered["hidden"] += 1
            return False
        if entry["title"][-8:] == "[closed]":
            filtered["closed"] += 1
            return False
        if entry["title"][-11:] == "[duplicate]":
            filtered["duplicate"] += 1
            return False
        return True

    def cf(x):
        """Color a value according to its value"""
        return (
            displayer.fg(x, displayer.green)
            if x == 0
            else displayer.fg(x, displayer.magenta)
        )

    # Load configuration
    config = load_config(args)
    # Load the model (a module under models/ exposing recommend())
    try:
        model_name = config["model"]
        log.log("Loading model {}", displayer.fg(model_name, displayer.yellow))
        model = importlib.import_module(f".{model_name}", "models")
        log.log(
            "Model {} succesfully loaded", displayer.fg(model_name, displayer.green)
        )
    except ModuleNotFoundError as err:
        # Distinguish "model missing" from "model's dependency missing".
        if err.name == f"models.{model_name}":
            log.abort("Model {} not present", model_name)
        else:
            log.abort("Model {} unsatisfied dependency: {}", model_name, err.name)
    # Get user info and feed
    user_qa = fetcher.get_QA(config["user"], force_reload=args.f)
    if args.all or "tags" not in config:
        tags = ""
    else:
        # Build the tag-filtered feed URL; '+' must be %2b-escaped.
        tags = "tag?tagnames="
        tags += "%20or%20".join(config["tags"]["followed"]).replace("+", "%2b")
        tags += "&sort=newest"
    url = "https://stackoverflow.com/feeds/" + tags
    try:
        feed = fetcher.get_question_feed(url, force_reload=args.F)
        if len(feed) == 0:
            raise ValueError("No feed returned")
        # Filter feed from ignored tags
        hide_tags = (
            set()
            if args.all or "tags" not in config
            else set(config["tags"]["ignored"])
        )
        useful_feed = [e for e in feed if valid_entry(e)]
        if len(useful_feed) == 0:
            raise ValueError("All feed filtered out")
        log.log(
            "Discarded: {} ignored | {} closed | {} duplicate",
            cf(filtered["hidden"]),
            cf(filtered["closed"]),
            cf(filtered["duplicate"]),
        )
        # Make the recommendation
        log.log(f"Corpus size: {len(user_qa)} Feed size: {len(useful_feed)}")
        rec_index, info = model.recommend(user_qa, useful_feed)
        selection = [useful_feed[i] for i in rec_index[: args.limit]]
        if args.info and info is None:
            log.warn("Info requested, but model {} returns None", model_name)
        elif args.info and info is not None:
            # Reorder the per-entry info to match the recommended order.
            info = [info[i] for i in rec_index[: args.limit]]
        displayer.disp_feed(selection, info, args.info)
    except ValueError as err:
        # Raised above for empty/filtered-out feeds: warn, don't crash.
        log.warn(err)
        log.print_advice()
def parse_arguments() -> argparse.Namespace:
    """Parse the command line arguments

    Parse sys.argv into a Namespace, that will be used in the rest of the
    functions. Also disables ANSI colors globally when --no-ansi is given.
    """
    parser = argparse.ArgumentParser(
        usage="%(prog)s COMMAND [OPTIONS]",
        description=f"Answerable {_current_version}\nStack Overflow unanswered questions recommendation system",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog=textwrap.dedent(
            """\
            Code: https://github.com/MiguelMJ/Answerable
            Documentation: in https://github.com/MiguelMJ/Answerable/wiki
            """
        ),
    )
    # Positional command dispatched in __main__ (save/summary/recommend).
    parser.add_argument(
        "command",
        choices=("save", "summary", "recommend"),
        help="save,summary,recommend",
        metavar="COMMAND",
    )
    parser.add_argument(
        "-v",
        "--verbose",
        help="show the log content in stderr too",
        action="store_true",
    )
    parser.add_argument(
        "-i",
        "--info",
        help="print extra info on each recomendation",
        action="store_true",
    )
    parser.add_argument("--no-ansi", help="print without colors", action="store_true")
    parser.add_argument("-f", help="force reload of user data", action="store_true")
    parser.add_argument(
        "-F", help="force retrieval of question feed", action="store_true"
    )
    parser.add_argument(
        "-l",
        "--limit",
        help="limit the number of items displayed",
        type=int,
        default=999,
        metavar="N",
    )
    parser.add_argument(
        "-a",
        "--all",
        help="don't use tags to filter the feed. If the user tags haven't been saved before with the <save> command, this option is on by default",
        action="store_true",
    )
    parser.add_argument(
        "-u", "--user", help="identifier of Stack Overflow user", metavar="ID"
    )
    parser.add_argument(
        "-t",
        "--tags",
        help="file with the source of the page with the user followed and ignored tags",
        metavar="FILE",
    )
    parser.add_argument(
        "-m",
        "--model",
        help="specify the recommendation model you want to use",
        metavar="MODEL",
    )
    args = parser.parse_args()
    # Side effect: switch the displayer to plain output for every caller.
    if args.no_ansi:
        displayer.ansi = False
    return args
if __name__ == "__main__":
    # Non-fatal check for a newer release on GitHub.
    _latest_version = latest_version()
    if _latest_version is not None and _latest_version != _current_version:
        log.warn(
            f"New version on GitHub: {_latest_version} (current is {_current_version})"
        )
    # Dispatch table: COMMAND -> handler.
    switch = {
        "save": save_config,
        "summary": summary,
        "recommend": recommend,
    }
    args = parse_arguments()
    command = args.command
    # Always log to a file; mirror to stderr only with --verbose.
    log.add_log("answerable.log")
    if args.verbose:
        log.add_stderr()
    log.log(displayer.bold("Log of {}"), datetime.datetime.now())
    switch[command](args)
    log.close_logs()
| Python | 281 | 29.540926 | 147 | /answerable.py | 0.57807 | 0.574458 |
limkokholefork/Answerable | refs/heads/main | """Statistics Tool for Answerable
This file contains the functions used to analyze user answers.
"""
#
# TAG RELATED METRICS (USING QA)
#
_tags_info = None  # memo: computed once, the answer set is fixed per run


def tags_info(qa):
    """Map each tag to its (score, acceptance, count) triple (memoized)."""
    global _tags_info
    if _tags_info is not None:
        return _tags_info
    info = {}
    for _, answer in qa:
        for tag in answer["tags"]:
            score, accepted, count = info.get(tag, (0, 0, 0))
            info[tag] = (
                score + answer["score"],
                accepted + answer["is_accepted"],
                count + 1,
            )
    _tags_info = info
    return info
def top_tags_use(qa, top=5):
    """Top tags by appearance"""
    info = tags_info(qa)
    # index 2 of the triple is the answer count for the tag
    ranked = sorted(info, key=lambda t: info[t][2], reverse=True)
    return [(t, info[t][2]) for t in ranked[:top]]
def top_tags_score_abs(qa, top=5):
    """Top tags by accumulated score"""
    info = tags_info(qa)
    # index 0 of the triple is the accumulated score
    ranked = sorted(info, key=lambda t: info[t][0], reverse=True)
    return [(t, info[t][0]) for t in ranked[:top]]
def top_tags_acceptance_abs(qa, top=5):
    """Top tags by accumulated acceptance"""
    info = tags_info(qa)
    # index 1 of the triple is the accumulated acceptance count
    ranked = sorted(info, key=lambda t: info[t][1], reverse=True)
    return [(t, info[t][1]) for t in ranked[:top]]
def top_tags_score_rel(qa, top=5):
    """Top tags by score per answer"""
    info = tags_info(qa)
    # average = accumulated score / answer count
    ranked = sorted(info, key=lambda t: info[t][0] / info[t][2], reverse=True)
    return [(t, info[t][0] / info[t][2]) for t in ranked[:top]]
def top_tags_acceptance_rel(qa, top=5):
    """Top tags by acceptance per answer"""
    info = tags_info(qa)
    # ratio = accumulated acceptance / answer count
    ranked = sorted(info, key=lambda t: info[t][1] / info[t][2], reverse=True)
    return [(t, info[t][1] / info[t][2]) for t in ranked[:top]]
#
# ANSWER RELATED METRICS
#
def top_answers(answers, top=5):
    """Top answers by score"""
    ranked = sorted(answers, key=lambda a: a["score"], reverse=True)
    return ranked[:top]
def top_accepted(answers, top=5):
    """Top accepted answers by score"""
    ranked = sorted(answers, key=lambda a: a["score"], reverse=True)
    # Keep only accepted answers, preserving the score order.
    return [a for a in ranked if a["is_accepted"]][:top]
#
# REPUTATION RELATED METRICS
#
def reputation(answer):
    """Reputation associated to an answer.

    NOT ACCURATE: a rough model of Stack Overflow's actual rules
    (10 per upvote, 15 for acceptance).
    """
    bonus = 15 if answer["is_accepted"] else 0
    return 10 * answer["score"] + bonus
_answers_sorted_reputation = None  # memo: answers sorted by reputation
_total_reputation = None  # memo: total reputation over all answers


def answers_sorted_reputation(answers):
    """Answers sorted by associated reputation (memoized).

    NOTE(review): the memo ignores `answers` after the first call -- safe
    only because the process analyzes a single answer set per run.
    """
    global _answers_sorted_reputation
    if _answers_sorted_reputation is None:
        _answers_sorted_reputation = sorted(
            answers, key=lambda x: reputation(x), reverse=True
        )
    return _answers_sorted_reputation
def total_reputation(answers):
    """Total reputation gained from answers (memoized).

    NOTE(review): like answers_sorted_reputation, the memo ignores the
    argument after the first call.
    """
    global _total_reputation
    if _total_reputation is None:
        _total_reputation = sum([reputation(a) for a in answers])
    return _total_reputation
def average_reputation_weight(answers, w):
    """Average reputation and weight of answers generating w % reputation

    Walks the answers in descending reputation order until the running
    total reaches the fraction `w` of the grand total, then returns
    (average reputation of that subset, percentage of answers it spans).
    """
    repw = total_reputation(answers) * w
    sorted_answers = answers_sorted_reputation(answers)
    acc_rep = 0
    acc_ans = 0
    while acc_rep < repw and acc_ans < len(sorted_answers):
        acc_rep += reputation(sorted_answers[acc_ans])
        acc_ans += 1
    if acc_ans == 0:
        # No answers at all (or zero total): avoid division by zero.
        return (0, 0)
    return (acc_rep / acc_ans, 100 * acc_ans / len(answers))
#
# LISTS TO SIMPLIFY CALLING
#
# Each table pairs a display label with the metric callable; the
# displayer iterates these instead of hard-coding the metrics.

tag_metrics = [  # call with qa
    ("Top used tags", top_tags_use),
    ("Top tags by accumulated score", top_tags_score_abs),
    ("Top tags by score per answer", top_tags_score_rel),
    ("Top tags by accumulated acceptance", top_tags_acceptance_abs),
    ("Top tags by acceptance per answer", top_tags_acceptance_rel),
]

answer_metrics_single = [  # call with answers
    ("Answers analyzed", len),
    ("Total score", lambda x: sum([a["score"] for a in x])),
    ("Average score", lambda x: sum([a["score"] for a in x]) / len(x)),
    ("Total accepted", lambda x: sum([a["is_accepted"] for a in x])),
    ("Acceptance ratio", lambda x: sum([a["is_accepted"] for a in x]) / len(x)),
]

# Triples: (label, metric returning answers, key extracting the shown value)
answer_metrics_tops = [  # call with answers
    ("Top answers by score", top_answers, lambda a: a["score"]),
    ("Top accepted answers by score", top_accepted, lambda a: a["score"]),
]

reputation_metrics_single = [  # call with answers
    ("Total reputation", lambda x: sum([reputation(a) for a in x])),
    ("Average reputation", lambda x: sum([reputation(a) for a in x]) / len(x)),
]

# (weights, metric, per-result label templates filled with weight*100)
reputation_weight_metrics = (  # call with answers and weights
    [0.95, 0.80],
    average_reputation_weight,
    (
        "Average reputation on answers generating {:.0f}% reputation",
        "Percentage of answers generating {:.0f}% reputation",
    ),
)
| Python | 174 | 27.413794 | 83 | /tools/statistics.py | 0.618528 | 0.608617 |
limkokholefork/Answerable | refs/heads/main | """Displayer Tool for Answerable
This file contains the functions and variables used to present the data.
"""
import tools.statistics as st
#
# COLOR RELATED VARIABLES AND FUNCTIONS
#
# Base RGB palette used by fg()/bg(): 8-bit channel triples (capped at 250).
red = (250, 0, 0)
green = (0, 250, 0)
blue = (0, 0, 250)
cyan = (0, 250, 250)
magenta = (250, 0, 250)
yellow = (250, 250, 0)
# Unused grayscale palette, kept as reference.
"""
white = (250, 250, 250)
gray1 = (200, 200, 200)
gray2 = (150, 150, 150)
gray3 = (100, 100, 100)
gray4 = (50, 50, 50)
black = (0, 0, 0)
"""
def lighten(c, r):
    """Move color `c` a fraction `r` of the way toward white (250,250,250)."""
    return tuple(int(ch + (250 - ch) * r) for ch in c)
def darken(c, r):
    """Move color `c` a fraction `r` of the way toward black (0,0,0)."""
    return tuple(int(ch - ch * r) for ch in c)
# Unused linear color interpolation helper, kept as reference.
"""
def interpolate(c, d, r):
    dr = (d[0] - c[0]) * r
    dg = (d[1] - c[1]) * r
    db = (d[2] - c[2]) * r
    return (int(c[0] + dr), int(c[1] + dg), int(c[2] + db))
"""
#
# ANSI RELATED VARIABLES AND FUNCTIONS
#
ansi = True  # global switch: --no-ansi sets this to False


def bold(msg):
    """Wrap msg in ANSI bold codes; pass through when colors are disabled."""
    if ansi:
        return "\033[1m{}\033[0m".format(msg)
    return msg
def fg(msg, color):
    """Wrap msg in a 24-bit ANSI foreground escape for the RGB `color`."""
    if not ansi:
        return msg
    return "\033[38;2;{:03};{:03};{:03}m{}\033[0m".format(
        color[0], color[1], color[2], msg
    )
def bg(msg, color):
    """Wrap msg in a 24-bit ANSI background escape for the RGB `color`."""
    if not ansi:
        return msg
    return "\033[48;2;{:03};{:03};{:03}m{}\033[0m".format(
        color[0], color[1], color[2], msg
    )
def color(msg, fgc, bgc):
    """Apply both foreground `fgc` and background `bgc` colors to msg."""
    return bg(fg(msg, fgc), bgc)
#
# DATA DISPLAY FUNCTIONS
#
def disp_feed(feed, info, print_info=False):
    """Pretty-print feed entries: title, tags and link per entry.

    When print_info is True and `info` is not None, the matching info
    string is printed indented under each entry.
    """
    def title(x):
        # Bold, slightly lightened blue for question titles.
        return fg(bold(x), lighten(blue, 0.3))

    def tag(x):
        return fg(f"[{x}]", darken(cyan, 0.2))

    for i in range(len(feed)):
        entry = feed[i]
        print("o", title(entry["title"]))
        print(" ", " ".join(tag(t) for t in entry["tags"]))
        print(" ", entry["link"])
        if print_info and info is not None:
            # Keep multi-line info aligned under its entry.
            print(" ", info[i].replace("\n", "\n "))
def table(data, align=""):
    """Print rows of `data` with each column padded to its widest cell.

    align: optional str.format alignment flag ("<", ">", "^") applied to
    every column; by default each value keeps its natural alignment.
    Assumes all rows have the same number of columns as the first.
    """
    cols = len(data[0])
    widths = []
    for i in range(0, cols):
        col = [x[i] for x in data]
        widths.append(max([len(str(c)) for c in col]))
    # One format slot per column, e.g. "{:>12}" for align=">" width=12.
    row_f = " ".join(["{{:{}{}}}".format(align, w) for w in widths])
    for d in data:
        print(row_f.format(*d))
def disp_statistics(user_qa):
    """Print the statistics report for the user's (question, answer) pairs.

    Iterates the metric tables declared in tools.statistics (answer, tag
    and reputation metrics) and renders them with color formatting.
    """
    # Reusable colored "{}" templates, filled with .format() below.
    ans_f = fg("{}", lighten(blue, 0.3))
    tag_f = fg("[{}]", darken(cyan, 0.2))
    val_f = bold(fg("{}", green))

    def print_section(txt):
        print(bold(txt.upper()))
        print()

    def print_metric(txt):
        def mark(x):
            return bold(x)

        print(mark(txt))

    def print_answer_and_value(answer, value):
        tags = answer["tags"]
        print(val_f.format(value), ans_f.format(answer["title"]))
        # Pad the second line so the tags align under the title.
        print(" " * len(str(value)), " ".join([tag_f.format(t) for t in tags]))

    # Metrics operating on answers only (not the question side).
    user_answers = [a for q, a in user_qa]
    print_section("Answer metrics")
    metrics = [
        (bold(k), val_f.format(m(user_answers))) for k, m in st.answer_metrics_single
    ]
    table(metrics)
    print()
    for (name, metric, key) in st.answer_metrics_tops:
        print_metric(name)
        results = metric(user_answers)
        for a in results:
            print_answer_and_value(a, key(a))
        print()
    print_section("Tag metrics")
    for (name, metric) in st.tag_metrics:
        print_metric(name)
        results = metric(user_qa)
        results = [(tag_f.format(r[0]), val_f.format(r[1])) for r in results]
        table(results)
        print()
    print_section("Reputation metrics")
    metrics = [
        (bold(k), val_f.format(m(user_answers)))
        for k, m in st.reputation_metrics_single
    ]
    table(metrics)
    print()
    # Weighted metrics return one value per label template.
    for w in st.reputation_weight_metrics[0]:
        results = st.reputation_weight_metrics[1](user_answers, w)
        for i, info in enumerate(st.reputation_weight_metrics[2]):
            print_metric(info.format(w * 100))
            print(val_f.format(results[i]))
| Python | 169 | 22.508875 | 85 | /tools/displayer.py | 0.536622 | 0.491568 |
limkokholefork/Answerable | refs/heads/main | """Log Tool for Answerable
This file contains the functions used to log control data and debug messages
in a unified format.
"""
import re
import sys
import inspect
from tools.displayer import bold, red, magenta, fg
_logs = [] # list of file handlers
_ansire = re.compile("\\033\[[^m]+m") # ansi escape sequences
def _strip_ansi(msg):
"""Strip ansi escape sequences"""
return re.sub(_ansire, "", msg)
def _get_caller():
    """Name of the module two frames up (the caller of the public API).

    NOTE(review): inspect.getmodule can return None for some frames
    (e.g. interactive sessions), which would raise AttributeError here --
    confirm that is acceptable.
    """
    frm = inspect.stack()[2]
    return inspect.getmodule(frm[0]).__name__
def add_stderr():
    """Add the stderr to the log file handlers"""
    # Callers are responsible for not adding it twice (see warn/abort).
    _logs.append(sys.stderr)
def add_log(logfile):
    """Open a new file and add it to the log file handlers"""
    # Truncates any existing file; handle is closed by close_logs().
    _logs.append(open(logfile, "w"))
def close_logs():
    """Close all log file handlers."""
    for f in _logs:
        # stderr is owned by the interpreter; never close it.
        if f is not sys.stderr:
            f.close()
def advice_message():
    """Returns the advice of where to find the full logs"""
    # Only real files are listed; stderr is excluded.
    lognames = ", ".join([fh.name for fh in _logs if fh is not sys.stderr])
    return "Full log in " + lognames
def abort(msg, *argv):
    """Print an error message to all logs (and stderr) and abort execution.

    `msg` is a str.format template filled with *argv. Logs are flushed
    and closed before exiting.
    """
    if sys.stderr not in _logs:
        add_stderr()
    log(fg(msg, red), *argv, who=_get_caller())
    print_advice()
    close_logs()
    # sys.exit instead of the exit() builtin: exit() is injected by the
    # site module for interactive use and may be absent (python -S).
    sys.exit()
def warn(msg, *argv):
    """Print a warning message to all logs and stderr without aborting.

    `msg` is a str.format template filled with *argv. (The original
    docstring wrongly claimed this aborts execution.)
    """
    err_off = sys.stderr not in _logs
    if err_off:
        add_stderr()
    log(fg(msg, magenta), *argv, who=_get_caller())
    # BUG FIX: only remove stderr if this call added it. The unconditional
    # _logs.pop() silently removed the last registered handler (e.g. the
    # verbose-mode stderr or a log file) on every warn().
    if err_off:
        _logs.pop()
def print_advice():
    """Print where to find the full log if necessary"""
    # Only needed when stderr is not already receiving the log stream.
    if sys.stderr not in _logs:
        print(advice_message(), file=sys.stderr)
def log(msg, *argv, **kargs):
    """Write a formatted message to every registered log destination.

    The caller's module name is prefixed in brackets.  ANSI colors are
    preserved on stderr and stripped for plain file handlers.
    """
    if "who" in kargs:
        who = kargs["who"]
    else:
        who = _get_caller()
    prefix = f"[{who}] "
    body = msg.format(*argv)
    plain = prefix + _strip_ansi(body)
    colored = bold(prefix) + body
    for handler in _logs:
        if handler is sys.stderr:
            print(colored, file=handler)
            sys.stderr.flush()
        else:
            print(plain, file=handler)
| Python | 93 | 21.440861 | 76 | /tools/log.py | 0.605175 | 0.602779 |
prozoroff/files | refs/heads/master | import time
import threading
import os
import pwd
import grp
from client import Client
class BtsyncHelper:
    """Thin helper around the BitTorrent Sync web API client."""

    # NOTE: ``global client`` makes the client a module-level global created
    # once when this class body executes, not a per-instance attribute.
    global client
    client = Client(host='127.0.0.1', port='8888', username='admin', password='******')

    def get_folders(self):
        """Return the list of folders currently being synchronized."""
        return client.sync_folders

    def check_folder(self, folder_path):
        """Return True when *folder_path* is already synchronized."""
        for f in self.get_folders():
            if f['name'] == folder_path:
                return True
        return False

    def create_folder(self, path):
        """Generate a fresh secret and start synchronizing *path*."""
        secret = client.generate_secret()
        return self.add_folder(path, secret['secret'])

    def add_folder(self, path, secret):
        """Start synchronizing *path* under *secret*, creating it on disk.

        A ``readme`` marker file is written into the folder so the
        synchronization can be verified on the receiving side.
        Returns a human-readable status string.
        """
        if not os.path.exists(path):
            os.makedirs(path)
        if self.check_folder(path) == True:
            return 'Folder: ' + str(path) + ' already synchronized'
        uid = pwd.getpwnam('root').pw_uid
        os.chown(path, uid, -1)
        # print() with a single argument is valid in both Python 2 and 3;
        # the original Python-2-only print statement broke under Python 3.
        print('Trying to open directory: ' + path)
        client.add_sync_folder(path, secret)
        # Renamed from ``file``, which shadowed the builtin; ``with`` makes
        # sure the handle is closed even if the write fails.
        with open(path + '/readme', 'a') as readme:
            readme.write('This file automatically created for testing synchronization by BitTorrent Sync')
        os.chown(path + '/readme', uid, -1)
        return str(path) + " created! Secret: " + secret
| Python | 44 | 29.568182 | 104 | /btsynchelper.py | 0.552416 | 0.543494 |
CaptainCodex/relevancy-ranker | refs/heads/master | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from IPython.display import display
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn import metrics
# Load the student performance data set and take a first look at it.
customers = pd.read_csv('StudentsPerformance.csv')
display(customers.head())
customers.info()
display(customers.describe())

# Exploratory plots of the score relationships (keyword args: positional
# x/y/data were deprecated and then removed in newer seaborn releases).
sns.jointplot(x='reading score', y='writing score', data=customers)
sns.pairplot(customers)
sns.lmplot(x='reading score', y='writing score', data=customers)

# Features and target.  The original code also put 'math score' into X,
# which leaks the prediction target into the features and makes the
# regression trivially (and meaninglessly) perfect.
X = customers[['writing score', 'reading score']]
y = customers[['math score']]
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=101)

# Fit an ordinary least-squares model and inspect its coefficients.
lm = LinearRegression()
lm.fit(X_train, y_train)
print(lm.coef_)

# Evaluate on the held-out set.
predictions = lm.predict(X_test)
plt.scatter(y_test, predictions)
plt.xlabel('Y Test')
plt.ylabel('Predicted Y')
mae = metrics.mean_absolute_error(y_test, predictions)
mse = metrics.mean_squared_error(y_test, predictions)
rmse = np.sqrt(metrics.mean_squared_error(y_test, predictions))
print(mae, mse, rmse)

# Coefficient table, one row per feature.
coeffs = pd.DataFrame(data=lm.coef_.transpose(), index=X.columns, columns=['Coefficient'])
coeffs.plot()
display(coeffs)
plt.show()
ningzy/alex_misc | refs/heads/master | import os
from shutil import copyfile
def save_as(src, dst):
    """Copy *src* to *dst*, removing any existing file at *dst* first.

    The original script hard-coded empty paths and called ``copyfile``
    with the destination argument missing, which raised a TypeError on
    import; the paths are now parameters.
    """
    if os.path.exists(dst):
        os.remove(dst)
    copyfile(src, dst)


if __name__ == '__main__':
    # Fill in real source/destination paths before running as a script.
    save_as('', '')
ningzy/alex_misc | refs/heads/master | import smtplib
import getpass
FROM = 'zning'
# Recipients as a list: with a plain string, ``", ".join(TO)`` below joined
# the individual *characters* of the address ("a, i, r, ...").
TO = ['airportico@gmail.com']
SUBJECT = 'test'
TEXT = 'testtttt'

message = """ from: %s\nto: %s\nsubject: %s\n\n%s""" % (FROM, ", ".join(TO), SUBJECT, TEXT)

try:
    # Gmail submission port with STARTTLS; credentials are prompted for.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    user = input("User name: ")
    pwd = getpass.getpass('Password: ')
    server.login(user, pwd)
    server.sendmail(FROM, TO, message)
    server.close()
    print("email sent...")
except (smtplib.SMTPException, OSError):
    # Narrowed from a bare ``except`` so programming errors and
    # KeyboardInterrupt are no longer silently swallowed.
    print("failed...")
AmosGarner/PyLife | refs/heads/master | import sys, argparse
import numpy as np
import matplotlib.pyplot as plot
import matplotlib.animation as animation
from helper import *
from displayTextSpawner import displayText
from inputValidator import validateInput
paused = True  # gate: the animation waits for a key press before the first step
iteration = 0  # number of animation frames processed so far
def update(frameNumber, image, grid, gridSize):
    """Advance the Game of Life one frame (FuncAnimation callback).

    Parameters
    ----------
    frameNumber : int
        Frame index supplied by FuncAnimation (unused).
    image : matplotlib AxesImage
        Image artist whose data is refreshed in place.
    grid : numpy.ndarray
        Current cell states (ON/OFF); updated in place.
    gridSize : int
        Width/height of the square grid.
    """
    newGrid = grid.copy()
    global paused
    global iteration
    if paused is True and iteration > 0:
        # Block until the user acknowledges.  raw_input() existed only in
        # Python 2 and raised NameError under Python 3; input() is the
        # equivalent there.
        input('Press any [Key] to start simulation:')
        image.set_data(newGrid)
        grid[:] = newGrid[:]
        paused = False
    else:
        for index in range(gridSize):
            for subIndex in range(gridSize):
                # Sum the 8 neighbours with toroidal (wrap-around) indexing,
                # normalised by ON so ``total`` counts live neighbours.
                total = int((grid[index, (subIndex-1)%gridSize] + grid[index, (subIndex+1)%gridSize] +
                             grid[(index-1)%gridSize, subIndex] + grid[(index+1)%gridSize, subIndex] +
                             grid[(index-1)%gridSize, (subIndex-1)%gridSize] + grid[(index-1)%gridSize, (subIndex+1)%gridSize] +
                             grid[(index+1)%gridSize, (subIndex-1)%gridSize] + grid[(index+1)%gridSize, (subIndex+1)%gridSize])/ON)
                if iteration > 0:
                    if grid[index, subIndex] == ON:
                        # Under- or over-population kills the cell.
                        if (total < 2) or (total > 3):
                            newGrid[index, subIndex] = OFF
                    else:
                        # Exactly three live neighbours breeds a new cell.
                        if total == 3:
                            newGrid[index, subIndex] = ON
        image.set_data(newGrid)
        grid[:] = newGrid[:]
        iteration += 1
    return image
def main():
    """Parse command-line options, build the starting grid and animate it."""
    parser = argparse.ArgumentParser(description="Runs Conway's Game of Life simulation.")
    parser.add_argument('--grid-size', dest='gridSize', required=False)
    parser.add_argument('--mov-file', dest='movfile', required=False)
    parser.add_argument('--interval', dest='interval', required=False)
    parser.add_argument('--glider', dest='glider', required=False)
    parser.add_argument('--gosper', dest='gosper', required=False)
    parser.add_argument('--display', dest='displayText', required=False)
    args = parser.parse_args()

    # Grid size: default 100, overridable when a value larger than 8 is given.
    gridSize = 100
    if args.gridSize and int(args.gridSize) > 8:
        gridSize = int(args.gridSize)

    # Animation update interval in milliseconds.
    updateInterval = int(args.interval) if args.interval else 50

    # Choose the starting configuration.
    if args.glider:
        grid = np.zeros(gridSize*gridSize).reshape(gridSize, gridSize)
        addGlider(1, 1, grid)
    elif args.gosper:
        grid = np.zeros(gridSize*gridSize).reshape(gridSize, gridSize)
        addGosperGliderGun(10, 10, grid)
    elif args.displayText and validateInput(args.displayText):
        if args.displayText == 'alphanumspec':
            grid = displayText('abcdefghijklmnopqrstuvwxyz_0123456789_', gridSize)
        elif args.displayText == 'david':
            grid = displayText('happy_birthday___david!!!!', gridSize)
        else:
            grid = displayText(args.displayText, gridSize)
    else:
        grid = randomGrid(gridSize)

    # Set up the figure and hand control to matplotlib's animation loop.
    fig, ax = plot.subplots()
    img = ax.imshow(grid, interpolation='nearest')
    plot.title("PyLife V1.0")
    ani = animation.FuncAnimation(fig, update, fargs=(img, grid, gridSize),
                                  frames=10,
                                  interval=updateInterval,
                                  save_count=50)
    if args.movfile:
        ani.save(args.movfile, fps=30, extra_args=['-vcodec', 'libx264'])
    plot.show()
# Script entry point.
if __name__ == '__main__':
    main()
| Python | 95 | 35.810528 | 131 | /pylife.py | 0.591364 | 0.576494 |
AmosGarner/PyLife | refs/heads/master | from alphaNumLib import *
alphaNumArray = alphaArray + numArray + specialArray
def validateInput(input):
    """Return True when every character of *input* can be rendered.

    The explicit if/else around an already-boolean result was redundant.
    (The parameter name shadows the builtin ``input``; kept for
    backward compatibility with existing callers.)
    """
    return checkInAlphaNumSpec(input)
def checkInAlphaNumSpec(input):
    """Return True when *input* (case-insensitively) contains only
    characters present in ``alphaNumArray``.

    Uses ``all()`` over a generator instead of the original manual loop.
    """
    return all(char in alphaNumArray for char in input.lower())
| Python | 16 | 22.8125 | 52 | /inputValidator.py | 0.685039 | 0.685039 |
AmosGarner/PyLife | refs/heads/master | import numpy as np
ON = 255  # live cell value (renders white)
OFF = 0  # dead cell value
vals = [ON, OFF]  # the two possible cell states
def displayText(input, gridSize):
    """Render *input* as 5x4 glyphs onto a fresh gridSize x gridSize grid.

    Glyphs are stamped every 5 columns starting from column 5; when a
    glyph would run off the right edge the column counter wraps and the
    row moves below the grid centre.
    """
    grid = generateBlankGroup(gridSize)
    index = 1
    # Integer division: a float row index would raise a TypeError when
    # used to slice the numpy grid under Python 3 (identical result in
    # Python 2, where / on ints already floors).
    x = gridSize // 2
    for value in list(input):
        print(5 * index)
        print(gridSize)
        if 5*index >= gridSize:
            index = 1
            # NOTE(review): every wrapped line lands on the same row
            # (gridSize//2 + 6), so a second wrap overwrites the first
            # line of text -- probably intended to be ``x += 6``;
            # confirm before changing.
            x = gridSize // 2 + 6
        grid = spawnValue(value, x, 5 * index, grid)
        index += 1
    return grid
def spawnValue(char, row, col, grid):
    """Stamp the 5x4 glyph for *char* into *grid*, centred at (row, col).

    Parameters
    ----------
    char : str
        A single supported character (a-z, 0-9, '_', '!', '?', '.').
    row, col : int
        Centre of the glyph; it occupies rows [row-2, row+3) and
        columns [col-2, col+2).
    grid : numpy.ndarray
        The life grid; modified in place and also returned.

    Raises
    ------
    ValueError
        For an unsupported character.  (The original 40-branch if-chain
        left ``value`` unbound in that case and raised UnboundLocalError;
        a keyed lookup is both faster and explicit about the failure.)
    """
    # Each glyph is five rows of four bits: '1' -> ON, '0' -> OFF.
    glyphs = {
        'a': ('0110', '1001', '1111', '1001', '1001'),
        'b': ('1110', '1001', '1111', '1001', '1110'),
        'c': ('1111', '1000', '1000', '1000', '1111'),
        'd': ('1110', '1001', '1001', '1001', '1110'),
        'e': ('1111', '1000', '1110', '1000', '1111'),
        'f': ('1111', '1000', '1110', '1000', '1111'),
        'g': ('1111', '1000', '1011', '1001', '1111'),
        'h': ('1001', '1001', '1111', '1001', '1001'),
        'i': ('1111', '0110', '0110', '0110', '1111'),
        'j': ('0111', '0011', '1001', '1001', '1111'),
        'k': ('1001', '1010', '1100', '1010', '1001'),
        'l': ('1100', '1000', '1000', '1001', '1111'),
        'm': ('1111', '1111', '1011', '1001', '1001'),
        'n': ('1101', '1101', '1011', '1011', '1001'),
        'o': ('1111', '1001', '1001', '1001', '1111'),
        'p': ('1111', '1001', '1111', '1000', '1000'),
        'q': ('1111', '1001', '1001', '1011', '1111'),
        'r': ('1111', '1001', '1110', '1010', '1001'),
        's': ('0111', '1000', '1110', '0001', '1110'),
        't': ('1111', '1111', '0010', '0110', '0110'),
        'u': ('1001', '1001', '1001', '1011', '1111'),
        'v': ('1001', '1001', '1001', '1001', '0110'),
        'w': ('1001', '1001', '1101', '1111', '1101'),
        'x': ('1001', '1001', '0110', '1001', '1001'),
        'y': ('1001', '1001', '0110', '0100', '0100'),
        'z': ('1111', '0010', '0100', '1000', '1111'),
        '0': ('1111', '1101', '1101', '1011', '1111'),
        '1': ('0110', '1110', '0110', '0110', '0110'),
        '2': ('1111', '0001', '1111', '1000', '1111'),
        '3': ('1111', '0001', '0111', '0001', '1111'),
        '4': ('1001', '1001', '1111', '0001', '0001'),
        '5': ('1111', '1000', '1110', '0001', '1110'),
        '6': ('1100', '1000', '1110', '1001', '1110'),
        '7': ('1111', '0001', '0110', '0100', '0100'),
        '8': ('1111', '1001', '1111', '1001', '1111'),
        '9': ('1111', '1001', '1111', '1100', '1100'),
        '_': ('0000', '0000', '0000', '0000', '0000'),
        '!': ('0110', '0110', '0110', '0000', '0110'),
        '?': ('0110', '1001', '0010', '0000', '0010'),
        '.': ('0000', '0000', '0000', '0110', '0110'),
    }
    try:
        pattern = glyphs[char]
    except KeyError:
        raise ValueError('unsupported character: %r' % char)
    value = np.array([[ON if bit == '1' else OFF for bit in bits]
                      for bits in pattern])
    grid[row-2:row+3, col-2:col+2] = value
    return grid
def generateBlankGroup(gridSize):
    """Return a gridSize x gridSize array of dead (zero) cells."""
    return np.zeros((gridSize, gridSize))
| Python | 268 | 38.746269 | 66 | /displayTextSpawner.py | 0.287364 | 0.284829 |
AmosGarner/PyLife | refs/heads/master | import numpy as np
import matplotlib.pyplot as plot
import matplotlib.animation as animation
ON = 255
OFF = 0
vals = [ON, OFF]
def randomGrid(gridSize):
    """Return a gridSize x gridSize grid with roughly 20% live cells."""
    flat = np.random.choice(vals, size=gridSize*gridSize, p=[0.2, 0.8])
    return flat.reshape(gridSize, gridSize)
def addGlider(row, col, grid):
    """Stamp a glider with its top-left corner at (row, col), in place.

    The previous pattern had only three live cells ((0,2), (1,0), (1,2))
    and is not a glider -- it dies out instead of translating.  The
    canonical glider adds the bottom row [OFF, ON, ON].
    """
    glider = np.array([[OFF, OFF, ON],
                       [ON, OFF, ON],
                       [OFF, ON, ON]])
    grid[row:row+3, col:col+3] = glider
def addGosperGliderGun(row, col, grid):
    """Stamp a Gosper glider gun with its top-left corner at (row, col).

    The gun occupies an 11 x 38 bounding box inside *grid* (modified in
    place) and periodically emits gliders.
    """
    gun = np.zeros(11*38).reshape(11, 38)
    # Left square block.
    gun[5][1] = gun[5][2] = ON
    gun[6][1] = gun[6][2] = ON
    # Left shooter structure.
    gun[3][13] = gun[3][14] = ON
    gun[4][12] = gun[4][16] = ON
    gun[5][11] = gun[5][17] = ON
    gun[6][11] = gun[6][15] = gun[6][17] = gun[6][18] = ON
    gun[7][11] = gun[7][17] = ON
    gun[8][12] = gun[8][16] = ON
    gun[9][13] = gun[9][14] = ON
    # Right shooter structure.
    gun[1][25] = ON
    gun[2][23] = gun[2][25] = ON
    gun[3][21] = gun[3][22] = ON
    gun[4][21] = gun[4][22] = ON
    gun[5][21] = gun[5][22] = ON
    gun[6][23] = gun[6][25] = ON
    gun[7][25] = ON
    # Right square block.
    gun[3][35] = gun[3][36] = ON
    gun[4][35] = gun[4][36] = ON
    grid[row:row+11, col:col+38] = gun
| Python | 43 | 25.930233 | 94 | /helper.py | 0.514681 | 0.405872 |
AlenaPliusnina/Flask_API | refs/heads/main | import json
from datetime import datetime
from flask import request, make_response
from flask_restful import Resource, Api
from flask import g
from app import app, db
from flask_httpauth import HTTPBasicAuth
from app.models import User, Post, Comment
from app.schemes import posts_schema, post_schema, comment_schema, comments_schema, users_schema, user_schema
# All routes registered below are mounted under this versioned prefix.
api = Api(app, prefix="/api/v1")
auth = HTTPBasicAuth()
@auth.verify_password
def verify_password(username, password):
    """HTTP Basic Auth callback: validate credentials and stash the user on g."""
    user = User.query.filter_by(username=username).first()
    if user is None or not user.verify_password(password):
        return False
    g.user = user
    return True
class UserListResource(Resource):
    """Collection endpoint for users: list (admin only) and register."""

    @auth.login_required
    def get(self):
        """Return every user; only the 'admin' account may call this."""
        if g.user.username != 'admin':
            payload = {'error': 'HTTP 403: Forbidden',
                       'message': 'Only the superuser can access.'}
            return make_response(json.dumps(payload), 403)
        return users_schema.dump(User.query.all())

    def post(self):
        """Register a new user; rejects duplicate email or username."""
        user = User(**request.get_json())
        email_taken = User.query.filter_by(email=user.email).first()
        name_taken = User.query.filter_by(username=user.username).first()
        if email_taken or name_taken:
            payload = {'message': 'User already exists. Please login.'}
            return make_response(json.dumps(payload), 202)
        try:
            user.hash_password()
            user.save()
        except Exception as e:
            # NOTE(review): echoing str(e) can leak internals and 401 is an
            # odd status for a save failure; narrow once the expected
            # failure modes are known.
            return {'message': str(e)}, 401
        payload = {'message': 'You registered successfully. Please log in.'}
        return make_response(json.dumps(payload), 201)
class UserResource(Resource):
    """Single-user endpoint: fetch or delete one account."""

    @auth.login_required
    def get(self, user_id):
        """Return one user; allowed for admin or the account owner."""
        allowed = g.user.username == 'admin' or g.user.id == user_id
        if not allowed:
            payload = {'error': 'HTTP 403: Forbidden',
                       'message': 'You can only access your registration information.'}
            return make_response(json.dumps(payload), 403)
        return user_schema.dump(User.query.get_or_404(user_id))

    @auth.login_required
    def delete(self, user_id):
        """Delete an account; allowed for admin or the account owner."""
        user = User.query.get_or_404(user_id)
        if user.id != g.user.id and g.user.username != 'admin':
            payload = {'error': 'HTTP 403: Forbidden',
                       'message': 'You can only delete your account.'}
            return make_response(json.dumps(payload), 403)
        db.session.delete(user)
        db.session.commit()
        payload = {'message': 'The user was successfully deleted.'}
        return make_response(json.dumps(payload), 200)
class PostListResource(Resource):
    """Collection endpoint for posts: public list, authenticated create."""

    def get(self):
        """Return every post."""
        return posts_schema.dump(Post.query.all())

    @auth.login_required
    def post(self):
        """Create a post authored by the authenticated user."""
        created = Post(
            author_id=g.user.id,
            title=request.json['title'],
            content=request.json['content'],
            publication_datetime=datetime.now(),
        )
        db.session.add(created)
        db.session.commit()
        return post_schema.dump(created)
class PostResource(Resource):
    """Single-post endpoint: fetch, edit or delete one post."""

    def get(self, post_id):
        """Return one post (404 when it does not exist)."""
        return post_schema.dump(Post.query.get_or_404(post_id))

    @auth.login_required
    def patch(self, post_id):
        """Partially update a post; only its author may edit it."""
        post = Post.query.get_or_404(post_id)
        if post.author_id != g.user.id:
            payload = {'error': 'HTTP 403: Forbidden',
                       'message': 'You can only edit your posts.'}
            return make_response(json.dumps(payload), 403)
        for field in ('title', 'content'):
            if field in request.json:
                setattr(post, field, request.json[field])
        db.session.commit()
        return post_schema.dump(post)

    @auth.login_required
    def delete(self, post_id):
        """Delete a post; only its author may remove it."""
        post = Post.query.get_or_404(post_id)
        if post.author_id != g.user.id:
            payload = {'error': 'HTTP 403: Forbidden',
                       'message': 'You can only delete your posts.'}
            return make_response(json.dumps(payload), 403)
        db.session.delete(post)
        db.session.commit()
        payload = {'message': 'The post was successfully deleted.'}
        return make_response(json.dumps(payload), 200)
class CommentListResource(Resource):
    """Collection endpoint for comments: public list, authenticated create."""

    def get(self):
        """Return every comment."""
        return comments_schema.dump(Comment.query.all())

    @auth.login_required
    def post(self):
        """Create a comment on an existing post; 404 when the post is gone."""
        created = Comment(
            author_id=g.user.id,
            post_id=request.json['post_id'],
            title=request.json['title'],
            content=request.json['content'],
            publication_datetime=datetime.now()
        )
        parent = Post.query.filter_by(id=request.json['post_id']).first()
        if parent is None:
            payload = {'error': 'HTTP 404: Not Found',
                       'message': 'Post with this id was not found.'}
            return make_response(json.dumps(payload), 404)
        db.session.add(created)
        db.session.commit()
        return comment_schema.dump(created)
class CommentResource(Resource):
    """Single-comment endpoint: fetch, edit or delete one comment."""

    def get(self, comment_id):
        """Return one comment (404 when it does not exist)."""
        return comment_schema.dump(Comment.query.get_or_404(comment_id))

    @auth.login_required
    def patch(self, comment_id):
        """Partially update a comment; only its author may edit it."""
        comment = Comment.query.get_or_404(comment_id)
        if comment.author_id != g.user.id:
            payload = {'error': 'HTTP 403: Forbidden',
                       'message': 'You can only edit your comments.'}
            return make_response(json.dumps(payload), 403)
        for field in ('title', 'content'):
            if field in request.json:
                setattr(comment, field, request.json[field])
        db.session.commit()
        return comment_schema.dump(comment)

    @auth.login_required
    def delete(self, comment_id):
        """Delete a comment; only its author may remove it."""
        comment = Comment.query.get_or_404(comment_id)
        if comment.author_id != g.user.id:
            payload = {'error': 'HTTP 403: Forbidden',
                       'message': 'You can only delete your comments.'}
            return make_response(json.dumps(payload), 403)
        db.session.delete(comment)
        db.session.commit()
        payload = {'message': 'The comment was successfully deleted.'}
        return make_response(json.dumps(payload), 200)
# REST routes (all under the /api/v1 prefix configured above).
api.add_resource(UserListResource, '/users')
api.add_resource(UserResource, '/users/<int:user_id>')
api.add_resource(PostListResource, '/posts')
api.add_resource(PostResource, '/posts/<int:post_id>')
api.add_resource(CommentListResource, '/comments')
api.add_resource(CommentResource, '/comments/<int:comment_id>')
AlenaPliusnina/Flask_API | refs/heads/main | from datetime import datetime
from flask_bcrypt import generate_password_hash, check_password_hash
from app import db
class User(db.Model):
    """Application account; owns posts and comments (cascade delete)."""

    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    username = db.Column(db.String(80), unique=True, nullable=False)
    email = db.Column(db.String(120), unique=True, nullable=False)
    password = db.Column(db.String(128), nullable=False)
    posts = db.relationship('Post', backref='user', lazy='dynamic', cascade="all,delete")
    comments = db.relationship('Comment', backref='user', lazy='dynamic', cascade="all,delete")

    def hash_password(self):
        """Replace the plaintext password with its bcrypt hash."""
        hashed = generate_password_hash(self.password)
        self.password = hashed.decode('utf8')

    def verify_password(self, password):
        """Return True when *password* matches the stored hash."""
        return check_password_hash(self.password, password)

    def save(self):
        """Persist this user via the current database session."""
        db.session.add(self)
        db.session.commit()

    def __repr__(self):
        return '<User %r>' % self.username
class Post(db.Model):
    """Blog post written by a user; owns comments (cascade delete)."""

    __tablename__ = 'posts'
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
    title = db.Column(db.String(50), nullable=False)
    content = db.Column(db.String(256), nullable=False)
    # Pass the callable, not datetime.now(): calling it here would evaluate
    # once at import time and freeze the default for every row.
    publication_datetime = db.Column(db.DateTime(), default=datetime.now, nullable=False)
    comments = db.relationship('Comment', backref='post', lazy='dynamic', cascade="all,delete")

    def __repr__(self):
        return '<Post %s>' % self.title
class Comment(db.Model):
    """Comment on a post, written by a user."""

    __tablename__ = 'comments'
    id = db.Column(db.Integer, primary_key=True, nullable=False)
    post_id = db.Column(db.Integer, db.ForeignKey(Post.id), nullable=False)
    author_id = db.Column(db.Integer, db.ForeignKey(User.id), nullable=False)
    title = db.Column(db.String(50), nullable=False)
    content = db.Column(db.String(256), nullable=False)
    # Pass the callable, not datetime.now(): calling it here would evaluate
    # once at import time and freeze the default for every row.
    publication_datetime = db.Column(db.DateTime(), default=datetime.now, nullable=False)

    def __repr__(self):
        return '<Comment %s>' % self.title
AlenaPliusnina/Flask_API | refs/heads/main | from config import Config
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
def create_app():
    """Build and configure the Flask application instance."""
    flask_app = Flask(__name__)
    flask_app.config.from_object(Config)
    flask_app.debug = True
    return flask_app
# Module-level singletons wired together at import time.
app = create_app()
db = SQLAlchemy(app)
migrate = Migrate(app, db)
# Imported late on purpose: api and models need ``app`` and ``db`` above
# (circular-import workaround).
from app import api, models
db.create_all()
AlenaPliusnina/Flask_API | refs/heads/main | from flask_marshmallow import Marshmallow
from app import app
from app.models import User, Post, Comment
ma = Marshmallow(app)
class CommentSchema(ma.Schema):
    """Serializer for Comment objects."""
    class Meta:
        fields = ("id", "post_id", "author_id", "title", "content", "publication_datetime")
        model = Comment
        ordered = True
class PostSchema(ma.Schema):
    """Serializer for Post objects, with nested comments."""
    class Meta:
        fields = ("id", "title", "content", "author_id", "publication_datetime", "comments")
        model = Post
        ordered = True
    comments = ma.Nested(CommentSchema, many=True)
class UserSchema(ma.Schema):
    """Serializer for User objects, with nested posts and comments.

    NOTE(review): "password" (the bcrypt hash) is included in the output
    fields; consider marking it load-only before exposing this publicly.
    """
    class Meta:
        fields = ("id", "username", "email", "password", "posts", "comments")
        model = User
        ordered = True
    # Must nest PostSchema: the original nested CommentSchema here, so a
    # user's posts were serialized with the comment field set.
    posts = ma.Nested(PostSchema, many=True)
    comments = ma.Nested(CommentSchema, many=True)
# Shared schema instances used by the API resources.
post_schema = PostSchema()
posts_schema = PostSchema(many=True)
# These two previously instantiated PostSchema by mistake, so comments
# were serialized with the post field set instead of the comment one.
comment_schema = CommentSchema()
comments_schema = CommentSchema(many=True)
user_schema = UserSchema()
users_schema = UserSchema(many=True)
doanguyen/chasquid | refs/heads/master | #!/usr/bin/env python3
#
# Simple SMTP client for testing purposes.
import argparse
import email.parser
import email.policy
import smtplib
import sys
# Command-line interface: server address plus SMTP AUTH credentials.
ap = argparse.ArgumentParser()
ap.add_argument("--server", help="SMTP server to connect to")
ap.add_argument("--user", help="Username to use in SMTP AUTH")
ap.add_argument("--password", help="Password to use in SMTP AUTH")
args = ap.parse_args()
# Parse the email (read from stdin) using the "default" policy, which is not
# really the default.  If unspecified, compat32 is used, which does not
# support UTF8.
msg = email.parser.Parser(policy=email.policy.default).parse(sys.stdin)
# Authenticated submission over STARTTLS.
s = smtplib.SMTP(args.server)
s.starttls()
s.login(args.user, args.password)
# Note this does NOT support non-ascii message payloads transparently (headers
# are ok).
s.send_message(msg)
s.quit()
| Python | 28 | 28.035715 | 78 | /test/util/smtpc.py | 0.747239 | 0.742331 |
doanguyen/chasquid | refs/heads/master | #!/usr/bin/env python
import difflib
import email.parser
import mailbox
import sys
# Usage: mail_diff <expected-message-file> <mbox-file>
# Compares the first message in the mbox against the expected message.
f1, f2 = sys.argv[1:3]
expected = email.parser.Parser().parse(open(f1))
mbox = mailbox.mbox(f2, create=False)
msg = mbox[0]
diff = False
# Every expected header must be present; a value of "*" matches anything.
for h, val in expected.items():
    if h not in msg:
        print("Header missing: %r" % h)
        diff = True
        continue
    if expected[h] == '*':
        continue
    if msg[h] != val:
        print("Header %r differs: %r != %r" % (h, val, msg[h]))
        diff = True
def flexible_eq(expected, got):
    """Compare two strings, supporting wildcards.

    This function compares two strings, but supports wildcards on the
    expected string. The following characters have special meaning:

     - ? matches any character.
     - * matches anything until the end of the line.

    Returns True if equal (considering wildcards), False otherwise.
    Note that trailing content in *got* beyond *expected* is ignored.
    """
    posG = 0
    for c in expected:
        if posG >= len(got):
            return False
        if c == '?':
            posG += 1
            continue
        if c == '*':
            # Skip to the next newline, or the end of the string.  The
            # original loop indexed unconditionally and raised IndexError
            # when '*' matched the final (newline-less) line.
            while posG < len(got) and got[posG] != '\n':
                posG += 1
            continue
        if c != got[posG]:
            return False
        posG += 1
    return True
# Compare payloads with wildcard support; report details when they differ.
if not flexible_eq(expected.get_payload(), msg.get_payload()):
    diff = True
    # A multipart mismatch makes a line diff meaningless, so just say so.
    if expected.is_multipart() != msg.is_multipart():
        print("Multipart differs, expected %s, got %s" % (
            expected.is_multipart(), msg.is_multipart()))
    elif not msg.is_multipart():
        # Show a line-by-line diff of the two payloads.
        exp = expected.get_payload().splitlines()
        got = msg.get_payload().splitlines()
        print("Payload differs:")
        for l in difflib.ndiff(exp, got):
            print(l)
# Exit status 0 when the message matches the expectation, 1 otherwise.
sys.exit(0 if not diff else 1)
| Python | 77 | 20.805195 | 70 | /test/util/mail_diff | 0.599762 | 0.592019 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | from geopy.distance import geodesic
from utils.gis import geodistkm
def test_gis():
    """geodistkm must agree with geopy's geodesic distance in km."""
    abq = [35.0844, -106.6504]  # (lat, lon)
    los_alamos = [35.8800, -106.3031]  # (lat, lon)
    expected = geodesic(abq, los_alamos).km
    actual = geodistkm(abq[1], abq[0], los_alamos[1], los_alamos[0])
    assert expected == actual
| Python | 11 | 30.727272 | 82 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/utils/test_gis.py | 0.696275 | 0.598854 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import datetime
class Hurricane:
    """One point on a hurricane trajectory.

    Holds the eye position, storm geometry, pressure-field parameters and
    timing needed to evaluate the wind/pressure model at this point.
    """

    def __init__(self, center: tuple, extent: float, pcentral: float, deltap: float,
                 vmax: float, b: float, time: float, initial_datetime: datetime.datetime):
        # Position of the eye (lon, lat) in radians.
        self.center = center
        # Maximum extent of the hurricane in kilometers.
        self.extent = extent
        # Forward velocity [ve, vn] in km/hr; filled in later via set_vf().
        self.vforward = []
        # Central pressure in millibars.
        self.pcentral = pcentral
        # Pressure difference in millibars.
        self.deltap = deltap
        # Maximum gradient wind [ve, vn] in km/hr.
        self.vmax = vmax
        # Holland parameter, conventionally in the range [0.5, 2.5].
        self.b = b
        # Time of this trajectory point in hours, relative to ref_time.
        self.time = time
        self.ref_time = initial_datetime

    def set_vf(self, vf: tuple):
        """Record the forward velocity (ve, vn) in km/hr."""
        self.vforward = vf
nairita87/Ocean_dir | refs/heads/ocean_coastal | #!/usr/bin/env python
'''
name: define_base_mesh
authors: Phillip J. Wolfram
This function specifies a high resolution patch for
Chris Jeffrey.
'''
import numpy as np
def cellWidthVsLatLon():
    """Build the cell-width map: 120 km background refined to 12 km inside
    a 10-degree-radius patch centred at (20 N, 155 W).

    Returns
    -------
    cellWidth : numpy.ndarray
        Desired cell width in km over the lat/lon grid.
    lon, lat : numpy.ndarray
        Grid coordinate vectors in degrees.
    """
    lat = np.arange(-90, 90.01, 1.0)
    lon = np.arange(-180, 180.01, 2.0)
    km = 1000.0  # kept for parity with sibling scripts (unused here)
    # Resolutions in km and the high-resolution patch definition.
    baseRes = 120.0
    highRes = 12.0
    latC = 20.0
    lonC = -155.0
    rad = 10.0
    # Normalised angular distance from the patch centre, clipped at 1.
    dlat2 = (lat - latC) * (lat - latC)
    dlon2 = (lon - lonC) * (lon - lonC)
    theta = np.minimum(np.sqrt(dlat2[:, np.newaxis] + dlon2[np.newaxis, :]) / rad, 1.0)
    # Linear blend between the patch and background resolutions.
    # NOTE(review): theta broadcasts to (lat.size, lon.size) while the ones
    # factor is (lon.size, lat.size); the shapes coincide here only because
    # both vectors have 181 entries -- verify the intended orientation.
    cellWidth = (baseRes * theta + (1.0 - theta) * highRes) * np.ones((lon.size, lat.size))
    return cellWidth, lon, lat
| Python | 28 | 20.964285 | 119 | /testing_and_setup/compass/ocean/global_ocean/HI120to12/build_mesh/define_base_mesh.py | 0.611382 | 0.539837 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | # Author: Steven Brus
# Date: April, 2020
# Description: Plots syntetic wind/pressure timeseries on MPAS-O mesh
import netCDF4
import matplotlib.pyplot as plt
import numpy as np
import os
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
plt.switch_backend('agg')
# Allow overriding where cartopy looks for its offline map data.
cartopy.config['pre_existing_data_dir'] = \
    os.getenv('CARTOPY_DIR', cartopy.config.get('pre_existing_data_dir'))
#######################################################################
#######################################################################
def plot_data(lon_grid, lat_grid, data, var_label, var_abrev, time):
    """Contour-plot one time slice of *data* on the lon/lat cell cloud
    and save it as <var_abrev>_<frame>.png.

    Parameters
    ----------
    lon_grid, lat_grid : array-like
        Cell-centre coordinates in degrees.
    data : array-like
        Field values at each cell for this time slice.
    var_label : str
        Colorbar label.
    var_abrev : str
        Output filename prefix.
    time : str
        Timestamp used in the plot title.
    """
    fig = plt.figure()
    ax1 = fig.add_subplot(1, 1, 1, projection=ccrs.PlateCarree())
    levels = np.linspace(np.amin(data), np.amax(data), 100)
    cf = ax1.tricontourf(lon_grid, lat_grid, data, levels=levels,
                         transform=ccrs.PlateCarree())
    ax1.set_extent([0, 359.9, -90, 90], crs=ccrs.PlateCarree())
    ax1.add_feature(cfeature.LAND, zorder=100)
    ax1.add_feature(cfeature.LAKES, alpha=0.5, zorder=101)
    ax1.add_feature(cfeature.COASTLINE, zorder=101)
    ax1.set_title('interpolated data ' + time.strip())
    cbar = fig.colorbar(cf, ax=ax1)
    cbar.set_label(var_label)
    # Save figure.  The keyword was previously misspelled ``box_inches``,
    # so the tight bounding box was never applied.
    # NOTE(review): ``i`` is the caller's module-level loop variable, not a
    # parameter -- this only works when called from the __main__ loop
    # below; consider passing the frame index explicitly.
    fig.tight_layout()
    fig.savefig(var_abrev + '_' + str(i).zfill(4) + '.png', bbox_inches='tight')
    plt.close()
#######################################################################
#######################################################################
if __name__ == '__main__':

    grid_file = 'mesh.nc'
    data_file = 'out.nc'

    # Cell-centre coordinates from the mesh, converted radians -> degrees.
    grid_nc = netCDF4.Dataset(grid_file,'r')
    lon_grid = grid_nc.variables['lonCell'][:]*180.0/np.pi
    lat_grid = grid_nc.variables['latCell'][:]*180.0/np.pi

    # Interpolated wind/pressure forcing fields and their timestamps.
    data_nc = netCDF4.Dataset(data_file,'r')
    u_data = data_nc.variables['windSpeedU'][:]
    v_data = data_nc.variables['windSpeedV'][:]
    p_data = data_nc.variables['atmosPressure'][:]
    xtime = data_nc.variables['xtime'][:]

    for i in range(u_data.shape[0]-1):
        print('Plotting vel: '+str(i))

        # Wind speed magnitude from the two horizontal components.
        data = np.sqrt(np.square(u_data[i,:]) + np.square(v_data[i,:]))

        # xtime is a character array; decode and join it into one string.
        time_ls = [x.decode("utf-8") for x in xtime[i]]
        time = ''.join(time_ls)

        plot_data(lon_grid,lat_grid,data,'velocity magnitude','vel',time)
        plot_data(lon_grid,lat_grid,p_data[i,:],'atmospheric pressure','pres',time)
| Python | 64 | 34.71875 | 89 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/plot_winds_on_mpaso_mesh.py | 0.590114 | 0.566929 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | #!/usr/bin/env python
"""
Tidal channel comparison betewen MPAS-O and analytical forcing result.
Phillip J. Wolfram
04/12/2019
"""
import numpy as np
import xarray as xr
import matplotlib.pyplot as plt
# render statically by default
# render statically by default
plt.switch_backend('agg')
# Analytical forcing: one full sine cycle over a 24-unit tidal period.
x = np.linspace(0,24,100)
y = np.sin(x*2*np.pi/24)
plt.plot(x,y, lw=3, color='black', label='analytical')
# MPAS-O result: mean ssh over the boundary cells (those at minimum y).
ds = xr.open_mfdataset('output.nc')
mask = ds.where(ds.yCell.values.min() == ds.yCell)
mask.ssh.mean('nCells').plot(marker='o', label='MPAS-O')
plt.legend()
plt.ylabel('ssh amplitude (m)')
plt.xlabel('Time (min)')
plt.savefig('tidalcomparison.png')
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy as np
import jigsaw_to_MPAS.mesh_definition_tools as mdt
from jigsaw_to_MPAS.coastal_tools import signed_distance_from_geojson, \
compute_cell_width
from geometric_features import read_feature_collection
import xarray
# Uncomment to plot the cell size distribution.
# import matplotlib
# matplotlib.use('Agg')
# import matplotlib.pyplot as plt
def cellWidthVsLatLon():
    """
    Create cell width array for this mesh on a regular latitude-longitude grid.
    Returns
    -------
    cellWidth : numpy.ndarray
        m x n array, entries are desired cell width in km
    lat : numpy.ndarray
        latitude, vector of length m, with entries between -90 and 90,
        degrees
    lon : numpy.ndarray
        longitude, vector of length n, with entries between -180 and 180,
        degrees
    """
    # Background lat/lon grid at 0.1 degree resolution, inclusive of both ends.
    dlon = 0.1
    dlat = dlon
    nlon = int(360./dlon) + 1
    nlat = int(180./dlat) + 1
    lon = np.linspace(-180., 180., nlon)
    lat = np.linspace(-90., 90., nlat)
    # Uniform 30 km cells in the southern hemisphere, EC-style widths north.
    cellWidthSouth = 30. * np.ones((len(lat)))
    # Transition at Equator
    cellWidthNorth = mdt.EC_CellWidthVsLat(lat)
    latTransition = 0.0
    latWidthTransition = 5.0
    cellWidthVsLat = mdt.mergeCellWidthVsLat(
        lat,
        cellWidthSouth,
        cellWidthNorth,
        latTransition,
        latWidthTransition)
    # Broadcast the 1-D width-vs-latitude profile to the full 2-D grid.
    _, cellWidth = np.meshgrid(lon, cellWidthVsLat)
    # now, add the high-res region
    fc = read_feature_collection('high_res_region.geojson')
    signed_distance = signed_distance_from_geojson(fc, lon, lat,
                                                   max_length=0.25)
    # Save the signed distance field for inspection/debugging.
    da = xarray.DataArray(signed_distance,
                          dims=['y', 'x'],
                          coords={'y': lat, 'x': lon},
                          name='signed_distance')
    cw_filename = 'signed_distance.nc'
    da.to_netcdf(cw_filename)
    # multiply by 5 because transition_width gets multiplied by 0.2 in
    # compute_cell_width
    # Equivalent to 10 degrees latitude
    trans_width = 5*1100e3
    # The last term compensates for the offset in compute_cell_width.
    # The middle of the transition is ~2.5 degrees (300 km) south of the
    # region boundary to best match previous transition at 48 S. (The mean lat
    # of the boundary is 45.5 S.)
    trans_start = -300e3 - 0.5 * trans_width
    dx_min = 10.
    cellWidth = compute_cell_width(signed_distance, cellWidth, lon,
                                   lat, dx_min, trans_start, trans_width,
                                   restrict_box={'include': [], 'exclude': []})
    # Uncomment to plot the cell size distribution.
    # Lon, Lat = np.meshgrid(lon, lat)
    # ax = plt.subplot(111)
    # plt.pcolormesh(Lon, Lat, cellWidth)
    # plt.colorbar()
    # ax.set_aspect('equal')
    # ax.autoscale(tight=True)
    # plt.tight_layout()
    # plt.savefig('cellWidthVsLat.png', dpi=200)
    return cellWidth, lon, lat
| Python | 88 | 32.590908 | 79 | /testing_and_setup/compass/ocean/global_ocean/SO60to10wISC/init/define_base_mesh.py | 0.611976 | 0.586942 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy
import matplotlib
# Bug fix: the backend must be selected *before* matplotlib.pyplot is
# imported; calling matplotlib.use('Agg') after pyplot exists can be ignored.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from netCDF4 import Dataset
# Plot temperature sections for four internal-wave runs (one per viscosity)
# at two output times, on an nRow x nCol panel grid.
nRow = 4
nCol = 2
nu = ['0.01', '1', '15', '150']   # nu_h label for each run (row)
iTime = [1, 2]                    # time indices plotted per column
time = ['day 10', 'day 20']       # matching column titles
# (a stray 'fig = plt.gcf()' was removed: it created an extra, unused figure
# that was immediately shadowed by plt.subplots below)
fig, axs = plt.subplots(nRow, nCol, figsize=(
    4.0 * nCol, 3.7 * nRow), constrained_layout=True)
for iRow in range(nRow):
    ncfile = Dataset('output_' + str(iRow + 1) + '.nc', 'r')
    var = ncfile.variables['temperature']
    xtime = ncfile.variables['xtime']
    for iCol in range(nCol):
        ax = axs[iRow, iCol]
        # every 4th index along the second axis; transpose puts depth on y
        dis = ax.imshow(var[iTime[iCol], 0::4, :].T, extent=[
            0, 250, 500, 0], aspect='0.5', cmap='jet', vmin=10, vmax=20)
        if iRow == nRow - 1:
            ax.set_xlabel('x, km')
        if iCol == 0:
            ax.set_ylabel('depth, m')
        if iCol == nCol - 1:
            fig.colorbar(dis, ax=axs[iRow, iCol], aspect=10)
        ax.set_title(time[iCol] + ", " + r"$\nu_h=$" + nu[iRow])
    ncfile.close()
plt.savefig('sections_internal_waves.png')
| Python | 34 | 29.529411 | 84 | /testing_and_setup/compass/ocean/internal_waves/5km/rpe_test/plot.py | 0.55684 | 0.514451 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from netCDF4 import Dataset
import numpy
# Compare temperature sections from the overflow runs (one row per
# viscosity) at two output times, laid out on an n_rows x n_cols panel grid.
fig = plt.gcf()
fig.set_size_inches(8.0, 10.0)
n_rows = 1  # 6
n_cols = 2
viscosities = ['0.01', '0.1', '1', '10', '100', '1000']
snap_indices = [3, 6]
snap_labels = ['3 hrs', '6 hrs']
for row in range(n_rows):
    nc = Dataset('output_' + str(row + 1) + '.nc', 'r')
    temperature = nc.variables['temperature']
    xtime = nc.variables['xtime']
    for col in range(n_cols):
        plt.subplot(n_rows, n_cols, row * n_cols + col + 1)
        plt.imshow(temperature[snap_indices[col], 0::4, :].T,
                   extent=[0, 200, 2000, 0], aspect=2)
        plt.clim([10, 20])
        plt.jet()
        if row == n_rows - 1:
            plt.xlabel('x, km')
        if col == 0:
            plt.ylabel('depth, m')
        plt.colorbar()
        plt.title('time=' + snap_labels[col] + ', nu=' + viscosities[row])
    nc.close()
plt.savefig('sections_overflow.png')
| Python | 31 | 28.161291 | 81 | /testing_and_setup/compass/ocean/overflow/1km/rpe_test/plot.py | 0.590708 | 0.533186 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import pytest
from hurricane_model.hurricane import Hurricane
def test_hurricane():
    """Constructing a Hurricane and applying each setter stores every field verbatim."""
    eye = [1.0, 2.0]        # eye position (lon, lat) in decimal degrees
    radius = 100.0          # maximum extent of the hurricane in kilometers
    forward = [3.0, 4.0]    # forward velocity [ve, vn] in km/hr
    p_central = 200.0       # central pressure in millibars
    dp = 50.0               # pressure difference in millibars
    v_max = 15.0            # maximum gradient wind speed in km/hr
    holland_b = 1.2         # Holland parameter, conventionally in [0.5, 2.5]
    storm = Hurricane(eye, radius)
    storm.setVForward(forward[0], forward[1])
    storm.setPCentral(p_central)
    storm.setDeltaP(dp)
    storm.setVMax(v_max)
    storm.setB(holland_b)
    assert storm.center == eye
    assert storm.extent == radius
    assert storm.vforward == forward
    assert storm.pcentral == p_central
    assert storm.deltap == dp
    assert storm.vmax == v_max
    assert storm.b == holland_b
| Python | 26 | 38.192307 | 76 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/hurricane/test_hurricane.py | 0.696762 | 0.667321 |
nairita87/Ocean_dir | refs/heads/ocean_coastal |
def sign(x):
    """Return 1 for x >= 0 and -1 otherwise (note: sign(0) == 1 by design)."""
    return 1 if x >= 0 else -1
| Python | 5 | 13.4 | 17 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/utils/math.py | 0.405405 | 0.364865 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy as np
import matplotlib.pyplot as plt
def example():
    """Draw a minimal 2x2 streamline plot and display it interactively."""
    x = np.linspace(-1, 1, 2)
    y = np.linspace(-1, 1, 2)
    # Vector field components on the 2x2 grid (same values the original
    # filled in element by element).
    A = np.array([[1.0, 1.0],
                  [-1.0, -1.0]])
    B = np.array([[-1.0, 1.0],
                  [-1.0, 1.0]])
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Plot the streamlines.
    ax.streamplot(x, y, A, B)
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
    ax.set_xlim(-2, 2)
    ax.set_ylim(-2, 2)
    ax.set_aspect('equal')
    plt.show()
if __name__=='__main__':
    example()
| Python | 30 | 17.566668 | 50 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/ad_hoc/simple_vector_example.py | 0.505376 | 0.4319 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | ../comparison.py | Python | 1 | 16 | 16 | /testing_and_setup/compass/ocean/drying_slope/zstar_variableCd/1km/analysis/comparison.py | 0.75 | 0.75 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import netCDF4
import numpy as np
import hurricane_model as Hurricane
import structures as Geogrid
import winds_io as WindModel
import matplotlib.pyplot as plt
import datetime
def write_netcdf(filename: str, curr_hurricane: Hurricane, grid: Geogrid, winds: WindModel):
    """Write wind components and atmospheric pressure per track snapshot to a
    NETCDF3 forcing file.

    NOTE(review): the annotations name the *modules* imported above, not
    classes; curr_hurricane is a list of hurricane snapshots and winds a
    list of wind-model results (one per track interval) -- confirm callers.
    """
    # http://unidata.github.io/netcdf4-python/#section1
    rootgrp = netCDF4.Dataset(filename, "w", format="NETCDF3_64BIT_OFFSET")
    # Declare dimensions (Time is the unlimited record dimension)
    rootgrp.createDimension('nCells',grid.ncells)
    rootgrp.createDimension('StrLen',64)
    rootgrp.createDimension('Time',None)
    # Declare variables
    time = rootgrp.dimensions['Time'].name
    ncells = rootgrp.dimensions['nCells'].name
    time_var = rootgrp.createVariable('xtime','S1',('Time','StrLen'))
    u_var = rootgrp.createVariable('windSpeedU',np.float64,(time,ncells))
    v_var = rootgrp.createVariable('windSpeedV',np.float64,(time,ncells))
    pres_var = rootgrp.createVariable('atmosPressure',np.float64,(time,ncells))
    # Format time: one 64-char timestamp string per snapshot, offset in hours
    # from the reference time of the first snapshot.
    ref_date = curr_hurricane[0].ref_time
    xtime = []
    for it in range(0,len(curr_hurricane)):
        t = curr_hurricane[it].time
        date = ref_date + datetime.timedelta(hours=np.float64(t))
        xtime.append(date.strftime('%Y-%m-%d_%H:%M:%S'+45*' '))
    xtime = np.asarray(xtime)
    # netCDF3 char variables need one character per element, so split each
    # timestamp string into a list of characters.
    xtime_list = []
    for t in xtime:
        xtime_list.append(list(t))
    time_var[:] = xtime_list
    # Assign variables, converting km/hr -> m/s and mbar -> Pa.
    kmh_to_mps = 0.277778
    mbar_to_pa = 100.0
    # NOTE(review): only len-1 records are written because winds holds one
    # entry per track *interval* (see compute_winds); xtime has len entries.
    for it in range(0, len(curr_hurricane)-1):
        u_var[it, :] = winds[it].u * kmh_to_mps
        v_var[it, :] = winds[it].v * kmh_to_mps
        pres_var[it, :] = winds[it].pressure_profile * mbar_to_pa
    # Close
    rootgrp.close()
| Python | 48 | 34.458332 | 92 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds_io/output_data.py | 0.666275 | 0.645711 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | from winds_io import import_data
from winds_io import output_data
from structures import geogrid
import sys
import numpy as np
from winds import parameters
from winds import wind_model
def sim_hurricane():
    """Top-level driver: read user inputs, build the grid and hurricane track,
    compute the wind/pressure fields, and write them to 'out.nc'."""
    # Read in the input file to check which grid we are using
    print('Import user inputs')
    traj_filename, grid_flag, grid_filename, ambient_pressure, holland_b_param = \
        import_data.read_input_file('hurricane_inputs.txt')
    # Read grid-specific parameters and create grid
    print('Read-in grid')
    grid = import_data.initialize_grid(grid_filename, grid_flag)
    # Read hurricane trajectory and set hurricane parameters
    print('Initialize hurricane trajectory data')
    curr_hurricane = import_data.initialize_hurricane(traj_filename, ambient_pressure, holland_b_param)
    # Define parameters (based on the mean latitude of the track)
    print('Define parameters')
    params = define_params(curr_hurricane)
    # Compute winds on grid (one wind model per track interval)
    print('Compute winds')
    winds = compute_winds(curr_hurricane, params, grid)
    # Output results
    print('Output results')
    output_data.write_netcdf('out.nc', curr_hurricane, grid, winds)
def compute_winds(curr_hurricane, params, grid: geogrid):
    """Build one WindModel per interval of the hurricane track (len-1 models)."""
    ntimes = len(curr_hurricane) - 1
    winds = []
    for step in range(ntimes):
        print('Time iteration %d / %d' % (step + 1, ntimes))
        winds.append(wind_model.WindModel(params, curr_hurricane[step], grid))
    return winds
def define_params(curr_hurricane):
    """Build a Parameters object from the mean latitude of the storm track."""
    track_lats = [snapshot.center[1] for snapshot in curr_hurricane]
    return parameters.Parameters(np.mean(track_lats))
if __name__ == "__main__":
sim_hurricane()
print('Program executed succesfully')
sys.exit(0)
# # Read in the input file to check which grid we are using
# traj_filename, grid_flag, grid_filename = import_data.read_input_file('hurricane_inputs.txt')
#
# # Read hurricane trajectory
# traj = import_data.read_json(traj_filename)
#
# # Create trajectory object
# curr_hurricane = initialize_hurricane(traj)
#
# # Read grid-specific parameters
# if grid_flag == 1:
# xll, yll, cellsize, numcells_lat, numcells_lon = import_data.read_raster_inputs(grid_filename)
# else:
# coord = import_data.read_netcdf(grid_filename)
# Create the grid
| Python | 71 | 32.211269 | 104 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/main.py | 0.683051 | 0.679661 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | #!/usr/bin/env python
"""
This script performs the first step of initializing the global ocean. This
includes:
Step 1. Build cellWidth array as function of latitude and longitude
Step 2. Build mesh using JIGSAW
Step 3. Convert triangles from jigsaw format to netcdf
Step 4. Convert from triangles to MPAS mesh
Step 5. Create vtk file for visualization
"""
from __future__ import absolute_import, division, print_function, \
unicode_literals
import subprocess
import os
import xarray
import argparse
import matplotlib.pyplot as plt
from mpas_tools.conversion import convert
from mpas_tools.io import write_netcdf
from jigsaw_to_MPAS.jigsaw_driver import jigsaw_driver
from jigsaw_to_MPAS.triangle_jigsaw_to_netcdf import jigsaw_to_netcdf
from jigsaw_to_MPAS.inject_bathymetry import inject_bathymetry
from jigsaw_to_MPAS.inject_meshDensity import inject_meshDensity
from jigsaw_to_MPAS.inject_preserve_floodplain import \
inject_preserve_floodplain
from define_base_mesh import define_base_mesh
def build_mesh(
        preserve_floodplain=False,
        floodplain_elevation=20.0,
        do_inject_bathymetry=False,
        geometry='sphere',
        plot_cellWidth=True):
    """
    Build a global (or planar) MPAS base mesh.

    Steps: (1) build the cellWidth array, (2) generate the mesh with JIGSAW,
    (3) convert triangles to netCDF, (4) convert to an MPAS mesh, (5) inject
    meshDensity, optionally inject bathymetry/floodplain flags, and create a
    VTK file for visualization.

    Parameters
    ----------
    preserve_floodplain : bool, optional
        Inject a flag to preserve the floodplain in the mesh file.
    floodplain_elevation : float, optional
        Elevation (m) used when preserving the floodplain.
    do_inject_bathymetry : bool, optional
        Inject bathymetry into the base mesh.
    geometry : {'sphere', 'plane'}, optional
        Whether the mesh is on the sphere or planar.
    plot_cellWidth : bool, optional
        Plot the cell-width distribution (spherical meshes only).
    """
    if geometry == 'sphere':
        on_sphere = True
    else:
        on_sphere = False
    print('Step 1. Build cellWidth array as function of horizontal coordinates')
    if on_sphere:
        cellWidth, lon, lat = define_base_mesh.cellWidthVsLatLon()
        da = xarray.DataArray(cellWidth,
                              dims=['lat', 'lon'],
                              coords={'lat': lat, 'lon': lon},
                              name='cellWidth')
        cw_filename = 'cellWidthVsLatLon.nc'
        da.to_netcdf(cw_filename)
        # Bug fix: plot_cellWidth was unconditionally overwritten to True
        # here, which made the function argument (and the --plot_cellWidth
        # CLI flag) dead.  Honor the caller's choice instead.
        if plot_cellWidth:
            import matplotlib
            from cartopy import config
            import cartopy.crs as ccrs
            matplotlib.use('Agg')
            fig = plt.figure()
            fig.set_size_inches(16.0, 8.0)
            plt.clf()
            ax = plt.axes(projection=ccrs.PlateCarree())
            ax.set_global()
            im = ax.imshow(cellWidth, origin='lower', transform=ccrs.PlateCarree(
            ), extent=[-180, 180, -90, 90], cmap='jet')
            ax.coastlines()
            gl = ax.gridlines(
                crs=ccrs.PlateCarree(),
                draw_labels=True,
                linewidth=1,
                color='gray',
                alpha=0.5,
                linestyle='-')
            gl.xlabels_top = False
            gl.ylabels_right = False
            plt.title('Grid cell size, km')
            plt.colorbar(im, shrink=.60)
            plt.savefig('cellWidthGlobal.png')
    else:
        cellWidth, x, y, geom_points, geom_edges = define_base_mesh.cellWidthVsXY()
        da = xarray.DataArray(cellWidth,
                              dims=['y', 'x'],
                              coords={'y': y, 'x': x},
                              name='cellWidth')
        cw_filename = 'cellWidthVsXY.nc'
        da.to_netcdf(cw_filename)
    print('Step 2. Generate mesh with JIGSAW')
    if on_sphere:
        jigsaw_driver(cellWidth, lon, lat)
    else:
        jigsaw_driver(
            cellWidth,
            x,
            y,
            on_sphere=False,
            geom_points=geom_points,
            geom_edges=geom_edges)
    print('Step 3. Convert triangles from jigsaw format to netcdf')
    jigsaw_to_netcdf(msh_filename='mesh-MESH.msh',
                     output_name='mesh_triangles.nc', on_sphere=on_sphere)
    print('Step 4. Convert from triangles to MPAS mesh')
    write_netcdf(convert(xarray.open_dataset('mesh_triangles.nc')),
                 'base_mesh.nc')
    print('Step 5. Inject correct meshDensity variable into base mesh file')
    inject_meshDensity(cw_filename=cw_filename,
                       mesh_filename='base_mesh.nc', on_sphere=on_sphere)
    if do_inject_bathymetry:
        print('Step 6. Injecting bathymetry')
        inject_bathymetry(mesh_file='base_mesh.nc')
    if preserve_floodplain:
        print('Step 7. Injecting flag to preserve floodplain')
        inject_preserve_floodplain(mesh_file='base_mesh.nc',
                                   floodplain_elevation=floodplain_elevation)
    print('Step 8. Create vtk file for visualization')
    args = ['paraview_vtk_field_extractor.py',
            '--ignore_time',
            '-l',
            '-d', 'maxEdges=0',
            '-v', 'allOnCells',
            '-f', 'base_mesh.nc',
            '-o', 'base_mesh_vtk']
    print("running", ' '.join(args))
    subprocess.check_call(args, env=os.environ.copy())
    print("***********************************************")
    print("**    The global mesh file is base_mesh.nc   **")
    print("***********************************************")
if __name__ == '__main__':
    # Command-line entry point: flags mirror build_mesh()'s keyword arguments.
    cli = argparse.ArgumentParser()
    cli.add_argument('--preserve_floodplain', action='store_true')
    cli.add_argument('--floodplain_elevation', action='store', type=float,
                     default=20.0)
    cli.add_argument('--inject_bathymetry', action='store_true')
    cli.add_argument('--geometry', default='sphere')
    cli.add_argument('--plot_cellWidth', action='store_true')
    opts = cli.parse_args()
    build_mesh(opts.preserve_floodplain, opts.floodplain_elevation,
               opts.inject_bathymetry, opts.geometry, opts.plot_cellWidth)
| Python | 151 | 35.278145 | 83 | /testing_and_setup/compass/ocean/jigsaw_to_MPAS/build_mesh.py | 0.586528 | 0.579226 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy as np
class GeoGrid:
    """Unstructured set of cell-center coordinates for a geographic grid."""

    def __init__(self, lon: np.ndarray, lat: np.ndarray):
        """
        Constructor.
        :param lon: longitudes of the cell centers in radians, as a numpy array
        :param lat: latitudes of the cell centers in radians, as a numpy array
        """
        self.lon, self.lat = lon, lat
        # number of cells is the length of the coordinate arrays
        self.ncells = len(lon)
# '''
# A class that defines the structure, location, extent, and resolution of a geographic grid.
# The grid is not the same as a geospatial raster, but is related in that, while a raster numbers vertical cells
# starting from the top of the raster, the grid cells are numbered from the bottom. That is, a raster is oriented
# like a raster of pixels, while the geographic grid is oriented like a regular Cartesian grid of cells. The
# data in the grid is contained in a two-dimensional NumPy array. Because of this, the grid cell is indexed like
# a Fortran array (column major indexing, i.e. i=column, j=row).
# '''
# def __init__(self, lon, lat, nlon, nlat, cellsize, defaultValue=0.0):
# '''
# Constructor.
# :param lon: Lower-left longitude of the grid in decimal degrees.
# :param lat: Lower-left latitude of the grid in decimal degrees.
# :param nlon: The number of cells in longitude.
# :param nlat: The number of cells in latitude.
# :param cellsize: The size of a cell in the grid.
# '''
# self.lon = lon
# self.lat = lat
# self.nlon = nlon
# self.nlat = nlat
# self.cellsize = cellsize
# self.defaultValue = defaultValue
# self.grid = np.zeros([nlat,nlon],dtype=np.float64)
# self.bounds = [self.lon, self.lon + self.nlon*self.cellsize,
# self.lat, self.lat + self.nlat*self.cellsize]
#
#
# def put(self,i,j,v):
# if self.indexInside(i,j):
# self.grid[self.nlat-j-1,i]=v
#
# def getByIndex(self,i,j):
# if self.indexInside(i,j):
# return self.grid[self.nlat-j-1,i]
# else:
# return self.defaultValue
#
# def getByCoordinate(self,lon,lat):
# if self.coordinateInside(lon,lat):
# index = self.getIndex(lon,lat)
# return self.getByIndex(index[0],index[1])
# else:
# return self.defaultValue
#
# def clear(self):
# self.grid.fill(0.0)
#
# def indexInside(self,i,j):
# if i>=0 and i<self.nlon and j>=0 and j<self.nlat:
# return True
# else:
# return False
#
# def coordinateInside(self,lon,lat):
# if lon>=self.bounds[0] and lon<=self.bounds[1] and lat>=self.bounds[2] and lat<=self.bounds[3]:
# return True
# else:
# return False
#
# def getOrigin(self):
# return [self.lon,self.lat]
#
# def getCenter(self,i,j):
# clon = self.lon + (i+0.5)*self.cellsize
# clat = self.lat + (j+0.5)*self.cellsize
# return [clon,clat]
#
# def getIndex(self,lon,lat):
# i = int((lon-self.lon)/self.cellsize)
# j = int((lat-self.lat)/self.cellsize)
# return [i,j]
#
| Python | 89 | 35.213482 | 117 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/structures/geogrid.py | 0.573069 | 0.566863 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import math
class Velocities:
    """Composes the storm's forward motion with the tangential gradient wind."""

    def __init__(self, vfe, vfn, vmax):
        """
        Initialize with the forward velocity components.
        :param vfe: Eastward forward velocity (x-component in the Earth frame) in km/hr.
        :param vfn: Northward forward velocity (y-component in the Earth frame) in km/hr.
        :param vmax: Maximum gradient wind speed in km/hr.
        """
        self.set_vforward(vfe, vfn)
        self.vmax = vmax

    def set_vforward(self, vfe, vfn):
        """Store the forward velocity and the storm-frame unit vectors."""
        magnitude = math.sqrt(vfe ** 2 + vfn ** 2)
        self.vf = [vfe, vfn]
        self.vfmagn = magnitude
        # x axis points 90 degrees clockwise of the forward direction;
        # y axis points along the forward direction.
        self.xunitv = [vfn / magnitude, -vfe / magnitude]
        self.yunitv = [vfe / magnitude, vfn / magnitude]

    def compute_wind_vector(self, vg, xe, yn):
        """
        Return [ve, vn] given the tangential gradient wind speed.
        :param vg: Tangential (theta) gradient wind speed in the hurricane frame, km/hr.
        :param xe: Eastern offset from the hurricane eye in km.
        :param yn: Northern offset from the hurricane eye in km.
        :return: [ve, vn], eastward/northward wind components in the Earth frame, km/hr.
        """
        rmagn = math.sqrt(xe*xe + yn*yn)
        ex, ey = self.xunitv
        fx, fy = self.yunitv
        # cosine/sine of the azimuth of (xe, yn) in the storm frame
        costheta = (xe * ex + yn * ey) / rmagn
        sintheta = -(xe * ey - yn * ex) / rmagn
        # unit vector in the tangential (theta) direction
        theta_unitv = [-sintheta * ex + costheta * fx,
                       -sintheta * ey + costheta * fy]
        vgtheta = [theta_unitv[0] * vg, theta_unitv[1] * vg]
        # forward motion contribution scales with the local gradient wind
        vfcorr = vg / self.vmax
        ve = self.vf[0] * vfcorr + vgtheta[0]
        vn = self.vf[1] * vfcorr + vgtheta[1]
        return [ve, vn]
| Python | 46 | 39.239132 | 111 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/velocities.py | 0.606911 | 0.598272 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy as np
import math
class RadialProfile():
    """A radial profile f(r) sampled at n evenly spaced radii on [0, extent]."""

    def __init__(self,n,extent):
        """
        :param n: number of radial samples (n >= 2).
        :param extent: outermost radius; samples are spaced extent/(n-1) apart.
        """
        self.profile = np.zeros(n,dtype=np.float64)
        self.rvals = np.zeros(n,dtype=np.float64)
        self.n = n
        self.extent = extent
        self.dr = extent/(n-1)
        for i in range(0,n):
            self.rvals[i] = i*self.dr

    def getValue(self,r):
        """Return the sampled profile value at radius r (0.0 outside [0, extent])."""
        if r<0 or r>self.extent:
            return 0.0
        k = int(r/self.dr)
        # Bug fix: previously returned self.rvals[k] (the sample radius
        # itself) instead of the profile value stored at that radius.
        return self.profile[k]
class PressureProfile(RadialProfile):
    """Radial pressure profile parameterized by central pressure, deficit, and rmax."""

    def __init__(self, n, extent, pcentral, deltap, rmax):
        """
        :param pcentral: central pressure (eye).
        :param deltap: pressure deficit relative to the ambient pressure.
        :param rmax: radius of maximum winds.
        """
        super().__init__(n, extent)
        self.pcentral, self.deltap, self.rmax = pcentral, deltap, rmax
class HollandPressureProfile(PressureProfile):
    """Holland pressure profile: p(r) = pcentral + deltap * exp(-(rmax/r)**b)."""

    def __init__(self, n, extent, pcentral, deltap, rmax, b):
        """:param b: Holland shape parameter."""
        super().__init__(n, extent, pcentral, deltap, rmax)
        self.b = b
        for i, r in enumerate(self.rvals):
            if r > 0:
                self.profile[i] = self.pcentral + self.deltap * math.exp(-(self.rmax / r) ** b)
            else:
                # at the eye (r == 0) the pressure is the central pressure
                self.profile[i] = pcentral
class WindSpeedProfile(RadialProfile):
    """Radial wind-speed profile; lazily caches the profile maximum."""

    def __init__(self, n, extent, rmax):
        """:param rmax: radius of maximum winds."""
        super().__init__(n, extent)
        self.rmax = rmax
        self.vmax = 0

    def getVmax(self):
        """Return the peak wind speed (scanned once, then cached in self.vmax)."""
        if self.vmax == 0:
            for speed in self.profile:
                if speed > self.vmax:
                    self.vmax = speed
        return self.vmax
class HollandWindSpeedProfile(WindSpeedProfile):
    """Holland gradient wind-speed profile, optionally including the Coriolis term."""

    def __init__(self, n, extent, rmax, deltap, rho, f, b, coriolis=False):
        """
        :param deltap: pressure deficit in millibars.
        :param rho: air density.
        :param f: Coriolis parameter.
        :param b: Holland shape parameter.
        :param coriolis: include the Coriolis correction terms when True.
        """
        super().__init__(n, extent, rmax)
        # Factor converting the leading term to m/s; it arises from using
        # millibars (not Pa) for deltap and km/hr (not m/s) elsewhere.
        self.units_factor = 100
        self.deltap = deltap
        self.rho = rho
        self.f = f
        self.b = b
        for i, r in enumerate(self.rvals):
            if r > 0:
                y = (rmax / r) ** b
                exp_term = self.units_factor * (deltap / rho) * b * y * math.exp(-y)
                if coriolis == True:
                    speed = math.sqrt(exp_term + 0.25 * r ** 2 * f ** 2) + 0.5 * r * f
                else:
                    speed = math.sqrt(exp_term)
            else:
                speed = 0.0
            self.profile[i] = speed * 3.6  # m/s -> km/h
| Python | 73 | 31.246574 | 97 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/profile_model/radialprofiles.py | 0.528887 | 0.516143 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import json
from netCDF4 import Dataset
import numpy as np
import math
from hurricane_model import hurricane
from structures import geogrid
import datetime
def read_grid_file(grid_filename: str, grid_flag: int) -> (float, float):
    """Return (lon, lat) in radians; grid_flag 1 = ASCII raster description,
    anything else = MPAS netCDF mesh."""
    if grid_flag != 1:
        return read_netcdf(grid_filename)
    xll, yll, cellsize, numcells_lat, numcells_lon = read_raster_inputs(grid_filename)
    return setup_regular_grid(xll, yll, cellsize, numcells_lat, numcells_lon)
def read_input_file(filename: str) -> (str, int, str, float, float):
    """
    Parse the five-line hurricane input file.

    Line 1: trajectory (JSON) filename
    Line 2: grid flag (1 = regular raster grid, otherwise netCDF mesh)
    Line 3: grid filename
    Line 4: ambient pressure in mbar
    Line 5: Holland B parameter

    Raises FileNotFoundError if *filename* does not exist.  (The previous
    try/except only re-raised the same exception; the context manager also
    guarantees the handle is closed on parse errors.)
    """
    with open(filename, "r") as f:
        traj_filename = f.readline().rstrip('\n')
        grid_flag = int(f.readline().split()[0])
        grid_filename = f.readline().rstrip('\n')
        ambient_pressure = float(f.readline().split()[0])
        holland_b_param = float(f.readline().split()[0])
    return traj_filename, grid_flag, grid_filename, ambient_pressure, holland_b_param
def setup_regular_grid(xll: float, yll: float, cellsize: float, numcells_lat: int, numcells_lon: int) -> (float, float):
    """
    Build flattened lon/lat arrays (radians) of cell centers on a regular grid.

    Cells are ordered longitude-major (k = i * numcells_lat + j), matching the
    original nested loops; the loops were replaced by vectorized numpy calls.

    :param xll: longitude of the south-west corner in degrees.
    :param yll: latitude of the south-west corner in degrees.
    :param cellsize: cell size in degrees.
    :param numcells_lat: number of cells in latitude.
    :param numcells_lon: number of cells in longitude.
    :return: (lon, lat) 1-D float64 arrays of length numcells_lat*numcells_lon.
    """
    lon_centers = xll + (np.arange(numcells_lon, dtype=np.float64) + 0.5) * cellsize
    lat_centers = yll + (np.arange(numcells_lat, dtype=np.float64) + 0.5) * cellsize
    # repeat/tile reproduce the (i outer, j inner) ordering of the old loops
    lon = np.repeat(lon_centers, numcells_lat)
    lat = np.tile(lat_centers, numcells_lon)
    # Convert to radians
    return lon * math.pi / 180., lat * math.pi / 180.
def read_raster_inputs(filename: str) -> (float, float, float, int, int):
    """
    Read the five-line regular-grid (raster) description file.

    Each line holds one value (first whitespace-separated token is used):
    SW-corner longitude (deg), SW-corner latitude (deg), cell size (deg),
    number of cells in latitude, number of cells in longitude.

    Raises FileNotFoundError if *filename* does not exist.  (The previous
    try/except only re-raised the same exception; the context manager also
    guarantees the handle is closed.)
    """
    with open(filename, "r") as f:
        xll = float(f.readline().split()[0])        # longitude of SW corner, deg
        yll = float(f.readline().split()[0])        # latitude of SW corner, deg
        cellsize = float(f.readline().split()[0])   # cell size, deg
        numcells_lat = int(f.readline().split()[0])
        numcells_lon = int(f.readline().split()[0])
    return xll, yll, cellsize, numcells_lat, numcells_lon
def read_json(filename: str):
    """
    Load and return the JSON document stored in *filename*.

    Raises FileNotFoundError if the file does not exist.  (The previous
    try/except merely re-raised the same exception, so it was removed.)
    """
    with open(filename) as json_data:
        return json.load(json_data)
def read_netcdf(filename: str) -> (float, float):
    """
    Read cell-center coordinates from an MPAS netCDF mesh file.

    latCell/lonCell in the file are assumed to be in radians; longitudes are
    shifted into the interval (-pi, pi].

    Changes: removed the try/except that only re-raised FileNotFoundError,
    closed the dataset (it was previously leaked), and vectorized the
    longitude wrap.
    """
    # http://unidata.github.io/netcdf4-python/#section1
    nc = Dataset(filename)
    # Convert to plain numpy arrays for subsequent processing
    lat = np.array(nc.variables['latCell'][:])
    lon = np.array(nc.variables['lonCell'][:]) - 2. * math.pi
    nc.close()
    # Wrap longitudes into (-pi, pi]
    lon[lon <= -math.pi] += 2. * math.pi
    return lon, lat
def initialize_hurricane(traj_filename: str, ambient_pressure: float, holland_b_param: float) -> list:
    """Build the list of Hurricane snapshots from a JSON storm-track file,
    converting units and deriving per-interval forward velocities.

    :param traj_filename: path to the storm-track JSON file.
    :param ambient_pressure: ambient pressure in mbar (used to derive deltap).
    :param holland_b_param: Holland B parameter applied to every snapshot.
    :return: list of Hurricane objects, one per track point.
    """
    # JSON Specs
    # "timeUnits": "hours",
    # "distanceUnits": "miles",
    # "windspeedUnits": "knots",
    # "pressureUnits": "mb",
    json_raw = read_json(traj_filename)
    ref_date = datetime.datetime.strptime(json_raw['initialTime'],'%Y-%m-%d_%H:%M:%S')
    curr_hurricane = []
    traj = json_raw['stormTrack']['features']
    for it in range(0, len(traj)):
        coord = traj[it]['geometry']['coordinates']
        center_coord = [x * math.pi / 180. for x in coord]  # degree to rad
        extent = traj[it]['properties']['rMax'] * 1.60934  # miles to km
        pmin = traj[it]['properties']['minP']  # in mbar
        deltap = ambient_pressure - pmin  # in mbar
        time = traj[it]['properties']['time']  # in hrs
        vmax = traj[it]['properties']['wMax'] * 1.852  # from knots to km/h
        curr_hurricane.append(hurricane.Hurricane(tuple(center_coord), extent, pmin, deltap, vmax,
                                                  holland_b_param, time, ref_date))
    # Compute the components of the forward velocity: the heading of each
    # interval comes from the vector to the next track point, the speed from
    # the 'vf' property (knots -> km/h).  NOTE(review): the last snapshot
    # never receives set_vf -- confirm downstream code only uses len-1 steps.
    for it in range(0, len(traj) - 1):
        x1 = curr_hurricane[it].center[0]
        y1 = curr_hurricane[it].center[1]
        x2 = curr_hurricane[it + 1].center[0]
        y2 = curr_hurricane[it + 1].center[1]
        theta = math.atan2(y2 - y1, x2 - x1)
        vf = traj[it]['properties']['vf'] * 1.852
        curr_hurricane[it].set_vf((vf * math.cos(theta), vf * math.sin(theta)))
    return curr_hurricane
def initialize_grid(grid_filename: str, grid_flag: int) -> geogrid.GeoGrid:
    """Read grid coordinates (radians) and wrap them in a GeoGrid."""
    grid_lon, grid_lat = read_grid_file(grid_filename, grid_flag)
    return geogrid.GeoGrid(grid_lon, grid_lat)
| Python | 156 | 32.570515 | 120 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds_io/import_data.py | 0.59958 | 0.587359 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | # Author: Steven Brus
# Date: April, 2020
# Description: This function writes time-varying forcing data to an input file for the model run.
import os
import numpy as np
import netCDF4
##################################################################################################
##################################################################################################
def write_to_file(filename,data,var,xtime):
  """Write one time-varying forcing variable to a NETCDF3 file.

  filename -- netCDF file to create (or open for append if it exists)
  data     -- 2-D array, shape (nsnaps, ncells), values for variable *var*
  var      -- name of the netCDF variable to create
  xtime    -- array of time-stamp strings, one per snapshot

  NOTE(review): in append mode the dimensions and 'xtime' variable below are
  created again; this assumes they do not already exist in the file -- confirm
  how callers sequence their calls.
  """
  if os.path.isfile(filename):
    data_nc = netCDF4.Dataset(filename,'a', format='NETCDF3_64BIT_OFFSET')
  else:
    data_nc = netCDF4.Dataset(filename,'w', format='NETCDF3_64BIT_OFFSET')
  # Find dimensions (nsnaps is currently unused)
  ncells = data.shape[1]
  nsnaps = data.shape[0]
  # Declare dimensions (Time is the unlimited record dimension)
  data_nc.createDimension('nCells',ncells)
  data_nc.createDimension('StrLen',64)
  data_nc.createDimension('Time',None)
  # Create time variable (one character per element for netCDF3)
  time = data_nc.createVariable('xtime','S1',('Time','StrLen'))
  time[:,:] = netCDF4.stringtochar(xtime)
  # Set variables
  data_var = data_nc.createVariable(var,np.float64,('Time','nCells'))
  data_var[:,:] = data[:,:]
  data_nc.close()
##################################################################################################
##################################################################################################
| Python | 38 | 33.605263 | 98 | /testing_and_setup/compass/ocean/hurricane/scripts/write_forcing_file.py | 0.476046 | 0.460076 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | from enum import Enum
import numpy as np
import winds.parameters as Parameters
import hurricane_model as Hurricane
import structures as Geogrid
import matplotlib.pyplot as plt
import math
class PROFILE_TYPE(Enum):
    """Supported radial wind/pressure profile models.

    Only HOLLAND is implemented in WindModel below; WILLOUGHBY is declared
    for future use.
    """
    HOLLAND = 'holland'
    WILLOUGHBY = 'willoughby'
class WindModel:
    """Evaluates wind and pressure fields on a grid for one hurricane snapshot."""

    def __init__(self, params: Parameters, curr_hurricane: Hurricane, grid: Geogrid):
        """
        :param params: physical parameters (earth_radius, rho, f, wind_profile_type, ...).
        :param curr_hurricane: hurricane state at one time (center in radians, extent, ...).
        :param grid: grid with lon/lat in radians.
        :raises NotImplementedError: for any profile type other than HOLLAND.
        """
        self.profile_type = params.wind_profile_type
        if self.profile_type == PROFILE_TYPE.HOLLAND:
            # Great-circle (haversine) distance in km between the hurricane
            # eye and every grid point.
            r = np.power(np.sin((grid.lat - curr_hurricane.center[1]) * 0.5), 2) + \
                np.cos(grid.lat) * np.cos(curr_hurricane.center[1]) * \
                np.power(np.sin((grid.lon - curr_hurricane.center[0]) * 0.5), 2)
            r = 2.0 * params.earth_radius * np.arcsin(np.sqrt(r))
            # Compute pressure
            self.pressure_profile = holland_pressure_profile(curr_hurricane, r)
            # Compute wind speed
            self.wind_speed_profile = holland_windspeed_profile(params, curr_hurricane, r)
            # Compute wind components
            self.u, self.v = compute_components(self.wind_speed_profile, curr_hurricane, grid)
        else:
            # Bug fix: raising a plain string is a TypeError in Python 3;
            # raise a proper exception instead.
            raise NotImplementedError(
                'Profile models other than Holland are not currently supported.')
def holland_pressure_profile(hurricane: Hurricane, r: np.ndarray):
    """
    Holland radial surface-pressure profile.

    :param hurricane: object providing pcentral, deltap, extent (rmax) and b.
    :param r: distances from the eye of the hurricane to the grid points in km.
    :return: pressure at each distance, in the same units as pcentral/deltap (mbar).
    """
    decay = np.exp(-((hurricane.extent / r) ** hurricane.b))
    return hurricane.pcentral + hurricane.deltap * decay
def holland_windspeed_profile(params: Parameters, hurricane: Hurricane, r: np.ndarray, coriolis=False):
    """
    Holland gradient wind-speed profile, returned in km/h.

    :param params: object providing rho (air density) and f (Coriolis, rad/hrs).
    :param hurricane: object providing extent (rmax), b and deltap (mbar).
    :param r: distances from the eye of the hurricane to the grid points in km.
    :param coriolis: include the Coriolis correction terms when True.
    """
    # The Holland equation expects deltap in Pa and density in kg/m3 and
    # yields m/s; the factor below converts deltap from mbar to Pa.
    units_factor = 100.
    y = (hurricane.extent / r) ** hurricane.b
    exp_term = units_factor * (hurricane.deltap / params.rho) * hurricane.b * y * np.exp(-y)
    if coriolis is True:
        speeds = np.sqrt(exp_term + 0.25 * np.power(r * params.f, 2)) + 0.5 * r * params.f
    else:
        speeds = np.sqrt(exp_term)
    # Conversion from m/s to km/h
    return speeds * 3.6
def compute_components(wind_speed_profile, curr_hurricane: Hurricane, grid: Geogrid) -> (np.ndarray, np.ndarray):
    """
    Decompose the tangential wind speed into east/north components and add the
    storm's forward motion, scaled by the local wind-speed ratio.

    :param wind_speed_profile: tangential wind speed at each grid point.
    :param curr_hurricane: object providing center, vmax and vforward.
    :param grid: object providing lon/lat arrays.
    :return: (u, v) eastward and northward wind components.
    """
    dlat = grid.lat - curr_hurricane.center[1]
    dlon = grid.lon - curr_hurricane.center[0]
    # tangential direction: bearing from the eye rotated by +90 degrees
    theta = np.arctan2(dlat, dlon) + math.pi * 0.5
    vg_east = wind_speed_profile * np.cos(theta)
    vg_north = wind_speed_profile * np.sin(theta)
    # forward-motion contribution scales with the local gradient wind
    scale = wind_speed_profile / curr_hurricane.vmax
    u = vg_east + curr_hurricane.vforward[0] * scale
    v = vg_north + curr_hurricane.vforward[1] * scale
    return u, v
nairita87/Ocean_dir | refs/heads/ocean_coastal | #!/usr/bin/env python
'''
Script to map cell indices from MPASO noLI mesh to those of the wLI mesh in the runoff mapping file.
Start by building a runoff mapping file that has all the mesh description from wLI mapping file
but the actual mapping from the noLI mapping file:
ncks -x -v S,col,row /project/projectdirs/acme/inputdata/cpl/cpl6/map_rx1_to_oEC60to30v3wLI_smoothed.r300e600.170328.nc newfile.nc
ncks -A -v S,col,row /project/projectdirs/acme/inputdata/cpl/cpl6/map_rx1_to_oEC60to30v3_smoothed.r300e600.161222.nc newfile.nc
'''
# import modules # {{{
import netCDF4
import numpy as np
import argparse
import shutil
# }}}
# parser # {{{
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_file', dest='input_file',
                    default='map_rx1_to_oEC60to30v3wLI.nc',
                    help='Input file, original runoff mapping file'
                    )
parser.add_argument('-o', '--output_file', dest='output_file',
                    default='map_rx1_to_oEC60to30v3wLI_final.nc',
                    help='Output file, revised runoff mapping file with no runoff below ice shelf cavities'
                    )
parser.add_argument('-l', '--lookup_table_file', dest='lookup_table_file',
                    default='lookup_table.txt',
                    help='lookup table file, only used locally'
                    )
parser.add_argument('-w', '--mesh_with_ISC', dest='mesh_with_ISC',
                    default='culled_mesh.nc',
                    help='mesh file, including ice shelf cavities'
                    )
parser.add_argument('-n', '--mesh_no_ISC', dest='mesh_no_ISC',
                    default='no_ISC_culled_mesh.nc',
                    help='mesh file, but without ice shelf cavities'
                    )
input_file = parser.parse_args().input_file
output_file = parser.parse_args().output_file
lookup_table_file = parser.parse_args().lookup_table_file
# Work on a copy of the original mapping file; only its 'row' variable is
# rewritten in place at the end of this script.
shutil.copy2(input_file, output_file)
# }}}
# True: rebuild the noLI->wLI cell lookup table by brute-force nearest-cell
# search. False: reload a table saved by a previous run of this script.
build_table = True
if build_table:
    # noLI mesh
    mesh_no_ISC = netCDF4.Dataset(parser.parse_args().mesh_no_ISC, 'r')
    noLIxCell = mesh_no_ISC.variables['xCell'][:]
    noLIyCell = mesh_no_ISC.variables['yCell'][:]
    noLInCells = len(mesh_no_ISC.dimensions['nCells'])
    # wLI mesh
    mesh_with_ISC = netCDF4.Dataset(parser.parse_args().mesh_with_ISC, 'r')
    wLIxCell = mesh_with_ISC.variables['xCell'][:]
    wLIyCell = mesh_with_ISC.variables['yCell'][:]
    # init lookup table
    lookup = np.zeros((noLInCells,), dtype=np.uint32)
    print("nCells=", noLInCells)
    for i in range(noLInCells):
        # for i in range(30):
        if i % 1000 == 0:
            print("Cell: ", i)
        # find index of wLI mesh that is the same location as each cell in the
        # noLI mesh (nearest planar cell center by squared distance)
        lookup[i] = np.argmin((noLIxCell[i] - wLIxCell[:])
                              ** 2 + (noLIyCell[i] - wLIyCell[:])**2)
    mesh_no_ISC.close()
    mesh_with_ISC.close()
    print( "Lookup table complete.")
    np.savetxt(lookup_table_file, lookup, fmt='%d')
    print("Saved to ", lookup_table_file)
else:
    lookup = np.loadtxt(lookup_table_file, dtype=np.uint32)
    print("Loaded lookup table from:", lookup_table_file)
print("Lookup: first entries:", lookup[0:10])
print("Lookup: last entries:", lookup[-10:])
# now swap in wLI indices into the runoff mapping file
f = netCDF4.Dataset(output_file, "r+")
row = f.variables['row'][:]
rownew = row * 0
# 'row' holds 1-based destination cell indices on the noLI mesh; map each one
# through the 0-based lookup table, then convert back to 1-based.
for i in range(len(row)):
    rownew[i] = lookup[row[i] - 1] + 1  # 1-based
f.variables['row'][:] = rownew[:]
f.close()
print("Copied over indices.")
# vim: foldmethod=marker ai ts=4 sts=4 et sw=4 ft=python
| Python | 94 | 37.765957 | 130 | /testing_and_setup/compass/ocean/global_ocean/scripts/copy_cell_indices_ISC.py | 0.623765 | 0.601811 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | from winds.wind_model import PROFILE_TYPE
from winds.parameters import Parameters
import math
def test_parameters():
    """Exercise construction of winds.parameters.Parameters.

    NOTE(review): this test makes no assertions — it only checks that the
    constructor does not raise — and the helper ``eval_coriolis`` below is
    defined but never called. Consider asserting against ``params``
    attributes and ``eval_coriolis`` results.
    """
    gridsize = [10, 10]
    nr = 100
    wind_profile_type = PROFILE_TYPE.HOLLAND
    grid_position = [-106.0,35.0]
    cellsize = 2.0
    siderealDay = 23.934 # A sidereal day in hrs.
    omega = 2.0 * math.pi / siderealDay # The Earth's rotation rate in rad/hr.
    rho = 1.225e9 # Air density at sea level in kg/m^3. NOTE(review): 1.225e9 looks off by ~1e9 for kg/m^3; verify units.
    distance_unit = 'kilometers'
    time_unit = 'hours'
    pressure_unit = 'millibars'
    # The Coriolis parameter should be 2*omega*sin(pi*|phi|/360), for phi in degrees latitude [-90,90].
    # NOTE(review): Parameters elsewhere in this package takes (mean_lat, wind_profile_type);
    # confirm this (gridsize, nr, wind_profile_type) call matches the current constructor.
    params = Parameters(gridsize,nr,wind_profile_type)
    def eval_coriolis(lat,omega):
        # Expected Coriolis factor for lat in degrees (currently unused).
        return 2*omega * math.sin(math.pi*math.fabs(lat)/360)
| Python | 24 | 31.541666 | 103 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_parameters.py | 0.674776 | 0.622279 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
fig = plt.gcf()
nRow = 1 # 2
nCol = 5
nu = ['1', '5', '10', '100', '200']
iTime = [0]
time = ['20']
# ---nx,ny for 10 km
#nx = 16
#ny = 50
# ---nx,ny for 4 km
nx = 40
ny = 126
# ---nx,ny for 1 km
#nx = 160
#ny = 500
fig, axs = plt.subplots(nRow, nCol, figsize=(
2.1 * nCol, 5.0 * nRow), constrained_layout=True)
for iCol in range(nCol):
for iRow in range(nRow):
ncfile = Dataset('output_' + str(iCol + 1) + '.nc', 'r')
var = ncfile.variables['temperature']
var1 = np.reshape(var[iTime[iRow], :, 0], [ny, nx])
# --- flip in y-dir
var = np.flipud(var1)
# --- Every other row in y needs to average two neighbors in x on planar hex mesh
var_avg = var
for j in range(0, ny, 2):
for i in range(0, nx - 2):
var_avg[j, i] = (var[j, i + 1] + var[j, i]) / 2.0
if nRow == 1:
ax = axs[iCol]
if nRow > 1:
ax = axs[iRow, iCol]
dis = ax.imshow(
var_avg,
extent=[
0,
160,
0,
500],
cmap='jet',
vmin=11.8,
vmax=13.0)
ax.set_title("day " + time[iRow] + ", " + r"$\nu_h=$" + nu[iCol])
ax.set_xticks(np.arange(0, 161, step=40))
ax.set_yticks(np.arange(0, 501, step=50))
if iRow == nRow - 1:
ax.set_xlabel('x, km')
if iCol == 0:
ax.set_ylabel('y, km')
if iCol == nCol - 1:
if nRow == 1:
fig.colorbar(dis, ax=axs[nCol - 1], aspect=40)
if nRow > 1:
fig.colorbar(dis, ax=axs[iRow, nCol - 1], aspect=40)
ncfile.close()
if nx == 16:
res = '10'
if nx == 40:
res = '4'
if nx == 160:
res = '1'
plt.savefig("sections_baroclinic_channel_" + res + "km.png")
| Python | 79 | 24.012659 | 89 | /testing_and_setup/compass/ocean/baroclinic_channel/4km/rpe_test/plot.py | 0.47419 | 0.423077 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | #!/usr/bin/env python
'''
name: define_base_mesh
authors: Phillip J. Wolfram
This function specifies the resolution for a coastal refined mesh for the CA coast from SF to LA for
Chris Jeffrey and Mark Galassi.
It contains the following resolution resgions:
1) a QU 120km global background resolution
2) 3km refinement region along the CA coast from SF to LA, with 30km transition region
'''
import numpy as np
import jigsaw_to_MPAS.coastal_tools as ct
def cellWidthVsLatLon():
    """Build the cell-width map for the CA coastal mesh.

    Returns (cell_width, lon, lat) with cell_width in km on a regular
    lat-lon grid: a QU 120 km global background refined to 3 km along the
    California coast from SF to LA, with a 100 km transition starting 30 km
    offshore.
    """
    km = 1000.0  # coastal_tools works in meters
    # NOTE(review): this aliases (and below mutates) ct.default_params rather
    # than copying it; any later caller of coastal_tools sees these changes.
    params = ct.default_params
    # Refinement boxes as (lon_min, lon_max, lat_min, lat_max).
    SFtoLA = {"include": [np.array([-124.0, -117.5, 34.2, 38.0])],  # SF to LA
              "exclude": [np.array([-122.1, -120.8, 37.7, 39.2])]}  # SF Bay Delta
    WestCoast = np.array([-136.0, -102.0, 22.0, 51])
    print("****QU120 background mesh and 300m refinement from SF to LA****")
    params["mesh_type"] = "QU"
    params["dx_max_global"] = 120.0 * km
    params["region_box"] = SFtoLA
    params["plot_box"] = WestCoast
    params["dx_min_coastal"] = 3.0 * km
    params["trans_width"] = 100.0 * km
    params["trans_start"] = 30.0 * km
    cell_width, lon, lat = ct.coastal_refined_mesh(params)
    # Convert cell widths from meters back to km for the caller.
    return cell_width / 1000, lon, lat
| Python | 38 | 29.947369 | 101 | /testing_and_setup/compass/ocean/global_ocean/CA120to3/build_mesh/define_base_mesh.py | 0.636905 | 0.571429 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import math
from winds.wind_model import PROFILE_TYPE
class Parameters:
    """Shared physical constants and unit conventions for the wind models.

    Distances are km, times are hours, pressures are millibars; latitudes
    are in radians unless noted otherwise.
    """

    def __init__(self, mean_lat: float, wind_profile_type=PROFILE_TYPE.HOLLAND):
        """
        Constructor.

        :param mean_lat: mean latitude of the hurricane trajectory, in
            radians (not stored here; per-latitude Coriolis values come
            from get_coriolis)
        :param wind_profile_type: which radial wind-profile model to use
        """
        # A sidereal day in hrs.
        self.siderealDay = 23.934
        # The Earth's rotation rate in rad/hr.
        self.omega = 2.0 * math.pi / self.siderealDay
        # Air density at sea level in kg/m^3.
        self.rho = 1.15
        # The particular wind profile model being used.
        self.wind_profile_type = wind_profile_type
        # Earth radius in km.
        self.earth_radius = 6371.1

    def get_coriolis(self, lat: float) -> float:
        """
        Return the Coriolis parameter 2*omega*sin(|lat|) for a latitude.

        :param lat: latitude in radians
        :return: Coriolis factor in rad/s, consistent with Holland's model
            units (omega is stored in rad/hr, hence the 3600 divisor)
        """
        omega_rad_per_sec = self.omega / 3600.
        return 2.0 * omega_rad_per_sec * math.sin(math.fabs(lat))

    def get_pressure_unit(self):
        """Unit used for pressures."""
        return 'millibars'

    def get_distance_unit(self):
        """Unit used for distances."""
        return 'kilometers'

    def get_time_unit(self):
        """Unit used for times."""
        return 'hours'
| Python | 41 | 33.048782 | 114 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/winds/parameters.py | 0.635387 | 0.616046 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('Agg')
fig = plt.gcf()
nRow = 6
nCol = 2
iTime = [8, 16]
nu = ['0.01', '0.1', '1', '10', '100', '200']
time = ['hour 8', 'hour 16']
fig, axs = plt.subplots(nRow, nCol, figsize=(
5.3 * nCol, 2.0 * nRow), constrained_layout=True)
for iRow in range(nRow):
ncfile = Dataset('output_' + str(iRow + 1) + '.nc', 'r')
var = ncfile.variables['temperature']
xtime = ncfile.variables['xtime']
for iCol in range(nCol):
ax = axs[iRow, iCol]
dis = ax.imshow(var[iTime[iCol], 0:512:4, :].T, extent=[
0, 120, 20, 0], aspect=2, cmap='jet', vmin=5, vmax=30)
if iRow == nRow - 1:
ax.set_xlabel('x, km')
if iCol == 0:
ax.set_ylabel('depth, m')
if iCol == nCol - 1:
fig.colorbar(dis, ax=axs[iRow, iCol], aspect=5)
ax.set_title(time[iCol] + ", " + r"$\nu_h=$" + nu[iRow])
ncfile.close()
plt.savefig('sections_lock_exchange.png', bbox_inches='tight')
| Python | 34 | 30.470589 | 78 | /testing_and_setup/compass/ocean/lock_exchange/0.5km/rpe_test/plot.py | 0.558878 | 0.514019 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | # /usr/bin/env python
"""
% Create cell width array for this mesh on a regular latitude-longitude grid.
% Outputs:
% cellWidth - m x n array, entries are desired cell width in km
% lat - latitude, vector of length m, with entries between -90 and 90, degrees
% lon - longitude, vector of length n, with entries between -180 and 180, degrees
"""
import numpy as np
import jigsaw_to_MPAS.mesh_definition_tools as mdt
def cellWidthVsLatLon():
    """Create the ARM60to6 cell-width array on a 0.1-degree lat-lon grid.

    Returns:
        cellWidth - m x n array, desired cell width in km
        lon - longitude vector (degrees, -180..180)
        lat - latitude vector (degrees, -90..90)

    Also writes a diagnostic plot of the Atlantic/Pacific width-vs-latitude
    profiles to cellWidthVsLat.png.
    """
    lat = np.arange(-90, 90.01, 0.1)
    lon = np.arange(-180, 180.01, 0.1)
    QU1 = np.ones(lat.size)
    # Base profiles: EC 60-30 km background and a 30-6 km RRS profile used
    # for the northern-hemisphere refinement.
    EC60to30 = mdt.EC_CellWidthVsLat(lat)
    RRS30to6 = mdt.RRS_CellWidthVsLat(lat, 30, 6)
    AtlNH = RRS30to6
    AtlGrid = mdt.mergeCellWidthVsLat(lat, EC60to30, AtlNH, 0, 4)
    PacNH = mdt.mergeCellWidthVsLat(lat, 30 * QU1, RRS30to6, 50, 12)
    PacGrid = mdt.mergeCellWidthVsLat(lat, EC60to30, PacNH, 0, 6)
    # Stitch the two basin profiles into a single 2-D width field.
    cellWidth = mdt.AtlanticPacificGrid(lat, lon, AtlGrid, PacGrid)
    # Diagnostic plot; Agg backend is forced so this works headless.
    import matplotlib.pyplot as plt
    import matplotlib
    matplotlib.use('Agg')
    plt.clf()
    plt.plot(lat, AtlGrid, label='Atlantic')
    plt.plot(lat, PacGrid, label='Pacific')
    plt.grid(True)
    plt.xlabel('latitude')
    plt.title('Grid cell size, km')
    plt.legend()
    plt.savefig('cellWidthVsLat.png')
    return cellWidth, lon, lat
| Python | 41 | 30.634146 | 84 | /testing_and_setup/compass/ocean/global_ocean/ARM60to6/init/define_base_mesh.py | 0.680802 | 0.631457 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | # Author: Steven Brus
# Date April, 2020
# Description:
# This creates a "dummy" time varying forcing file
# with zero wind zero atmospheric pressure perturbation
# for the tidal spinup run.
#
# The tidal spinup is run using this "dummy" atmospheric forcing
# because the time varying atmospheric forcing for the
# forward run requires information in the restart file.
# The inclusion of this additional information in the restart
# file is trigged by the use of time varying atmospheric forcing
# in the tidal spinup.
import netCDF4
import matplotlib.pyplot as plt
import numpy as np
import glob
import pprint
import datetime
import os
import yaml
import subprocess
import argparse
import write_forcing_file
plt.switch_backend('agg')
##################################################################################################
##################################################################################################
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--start_time')
    parser.add_argument('--spinup_length')
    args = parser.parse_args()
    # Files to interpolate to/from
    grid_file = './mesh.nc'
    forcing_file = 'spinup_atmospheric_forcing.nc'
    # Setup timestamps
    # (3 time snaps are needed because new data will be read in at the end of the simulation)
    dtformat = '%Y-%m-%d_%H:%M:%S'
    start_time = datetime.datetime.strptime(args.start_time,dtformat)
    spinup_length = float(args.spinup_length)
    xtime = []
    # Pad each stamp to the fixed 64-character width stored below ('S64').
    xtime.append(args.start_time+45*' ')
    next_time = start_time + datetime.timedelta(days=spinup_length)
    xtime.append(datetime.datetime.strftime(next_time,dtformat)+45*' ')
    next_time = next_time + datetime.timedelta(days=spinup_length)
    xtime.append(datetime.datetime.strftime(next_time,dtformat)+45*' ')
    xtime = np.array(xtime,'S64')
    print(xtime)
    # Get grid from grid file
    grid_nc = netCDF4.Dataset(grid_file,'r')
    lon_grid = grid_nc.variables['lonCell'][:]
    ncells = lon_grid.size
    # Initialize atmospheric forcing fields
    # Zero wind everywhere; uniform standard sea-level pressure in Pa.
    u_data = np.zeros((3,ncells))
    v_data = np.zeros((3,ncells))
    p_data = np.zeros((3,ncells)) + 101325.0
    print(p_data.shape)
    # Write to NetCDF file
    # Remove any stale forcing file before writing the new variables.
    subprocess.call(['rm',forcing_file])
    write_forcing_file.write_to_file(forcing_file,u_data,'windSpeedU',xtime)
    write_forcing_file.write_to_file(forcing_file,v_data,'windSpeedV',xtime)
    write_forcing_file.write_to_file(forcing_file,p_data,'atmosPressure',xtime)
| Python | 71 | 33.915493 | 98 | /testing_and_setup/compass/ocean/hurricane/scripts/spinup_time_varying_forcing.py | 0.663306 | 0.653226 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | from winds.velocities import Velocities
import math
def test_velocities():
    """Sample the wind vector around the unit circle and dump it to CSV."""
    # Forward (translation) velocity in km/hr.
    forward_east = -1.0   # Eastward.
    forward_north = 0.0   # Northward.
    gradient_speed = 1.0  # Tangential gradient wind speed in km/hr.
    veloc = Velocities(forward_east, forward_north)
    radius = 1.0  # Unit circle about the origin.
    n_samples = 360
    step = 2 * math.pi / n_samples
    with open('test_velocities_out.csv', 'wt') as out:
        out.write('x,y,vx,vy,r,theta_degrees\n')
        # One sample per degree around the circle.
        for i in range(n_samples):
            theta = i * step
            degrees = 180.0 * theta / math.pi
            x = radius * math.cos(theta)
            y = radius * math.sin(theta)
            v = veloc.compute_wind_vector(gradient_speed, x, y)
            fields = [str(x), str(y), str(v[0]), str(v[1]), str(radius), str(degrees)]
            out.write(','.join(fields) + '\n')
| Python | 22 | 33.545456 | 101 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_velocities.py | 0.554533 | 0.529566 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import sys
import numpy as np
import matplotlib.pyplot as plt
#from matplotlib.patches import Circle
import math
def W(x, y):
    """Return the tangential wind vector [vx, vy] at the point (x, y).

    The speed comes from the radial profile V(r); the direction is
    perpendicular to the radius vector (counter-clockwise circulation).
    """
    r = np.sqrt(x * x + y * y)
    if not r > 0:
        # At the origin the direction is undefined; the wind vanishes there.
        return [0, 0]
    speed = V(r)
    cos_theta = x / r
    sin_theta = y / r
    return [-sin_theta * speed, cos_theta * speed]

def V(r):
    """Radial wind-speed profile v(r) = 2 r^2 exp(-r)."""
    return 2 * r * r * np.exp(-r)
def example(n):
    """Plot streamlines of the tangential wind field on an n-by-n grid."""
    nx, ny = n, n
    xs = np.linspace(-2, 2, nx)
    ys = np.linspace(-2, 2, ny)
    # Sample the wind field W at every grid node.
    u_comp = np.zeros((ny, nx))
    v_comp = np.zeros((ny, nx))
    for j in range(ny):
        for i in range(nx):
            wind = W(xs[i], ys[j])
            u_comp[j, i] = wind[0]
            v_comp[j, i] = wind[1]
    fig = plt.figure()
    axis = fig.add_subplot(1, 1, 1)
    # Streamline plot, colored by wind speed.
    speed = np.sqrt(u_comp * u_comp + v_comp * v_comp)
    axis.streamplot(xs, ys, u_comp, v_comp, color=speed, cmap='Spectral')
    axis.set_xlabel('$x$')
    axis.set_ylabel('$y$')
    axis.set_xlim(-2, 2)
    axis.set_ylim(-2, 2)
    axis.set_aspect('equal')
    plt.title('Tangential Wind Vectors')
    plt.show()
# Run the demo on a small 8x8 grid when executed as a script.
if __name__=='__main__':
    example(8)
| Python | 49 | 21.897959 | 71 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/ad_hoc/wind_vector_example.py | 0.536955 | 0.511131 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | from geopy.distance import geodesic
def geodistkm(x1,y1,x2,y2):
    '''
    Returns the geodesic distance in km given two pairs of (lon, lat) coordinates.
    Note: Because it uses geopy, each coordinate pair is reversed to (lat, lon)
    before calling the geopy function.
    :param x1: lon of the first point.
    :param y1: lat of the first point.
    :param x2: lon of the second point.
    :param y2: lat of the second point.
    :return: Geodesic distance between the two points in km.
    '''
    # geopy expects (lat, lon) ordering.
    start = (y1, x1)
    end = (y2, x2)
    return geodesic(start, end).km
| Python | 14 | 37.642857 | 82 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/utils/gis.py | 0.685767 | 0.663586 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | # Author: Steven Brus
# Date: August, 2019
# Description: Interpolates CFSR atmospheric reanalysis data onto the MPAS-O mesh and
# creates an input file to support time varying atmospheric forcing in the model
import netCDF4
import matplotlib.pyplot as plt
import numpy as np
import glob
import pprint
import datetime
import os
import yaml
import subprocess
import argparse
import cartopy
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from scipy import interpolate
import write_forcing_file
plt.switch_backend('agg')
# Point cartopy at pre-downloaded feature data when CARTOPY_DIR is set;
# otherwise keep whatever directory is already configured.
cartopy.config['pre_existing_data_dir'] = \
    os.getenv('CARTOPY_DIR', cartopy.config.get('pre_existing_data_dir'))
##################################################################################################
##################################################################################################
def interpolate_data_to_grid(grid_file,data_file,var):
    """Interpolate one CFSR lat-lon field onto the MPAS-O cell centers.

    :param grid_file: MPAS mesh NetCDF file (provides lonCell/latCell)
    :param data_file: CFSR reanalysis NetCDF file on a regular lat-lon grid
    :param var: name of the variable in data_file to interpolate
    :return: (lon_grid, lat_grid, interp_data, lon_data, lat_data, data, xtime)
        where interp_data is (ntimes, ncells) on the MPAS mesh, data is the
        (ntimes, nlat, nlon) source field, and xtime holds 64-character
        MPAS-style time stamps.
    """
    # Open files
    data_nc = netCDF4.Dataset(data_file,'r')
    grid_nc = netCDF4.Dataset(grid_file,'r')
    # Get grid from data file
    lon_data = data_nc.variables['lon'][:]
    # Append a wrap-around longitude column so interpolation covers 0..360.
    lon_data = np.append(lon_data,360.0)
    # Source latitudes are stored north-to-south; flip to ascending order.
    lat_data = np.flipud(data_nc.variables['lat'][:])
    time = data_nc.variables['time'][:]
    nsnaps = time.size
    nlon = lon_data.size
    nlat = lat_data.size
    data = np.zeros((nsnaps,nlat,nlon))
    print(data.shape)
    # Get grid from grid file
    lon_grid = grid_nc.variables['lonCell'][:]*180.0/np.pi
    lat_grid = grid_nc.variables['latCell'][:]*180.0/np.pi
    grid_points = np.column_stack((lon_grid,lat_grid))
    ncells = lon_grid.size
    interp_data = np.zeros((nsnaps,ncells))
    print(interp_data.shape)
    print(np.amin(lon_grid),np.amax(lon_grid))
    print(np.amin(lat_grid),np.amax(lat_grid))
    # Interpolate timesnaps
    for i,t in enumerate(time):
        print('Interpolating '+var+': '+str(i))
        # Get data to interpolate
        data[i,:,0:-1] = np.flipud(data_nc.variables[var][i,:,:])
        # Duplicate the first longitude into the wrap-around column.
        data[i,:,-1] = data[i,:,0]
        # Interpolate data onto new grid; out-of-range points fill with 0.
        interpolator = interpolate.RegularGridInterpolator((lon_data,lat_data),data[i,:,:].T,bounds_error=False,fill_value=0.0)
        interp_data[i,:] = interpolator(grid_points)
    # Deal with time
    ref_date = data_nc.variables['time'].getncattr('units').replace('hours since ','').replace('.0 +0:00','')
    ref_date = datetime.datetime.strptime(ref_date,'%Y-%m-%d %H:%M:%S')
    xtime = []
    for t in time:
        date = ref_date + datetime.timedelta(hours=np.float64(t))
        # Pad to the fixed 64-character width expected by the 'S64' array.
        xtime.append(date.strftime('%Y-%m-%d_%H:%M:%S'+45*' '))
    xtime = np.array(xtime,'S64')
    return lon_grid,lat_grid,interp_data,lon_data,lat_data,data,xtime
##################################################################################################
##################################################################################################
def plot_interp_data(lon_data,lat_data,data,lon_grid,lat_grid,interp_data,var_label,var_abrev,time,frame=None):
    """Plot a source field and its MPAS interpolation as two map panels.

    :param lon_data: source grid longitudes
    :param lat_data: source grid latitudes
    :param data: 2D source field for one time snap
    :param lon_grid: MPAS cell-center longitudes (degrees)
    :param lat_grid: MPAS cell-center latitudes (degrees)
    :param interp_data: interpolated values at the MPAS cell centers
    :param var_label: colorbar label
    :param var_abrev: abbreviation used in the output PNG file name
    :param time: 64-char time stamp (bytes) shown in the panel titles
    :param frame: frame number for the PNG name; when None, falls back to
        the module-level loop variable ``i`` (the original behavior, kept
        for backward compatibility with existing callers)
    """
    # Plot data
    fig = plt.figure()
    levels = np.linspace(np.amin(data),np.amax(data),100)
    ax0 = fig.add_subplot(2, 1, 1, projection=ccrs.PlateCarree())
    cf = ax0.contourf(lon_data, lat_data, data, levels=levels,
                      transform=ccrs.PlateCarree())
    ax0.set_extent([0, 359.9, -90, 90], crs=ccrs.PlateCarree())
    ax0.add_feature(cfeature.LAND, zorder=100)
    ax0.add_feature(cfeature.LAKES, alpha=0.5, zorder=101)
    ax0.add_feature(cfeature.COASTLINE, zorder=101)
    ax0.set_title('data '+time.strip().decode())
    cbar = fig.colorbar(cf,ax=ax0)
    cbar.set_label(var_label)
    # Plot interpolated data
    ax1 = fig.add_subplot(2, 1, 2, projection=ccrs.PlateCarree())
    levels = np.linspace(np.amin(interp_data),np.amax(interp_data),100)
    cf = ax1.tricontourf(lon_grid,lat_grid,interp_data,levels=levels,
                         transform=ccrs.PlateCarree())
    ax1.set_extent([0, 359.9, -90, 90], crs=ccrs.PlateCarree())
    ax1.add_feature(cfeature.LAND, zorder=100)
    ax1.add_feature(cfeature.LAKES, alpha=0.5, zorder=101)
    ax1.add_feature(cfeature.COASTLINE, zorder=101)
    ax1.set_title('interpolated data '+time.strip().decode())
    cbar = fig.colorbar(cf,ax=ax1)
    cbar.set_label(var_label)
    # Save figure
    fig.tight_layout()
    frame_number = i if frame is None else frame
    # Fixed typo: 'box_inches' -> 'bbox_inches' (the documented savefig
    # keyword; the misspelled name was not applied).
    fig.savefig(var_abrev+'_'+str(frame_number).zfill(4)+'.png',bbox_inches='tight')
    plt.close()
##################################################################################################
##################################################################################################
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--plot',action='store_true')
    args = parser.parse_args()
    # Plot every nplot-th time snap (plotting every snap is slow).
    nplot = 10
    # Files to interpolate to/from
    grid_file = './mesh.nc'
    wind_file = './wnd10m.nc'
    pres_file = './prmsl.nc'
    forcing_file = 'atmospheric_forcing.nc'
    # Interpolation of u and v velocities
    lon_grid,lat_grid,u_interp,lon_data,lat_data,u_data,xtime = interpolate_data_to_grid(grid_file,wind_file,'U_GRD_L103')
    lon_grid,lat_grid,v_interp,lon_data,lat_data,v_data,xtime = interpolate_data_to_grid(grid_file,wind_file,'V_GRD_L103')
    # Calculate and plot velocity magnitude
    if args.plot:
        for i in range(u_data.shape[0]):
            if i % nplot == 0:
                print('Plotting vel: '+str(i))
                data = np.sqrt(np.square(u_data[i,:,:]) + np.square(v_data[i,:,:]))
                interp_data = np.sqrt(np.square(u_interp[i,:]) + np.square(v_interp[i,:]))
                plot_interp_data(lon_data,lat_data,data,lon_grid,lat_grid,interp_data,'velocity magnitude','vel',xtime[i])
    # Interpolation of atmospheric pressure
    lon_grid,lat_grid,p_interp,lon_data,lat_data,p_data,xtime = interpolate_data_to_grid(grid_file,pres_file,'PRMSL_L101')
    # Plot atmopheric pressure
    if args.plot:
        for i in range(p_data.shape[0]):
            if i % nplot == 0:
                print('Plotting pres: '+str(i))
                plot_interp_data(lon_data,lat_data,p_data[i,:,:],lon_grid,lat_grid,p_interp[i,:],'atmospheric pressure','pres',xtime[i])
    # Write to NetCDF file
    # Remove any stale forcing file before writing the new variables.
    subprocess.call(['rm',forcing_file])
    write_forcing_file.write_to_file(forcing_file,u_interp,'windSpeedU',xtime)
    write_forcing_file.write_to_file(forcing_file,v_interp,'windSpeedV',xtime)
    write_forcing_file.write_to_file(forcing_file,p_interp,'atmosPressure',xtime)
| Python | 165 | 36.696968 | 128 | /testing_and_setup/compass/ocean/hurricane/scripts/interpolate_time_varying_forcing.py | 0.61646 | 0.59701 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | import numpy as np
from structures.geogrid import GeoGrid
from profile_model.radialprofiles import HollandWindSpeedProfile
from winds.parameters import Parameters
from winds.velocities import Velocities
import matplotlib.pyplot as plt
def test_velocity_grid():
    """Build a wind-vector field from the Holland radial profile and plot it.

    NOTE(review): apart from reaching the `assert True`, nothing is verified
    programmatically, and plt.show() blocks under a GUI backend — consider
    asserting on profile/velocity values instead.
    """
    # Grid of x, y points
    n = 50
    nr = 200
    rmax = 40
    cmin, cmax = -200 , 200
    cellsize = (cmax-cmin)/n
    x = np.linspace(cmin, cmax, n)
    y = np.linspace(cmin, cmax, n)
    U = GeoGrid(cmin,cmin,n,n,cellsize)
    V = GeoGrid(cmin,cmin,n,n,cellsize)
    # NOTE(review): Parameters elsewhere in this package requires mean_lat;
    # confirm this no-argument call matches the current constructor.
    params = Parameters()
    b = 1.4  # Holland shape parameter
    hc = [0,0]  # hurricane center
    vf = [0,10]  # forward velocity (km/hr)
    deltap = 100  # central pressure deficit
    coriol = False
    profile = HollandWindSpeedProfile(nr,2*cmax,rmax,deltap,params.rho,params.getCoriolisMid(),b,coriolis=coriol)
    vels = Velocities(vf[0],vf[1],profile.getVmax())
    # Sample the wind vector at every cell center.
    for j in range(0,n):
        for i in range(0,n):
            pt = U.getCenter(i,j)
            r = np.sqrt(pow(pt[0]-hc[0],2)+pow(pt[1]-hc[1],2))
            vg = profile.getValue(r)
            vv = vels.compute_wind_vector(vg,pt[0],pt[1])
            U.put(i,j,vv[0])
            V.put(i,j,vv[1])
    assert True  # If we made it to here.
    fig = plt.figure()
    ax = fig.add_subplot(131)
    ax.plot(profile.rvals, profile.profile)
    ax.set(xlabel='r (km)', ylabel='wind speed (km/hr)',
           title='Radial Wind')
    ax1 = fig.add_subplot(133)
    # Plot the streamlines.
    # Matplotlib assume an ordinary row ordering, so the rows must be reversed before plotting.
    Ug = U.grid
    Vg = V.grid
    Uplt = np.zeros([n,n])
    Vplt = np.zeros([n,n])
    for j in range(0,n):
        jp = n-j-1
        for i in range(0,n):
            Uplt[jp,i]=Ug[j,i]
            Vplt[jp,i]=Vg[j,i]
    Vmag = np.sqrt(Ug*Ug+Vg*Vg)
    ax1.streamplot(x, y, Uplt, Vplt, color=Vmag, cmap='Spectral')
    ax1.set_xlabel('$x$')
    ax1.set_ylabel('$y$')
    ax1.set_xlim(cmin,cmax)
    ax1.set_ylim(cmin,cmax)
    ax1.set_aspect('equal')
    plt.title('Wind Vectors')
    plt.show()
| Python | 69 | 27.971014 | 113 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/winds/test_velocity_grid.py | 0.5945 | 0.5675 |
nairita87/Ocean_dir | refs/heads/ocean_coastal | from structures.geogrid import GeoGrid
def test_geogrid():
    """Exercise GeoGrid construction, indexing, coordinate mapping and clear()."""
    lon = -106.0
    lat = 35
    nlon = 8
    nlat = 4
    cellsize = 1.0
    defaultValue = -1.0
    grid = GeoGrid(lon,lat,nlon,nlat,cellsize,defaultValue = defaultValue)
    assert grid.lon == lon
    assert grid.lat == lat
    assert grid.nlon == nlon
    assert grid.nlat == nlat
    assert grid.cellsize == cellsize
    # NOTE(review): tautology — compares the local to itself and can never
    # fail; presumably meant `grid.defaultValue == defaultValue`.
    assert defaultValue == defaultValue
    # Fill the four quadrants with distinct values...
    l = int(nlat/2)
    k = int(nlon/2)
    for j in range(0,l):
        for i in range(0,k):
            grid.put(i,j,1.0)
        for i in range(k,nlon):
            grid.put(i,j,2.0)
    for j in range(l,nlat):
        for i in range(0,k):
            grid.put(i,j,3.0)
        for i in range(k,nlon):
            grid.put(i,j,4.0)
    # ...then read each quadrant back.
    for j in range(0,l):
        for i in range(0,k):
            assert grid.getByIndex(i,j) == 1.0
        for i in range(k,nlon):
            assert grid.getByIndex(i,j) == 2.0
    for j in range(l,nlat):
        for i in range(0,k):
            assert grid.getByIndex(i,j) == 3.0
        for i in range(k,nlon):
            assert grid.getByIndex(i,j) == 4.0
    # Index <-> coordinate round trip through a cell center.
    testcell = [3,3]
    center = grid.getCenter(testcell[0],testcell[1])
    centerx = lon + (testcell[0]+0.5)*cellsize
    centery = lat + (testcell[1]+0.5)*cellsize
    assert center[0] == centerx
    assert center[1] == centery
    index = grid.getIndex(centerx,centery)
    assert index[0] == testcell[0]
    assert index[1] == testcell[1]
    # Lookup by index and by coordinate must agree.
    value = grid.getByIndex(testcell[0],testcell[1])
    testcoords = grid.getCenter(testcell[0],testcell[1])
    valuec = grid.getByCoordinate(testcoords[0],testcoords[1])
    assert value == valuec
    origin = grid.getOrigin()
    assert origin[0] == lon
    assert origin[1] == lat
    # bounds = (lon_min, lon_max, lat_min, lat_max).
    bounds = grid.bounds
    assert bounds[0] == lon
    assert bounds[1] == lon + nlon*cellsize
    assert bounds[2] == lat
    assert bounds[3] == lat + nlat*cellsize
    # Inside/outside checks by index...
    assert grid.indexInside(-1,l) == False
    assert grid.indexInside(k,l) == True
    assert grid.indexInside(nlon,l) == False
    assert grid.indexInside(k,-1) == False
    assert grid.indexInside(k,l) == True
    assert grid.indexInside(k,nlat) == False
    # ...and by coordinate, probing just inside/outside each corner.
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[2]+cellsize) == True
    assert grid.coordinateInside(bounds[0]-cellsize,bounds[2]+cellsize) == False
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[2]-cellsize) == False
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[2]+cellsize) == True
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[2]-cellsize) == False
    assert grid.coordinateInside(bounds[1]+cellsize,bounds[2]+cellsize) == False
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[3]-cellsize) == True
    assert grid.coordinateInside(bounds[0]+cellsize,bounds[3]+cellsize) == False
    assert grid.coordinateInside(bounds[0]-cellsize,bounds[3]+cellsize) == False
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[3]-cellsize) == True
    assert grid.coordinateInside(bounds[1]-cellsize,bounds[3]+cellsize) == False
    assert grid.coordinateInside(bounds[1]+cellsize,bounds[3]-cellsize) == False
    # clear() must reset every cell to 0.
    grid.clear()
    for j in range(0,nlat):
        for i in range(0,nlon):
            assert grid.getByIndex(i,j) == 0.0
| Python | 94 | 33.936169 | 80 | /testing_and_setup/compass/ocean/hurricane/hurricane_wind_pressure/tests/structures/test_geogrid.py | 0.635312 | 0.606697 |
tamuell/my-first-blog | refs/heads/master | name = "Tatiana"
# NOTE: `name` is assigned earlier in this file before this print.
print(name)
# Comparisons and simple if-statements.
if 3 > 2:
    print("It works!")
if 5 > 2:
    print("5 is indeed greater than 2")
else:
    print("5 is not greater than 2")
name = 'Tatiana'
# if / elif / else chain on the value of name.
if name == 'Ola':
    print('Hey Ola!')
elif name == 'Tatiana':
    print('Hey Tatiana!')
else:
    print('Hey anonymous!')
# A function without parameters...
def hi():
    print('Hi there!')
    print('How are you?')
hi ()
# ...and one with a parameter. NOTE: this redefinition replaces the
# zero-argument hi() defined above.
def hi(name):
    print('Hi ' + name + '!')
hi("Tatiana")
| Python | 25 | 15.36 | 36 | /Testdatei.py | 0.578049 | 0.558537 |
yueyoum/smoke | refs/heads/master | import sys
from wsgiref.simple_server import make_server
sys.path.append('..')
from app import App
from smoke.exceptions import EmailExceptionMiddleware
def exception_func_1():
    # First frame of a deliberate three-deep call chain, used to produce a
    # multi-frame traceback for the exception-middleware demo.
    return exception_func_2()
def exception_func_2():
    # Middle frame of the deliberate failure chain (see exception_func_3).
    return exception_func_3()
def exception_func_3():
    # Deliberately raises ZeroDivisionError so the middleware's exception
    # handling fires.
    return 1 / 0
# Wrap the demo app so unhandled exceptions are reported by email and
# rendered as an HTML 500 page (smoke_html=True).
# NOTE(review): to_address is an empty list, so no mail is actually
# delivered; it only satisfies the middleware's config check.
app = EmailExceptionMiddleware(
    App(exception_func_1),
    smoke_html=True,
    to_address=[],
    smtp_server='127.0.0.1'
)
# Serve on localhost:8000 until interrupted (blocking call).
server = make_server('127.0.0.1', 8000, app)
server.serve_forever()
| Python | 28 | 17.142857 | 53 | /test/mail_exception_test.py | 0.69685 | 0.649606 |
yueyoum/smoke | refs/heads/master | class App(object):
    def __init__(self, hook_func=None):
        # Optional zero-argument callable invoked on each request after the
        # response headers are sent (e.g. to inject a failure in the tests).
        self.hook_func = hook_func
def __call__(self, environ, start_response):
html = """<html>
<body><table>{0}</table></body>
</html>"""
def _get_env(k, v):
return """<tr><td>{0}</td><td>{1}</td></tr>""".format(k, v)
env_table = ''.join( [_get_env(k, v) for k, v in sorted(environ.items())] )
html = html.format(env_table)
status = '200 OK'
headers = [
('Content-Type', 'text/html'),
('Content-Length', str(len(html)))
]
start_response(status, headers)
if self.hook_func:
self.hook_func()
return [html]
if __name__ == '__main__':
    from wsgiref.simple_server import make_server
    # Demo: serve the environ-dump app for a single request on localhost:8000.
    app = App()
    server = make_server('127.0.0.1', 8000, app)
    server.handle_request()
| Python | 31 | 27.67742 | 83 | /test/app.py | 0.506187 | 0.488189 |
yueyoum/smoke | refs/heads/master | # -*- coding: utf-8 -*-
import sys
import traceback
class ExceptionMiddleware(object):
    """WSGI middleware that intercepts unhandled exceptions from a wrapped app.

    Subclasses implement ``handle_exception`` to report the traceback (log,
    email, queue, ...). When ``smoke_html`` is true the middleware swallows
    the exception and serves a minimal 500 page; otherwise it re-raises
    after reporting.
    """

    def __init__(self, wrap_app, smoke_html=False):
        self.wrap_app = wrap_app      # the WSGI application being wrapped
        self.smoke_html = smoke_html  # serve an HTML 500 page instead of re-raising

    def __call__(self, environ, start_response):
        try:
            return self.wrap_app(environ, start_response)
        # Fixed: was a bare `except:`, which also trapped SystemExit and
        # KeyboardInterrupt; those should propagate untouched.
        except Exception:
            tb_exc = traceback.format_exc()
            exc_info = sys.exc_info()
            self.handle_exception(tb_exc, exc_info)
            if not self.smoke_html:
                raise
            status = '500 Internal Server Error'
            # exc_info is forwarded so the server may replace already-sent
            # headers, per the WSGI spec (PEP 3333).
            start_response(
                status,
                [('Content-Type', 'text/html')],
                exc_info
            )
            # NOTE(review): the second replace argument appears garbled in
            # this copy (upstream presumably used '&nbsp;' to preserve the
            # traceback's whitespace in HTML) — verify against the repo.
            tb_exc = tb_exc.replace('\n', '<br/>').replace(' ', ' ')
            html = """<html>
            <head><title>%s</title></head>
            <body>
            <h1>%s</h1>
            <p>%s</p>
            </body>
            </html>
            """ % (status, status, tb_exc)
            return [html]

    def handle_exception(self, tb_exc, exc_info):
        """Hook for subclasses: report the formatted traceback and exc_info."""
        raise NotImplementedError
class EmailExceptionMiddleware(ExceptionMiddleware):
    """Exception middleware that reports tracebacks by email.

    This is an example: in production it is better not to send email
    synchronously, because a slow SMTP server will block the web app. Best
    practice is to subclass with a handle_exception that does not send mail
    directly (hand the report off to a message queue or similar).
    """

    def __init__(self,
                 wrap_app,
                 smoke_html=False,
                 from_address=None,
                 to_address=None,
                 smtp_server=None,
                 smtp_port=25,
                 smtp_username=None,
                 smtp_password=None,
                 mail_subject_prefix=None,
                 mail_template=None):
        # Fixed: validated explicitly instead of with `assert`, which is
        # stripped when Python runs with -O. AssertionError is still raised
        # so callers that caught the original assert keep working.
        if not (isinstance(to_address, (list, tuple)) and smtp_server is not None):
            raise AssertionError("Email Config Error")
        self.from_address = from_address
        self.to_address = to_address
        self.smtp_server = smtp_server
        self.smtp_port = smtp_port
        self.smtp_username = smtp_username
        self.smtp_password = smtp_password
        self.mail_subject_prefix = mail_subject_prefix
        self.mail_template = mail_template  # stored but not used by this class
        super(EmailExceptionMiddleware, self).__init__(wrap_app, smoke_html=smoke_html)

    def handle_exception(self, tb_exc, exc_info):
        """Email the formatted traceback (synchronously)."""
        # Imported lazily so the middleware can be constructed without the
        # mail backend importable.
        from smoke.functional import send_mail
        send_mail(
            self.smtp_server,
            self.smtp_port,
            self.smtp_username,
            self.smtp_password,
            self.from_address,
            self.to_address,
            '{0} Error Occurred'.format(self.mail_subject_prefix if self.mail_subject_prefix else ''),
            tb_exc,
            'html'
        )
| Python | 87 | 32.379311 | 102 | /smoke/exceptions.py | 0.543546 | 0.540448 |
yueyoum/smoke | refs/heads/master | from mail import send_mail
| Python | 1 | 26 | 26 | /smoke/functional/__init__.py | 0.814815 | 0.814815 |
vkhvorostianyi/airflow_practice | refs/heads/master | from datetime import timedelta, datetime
import json
import time
import os
import airflow
from urllib.request import urlopen
import pandas as pd
import http.client
import configparser
from airflow import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.utils.dates import days_ago
import airflow.hooks.S3_hook
import boto3
# S3 handle via the default AWS credential chain.
# NOTE(review): `s3` is not referenced in the visible code below (uploads go
# through the Airflow S3 hook); confirm it is still needed.
s3 = boto3.resource('s3')
# RapidAPI credentials are read from ~/airflow/api.config.
config = configparser.ConfigParser()
config.read(f"{os.path.expanduser('~')}/airflow/api.config")
def get_api_data():
    """Fetch the TikTok trending live feed from RapidAPI.

    Uses the API key from the module-level ``config`` and returns the
    decoded JSON payload as a dict.
    """
    print(os.getcwd())
    conn = http.client.HTTPSConnection("tiktok.p.rapidapi.com")
    headers = {
        'x-rapidapi-key': config["rapidapi"]["API_RAPIDAPI_KEY"],
        'x-rapidapi-host': "tiktok.p.rapidapi.com"
        }
    try:
        conn.request("GET", "/live/trending/feed", headers=headers)
        res = conn.getresponse()
        data = res.read()
    finally:
        # Fixed: the connection was previously never closed (socket leak on
        # every task run).
        conn.close()
    json_data = json.loads(data.decode("utf-8"))
    return json_data
def get_clean_data(**context):
    """Flatten the feed pulled from the `get_data` task into two CSVs.

    Pulls the raw JSON via XCom, extracts per-video and per-author tuples,
    and writes ~/airflow/data/video.csv and ~/airflow/data/author.csv.
    """
    video_data = []
    author_data = []
    # XCom pull assumes get_api_data returned a dict with a 'media' key.
    media = context['task_instance'].xcom_pull(task_ids='get_data', key='return_value').get('media')
    if media:
        for item in media:
            video_attr = (
                item["video_id"],
                item["create_time"],
                item["description"],
                item["video"]["playAddr"],
                item['statistics']
            )
            author_attr = (
                item['author']['nickname'],
                item['author']['uniqueId'],
                item['author']['followers'],
                item['author']['heartCount'],
                item['author']['videoCount']
            )
            video_data.append(video_attr)
            author_data.append(author_attr)
    author_df = pd.DataFrame(author_data, columns=('nickname', 'id', 'followers', 'heartCount', 'videoCount'))
    # NOTE(review): 'descriotion' is a typo in the CSV header, but it is a
    # runtime string consumed downstream -- fix in a coordinated change only.
    video_df = pd.DataFrame(video_data, columns=('video_id', 'create_time', 'descriotion', 'playAddr', 'statistics'))
    # create_time arrives as a unix timestamp string; convert to datetime.
    video_df["create_time"]= pd.to_datetime(video_df['create_time'].apply(lambda x: datetime.fromtimestamp(int(x))))
    video_df.to_csv(f"{os.path.expanduser('~')}/airflow/data/video.csv", index=None)
    author_df.to_csv(f"{os.path.expanduser('~')}/airflow/data/author.csv", index=None)
def upload_file_to_S3_with_hook(filename, key, bucket_name):
    """Upload a local file to S3 using the 'aws_default' Airflow connection."""
    hook = airflow.hooks.S3_hook.S3Hook('aws_default')
    hook.load_file(filename, key, bucket_name)
# DAG-level defaults applied to every task below.
default_args = {
    'owner': 'airflow',
    'start_date': days_ago(5),
    'email': ['airflow@my_first_dag.com'],
    'email_on_failure': False,
    'email_on_retry': False,
    'retries': 1,
    'retry_delay': timedelta(minutes=5),
}

# Pipeline: fetch feed -> clean to CSV -> upload both CSVs to S3 -> done.
# Runs every 2 minutes.
with DAG(
    'tiktok_dag',
    default_args=default_args,
    description='Our first DAG',
    schedule_interval="*/2 * * * *",
) as dag:
    get_data = PythonOperator(
        task_id='get_data',
        python_callable=get_api_data,
        dag=dag
    )
    clean_data = PythonOperator(
        task_id='clean_data',
        python_callable=get_clean_data,
        dag=dag,
        provide_context=True
    )
    # One upload task per generated CSV file.
    s3_tasks = []
    for file in [f"{os.path.expanduser('~')}/airflow/data/author.csv",
                 f"{os.path.expanduser('~')}/airflow/data/video.csv"]:
        upload_to_S3_task = PythonOperator(
            task_id=f'upload_to_S3_{file.split("/")[-1]}',
            python_callable=upload_file_to_S3_with_hook,
            op_kwargs={
                'filename': file,
                # Key is timestamped at parse time, e.g. 2021-Jan-01/12-30-author.csv.
                'key': f'{datetime.now().strftime("%Y-%b-%d/%H-%M")}-{file.split("/")[-1]}',
                'bucket_name': f'tiktok-fun',
            },
            dag=dag)
        s3_tasks.append(upload_to_S3_task)
    opr_end = BashOperator(task_id='opr_end', bash_command='echo "Done"')

    get_data >> clean_data >> s3_tasks >> opr_end
| Python | 124 | 32.209679 | 124 | /dags/tiktok_dag.py | 0.56338 | 0.558038 |
Pudit/FarewellSI126 | refs/heads/main | #import libraries
from bs4 import BeautifulSoup
from urllib.request import urlopen
import urllib.error
import pandas as pd
#define func to find subfolder
def find_folder(student_id: int):
    """Return the mirror sub-folder name holding *student_id*.

    IDs are grouped in buckets of 50 ("001-050" .. "251-300") with a final
    short bucket "301-326". Returns None for ids outside 1..326.
    """
    if student_id < 1:
        return None
    buckets = (
        (50, "001-050"),
        (100, "051-100"),
        (150, "101-150"),
        (200, "151-200"),
        (250, "201-250"),
        (300, "251-300"),
        (326, "301-326"),
    )
    for upper_bound, folder_name in buckets:
        if student_id <= upper_bound:
            return folder_name
    return None
# define func to get url
def url_si(student_id):
    """Return the farewell-site URL for *student_id* (zero-padded to 3 digits).

    BUG FIX: the body previously interpolated the loop *global* `i` instead of
    the `student_id` parameter; it only worked because every caller happened to
    pass the global `i`. Now the parameter is used.
    """
    return f"https://sites.google.com/view/seniorfarewell2021/mirror/{find_folder(student_id)}/{student_id:03d}"
# create blank list to collect url and HTTP response code
urllist = list()
checkerlist = list()
# Index 0 is a placeholder so that list positions match student ids 1..326.
for i in range(326 + 1):
    urllist.append(url_si(i))
urllist[0] = ""
# check that each person's page exists or not (200 vs 404)
for i in range(327):
    try:
        urlopen(url_si(i))
    except urllib.error.HTTPError as e:
        checkerlist.append(404)
    else:
        checkerlist.append(200)
# finding name and real google doc path by scraping each existing page
namelist = list()
formlist = list()
for i in range(327):
    if checkerlist[i] == 200:
        bsObj = BeautifulSoup(urlopen(urllist[i]))
        title = bsObj.find("h1").getText()
        # The Google-Form link is assumed to be the second-to-last anchor on
        # the page -- TODO confirm this holds for every mirror page.
        gform = bsObj.find_all("a", href=True)[-2]['href']
        namelist.append(title)
        formlist.append(gform)
    else:
        namelist.append("NotFound 404")
        formlist.append("404 Not Found")
# Check GSX, send to my high-school classmates.
# Because of duplicated nicknames, plz check manually.
is_gsx = [False] * 327  # 0 to 326 people in SI126 code
is_gsx[11] = True # Max
is_gsx[12] = True # Film
is_gsx[23] = True # Pea
is_gsx[26] = True # Poom
is_gsx[28] = True # Win Sukrit
is_gsx[33] = True # Krit Kitty
is_gsx[37] = True # Ball
is_gsx[59] = True # Ji
is_gsx[61] = True # Tong
is_gsx[104] = True # Now
is_gsx[130] = True # Pond
is_gsx[139] = True # Thames
is_gsx[142] = True # Win Nawin
is_gsx[147] = True # Jan
is_gsx[164] = True # Mhee
is_gsx[185] = True # Jane Glasses
is_gsx[200] = True # Ana
is_gsx[209] = True # Jane Juice
is_gsx[232] = True # Fangpao
is_gsx[277] = True # Guggug
is_gsx[285] = True # Ken Whale
is_gsx[290] = True # Bell Tao
# create pandas dataframe from the parallel lists built above
si126_df = pd.DataFrame({
    'url': urllist,
    'formlink':formlist,
    'title' : namelist,
    'status': checkerlist,
    "GSX" : is_gsx
})
# save dataframe to csv
si126_df.to_csv("si126_namelist.csv")
# cleaning some minor texts manually!, add some missing names, strip texts, do on text editors
# read csv file after cleaning some dirts
si126_df = pd.read_csv("si126_namelist.csv")
# find his/her nickname: first whitespace-separated token of the page title
si126_df["nickname"] = si126_df.title.str.split(" ",expand = True,n=1)[0]
# export to csv again
si126_df.to_csv("si126_namelist.csv")
| Python | 120 | 23.883333 | 94 | /find_all_sites.py | 0.635664 | 0.570759 |
Pudit/FarewellSI126 | refs/heads/main | #import libraries
from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from datetime import datetime
import pandas as pd
# path for webdriver (placeholder -- must be edited before running)
driverpath = "PATH for your chromedriver"
# load data from csv file; keep only classmates flagged GSX
df = pd.read_csv("si126_namelist.csv")
urllist = list(df[df.GSX == True].formlink)
namelist = list(df[df.GSX == True].nickname)
# sending mail merge: open each Google Form and submit a personalized message
for i in range(len(urllist)):
    # fresh browser per recipient; sleeps give pages time to load
    driver = webdriver.Chrome(driverpath)
    time.sleep(3)
    sending_url = driver.get(urllist[i])
    send_to = namelist[i]
    time.sleep(1)
    sender_txt = "@sikawit"
    greeting_txt = f"""Hi {send_to.strip()}!
ยินดีด้วยครับคุณหมอ ในที่สุดก็เดินทางมาถึงเส้นชัยที่ยากที่สุดทางหนึ่งละครับ (ซึ่งผมขอหนีไปก่อน 555) ขอให้หมอเป็นหมอที่ดีครับ หวังว่าคงได้เจอกัน (คงไม่ใช่ในฐานะคนไข้นะ) หากมีอะไรที่ให้ช่วยได้ก็บอกมาได้ครัชช
ยินดีอีกครั้งครับ
Sake
*****
Generated from a bot on {datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S UTC%Z")}
Find out more at https://github.com/sikawit/FarewellSI126"""
    # Hard-coded XPaths target the form's two text inputs and submit button;
    # they will break if Google changes the form layout.
    sender_fill = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[1]/div/div/div[2]/div/div[1]/div/div[1]/input')
    sender_fill.send_keys(sender_txt)
    greeting_fill = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[2]/div[2]/div/div/div[2]/div/div[1]/div[2]/textarea')
    greeting_fill.send_keys(greeting_txt)
    submit = driver.find_element_by_xpath('/html/body/div/div[2]/form/div[2]/div/div[3]/div[1]/div/div/span')
    submit.click()
    time.sleep(3)
    driver.close()
| Python | 52 | 30.057692 | 205 | /write_mirrors.py | 0.687693 | 0.66914 |
Jegajeeth/res-req-in-fast-api | refs/heads/main | from fastapi import FastAPI
from fastapi.responses import HTMLResponse as hr
from fastapi.responses import RedirectResponse as rr
from fastapi.responses import FileResponse
app = FastAPI()

# Static landing page served at the site root.
# NOTE(review): "masrter2" looks like a typo, but it must match the actual
# directory name on disk -- verify before renaming.
file_path="TinDog-start-masrter2/index.html"

@app.get("/")
async def serve_index():
    """Serve the static index page.

    BUG FIX: this handler and the /reshtml handler were both named `rout`,
    so the second definition shadowed the first at module level (flake8 F811).
    The registered HTTP routes are unchanged.
    """
    return FileResponse(file_path)

@app.get("/reshtml", response_class=hr)
async def serve_raw_html():
    """Return a small inline HTML document."""
    return """<html>
<body><h1>fsdfdfs</h1></body>
</html>
"""

@app.get("/item/{item}")
async def item(item):
    """Echo the path parameter back to the caller."""
    return item

@app.get("/redirectex", response_class = rr)
async def redirect():
    """Redirect the client to Google."""
    return "https://google.com/"
| Python | 30 | 19.799999 | 52 | /app.py | 0.642202 | 0.637615 |
steveyeh987/Data-Science | refs/heads/master | import sys
import ssl
import urllib
import matplotlib.pyplot as plt
def Parse_File(link):
    """Download the smoking-survey CSV at *link* and parse three categories.

    Returns (E, A, W), one list per category (education / income / working
    environment). Each list is [label, x_ticks, male%, female%, total%,
    non-smoking population share].
    """
    # BUG FIX: the file only does `import urllib`, which does not guarantee
    # that the `urllib.request` submodule is loaded; import it explicitly.
    import urllib.request
    # NOTE(review): certificate verification is disabled here -- acceptable
    # for this class exercise, not for untrusted endpoints.
    context = ssl._create_unverified_context()
    f = urllib.request.urlopen(link, context=context)
    data = f.read().decode('utf-8').split('\n')
    # Fixed row slices for the three sections of the CSV layout.
    e = [i.split(',') for i in data[2:7]]
    a = [i.split(',') for i in data[8:11]]
    w = [i.split(',') for i in data[12:15]]
    E = ['Education level']
    A = ['Average monthly income']
    W = ['Working environment']
    lst = [E, A, W]
    for index, cl in enumerate([e, a, w]):
        total_pop = 0.0
        x_tick = []
        M = []
        F = []
        T = []
        Non_smoke = []
        for row in cl:
            x_tick.append(row[0])
            # Columns: male pop, male %, female pop, female % (as floats).
            temp = list(map(float, row[1:]))
            M.append(temp[1])
            F.append(temp[3])
            # Population-weighted combined smoking percentage, 1 decimal.
            T.append(float("{0:.1f}".format((temp[0]*temp[1]+temp[2]*temp[3])/(temp[0]+temp[2]))))
            Non_smoke.append(temp[0]*(1-temp[1]/100)+temp[2]*(1-temp[3]/100))
            total_pop += (temp[0]*(1-temp[1]/100)+temp[2]*(1-temp[3]/100))
        # Normalize non-smoking counts to a share of the category total.
        Non_smoke = [float("{0:.1f}".format(i/total_pop)) for i in Non_smoke]
        lst[index].extend([x_tick, M, F, T, Non_smoke])
    return E, A, W
def Data_Class(s):
    """Map a class code ('E', 'A' or 'W') to the matching parsed data list."""
    assert s in ['E', 'A', 'W'], "Cannot find class type {} !".format(s)
    lookup = {'E': E, 'A': A, 'W': W}
    return lookup[s]
def Chart(s, data):
    """Render one chart for a parsed data list.

    s    -- chart type: 'l' line, 'b' grouped bar, 'p' pie
    data -- [label, x_ticks, male%, female%, total%, non-smoking share]
            as produced by Parse_File.
    """
    assert s in ['l', 'b', 'p'], "Cannot find chart type {} !".format(s)
    n = len(data[1])
    fig, ax = plt.subplots(figsize=(15, 8))
    ax.set_xticks(range(n))
    ax.set_xticklabels(data[1], ha='center')
    ax.tick_params(labelsize=9)
    if s == 'l':
        # Line chart: one series each for male / female / combined.
        ax.plot(range(n), data[2], marker='s', label='Male')
        ax.plot(range(n), data[3], marker='o', label='Female')
        ax.plot(range(n), data[4], marker='^', label='Total')
        # Annotate every data point with its value.
        for pop in data[2:5]:
            for i, j in zip(range(n), pop):
                ax.text(i+0.1, j+0.1, str(j), ha='center', va='bottom', fontsize=10)
        ax.set_title("Smoking Percentage vs {}".format(data[0]), fontsize=11)
        ax.set_xlabel(data[0], fontsize=9)
        ax.set_ylabel('Smoking Percentage (%)', fontsize=9)
        ax.set_xlim([-0.5, n-0.5])
        plt.legend(loc='upper right', prop={"size":10})
        plt.show()
    elif s == 'b':
        # Grouped bar chart, three bars per x tick.
        width=0.15
        rects1 = ax.bar([i-1.5*width for i in range(n)], data[2], width=width, label='Male', color='b')
        rects2 = ax.bar([i-0.5*width for i in range(n)], data[3], width=width, label='Female', color='r')
        rects3 = ax.bar([i+0.5*width for i in range(n)], data[4], width=width, label='Total', color='y')
        # Annotate the top of every bar with its height.
        for rects in [rects1, rects2, rects3]:
            for rect in rects:
                h = rect.get_height()
                ax.text(rect.get_x()+rect.get_width()/2., 1.01*h, h,
                        ha='center', va='bottom', fontsize=10)
        ax.set_title("Smoking Percentage vs {}".format(data[0]), fontsize=11)
        ax.set_xlabel(data[0], fontsize=9)
        ax.set_ylabel('Smoking Percentage (%)', fontsize=9)
        ax.set_xlim([-0.5, n-0.5])
        plt.legend(loc='upper right', prop={"size":10})
        plt.show()
    else:
        # Pie chart of the non-smoking population shares.
        ax.pie(data[5], labels=data[1], autopct='%1.1f%%',)
        ax.axis('equal')  # Equal aspect ratio ensures that pie is drawn as a circle.
        ax.set_title("Proportion of different {} in non-smoking population".format(data[0]), fontsize=11, y=1.08)
        plt.show()
if __name__ == '__main__':
    link = "https://ceiba.ntu.edu.tw/course/481ea4/hw1_data.csv"
    E, A, W = Parse_File(link)
    # Each CLI argument like "-El" selects a data class (E/A/W) followed by
    # a chart type (l/b/p); e.g. `python hw1.py -Eb -Wp`.
    for arg in sys.argv[1:]:
        if arg.startswith('-'):
            arg = arg[1:]
            cl = Data_Class(arg[0])
            Chart(arg[1], cl)
| Python | 112 | 33.839287 | 113 | /hw1/hw1.py | 0.512912 | 0.47865 |
sainarasimhayandamuri/LOGS-ANALYSIS-1 | refs/heads/master | #! /usr/bin/env python3
import psycopg2
import time
def connects():
    """Open and return a new connection to the local 'news' database."""
    return psycopg2.connect("dbname=news")
# Report queries, each backed by a pre-built view in the 'news' database:
# data1 -- top three articles by view count
# data2 -- authors ranked by total views
# data3 -- days where the HTTP error rate exceeded 1%
data1="select title,views from article_view limit 3"
data2="select * from author_view"
data3="select to_char(date,'Mon DD,YYYY') as date,err_prc from err_perc where err_prc>1.0"
def popular_article(data1):
    """Print the most popular articles as 'title--views', one per line.

    data1 -- SQL text returning (title, views) rows.
    """
    db = connects()
    try:
        c = db.cursor()
        c.execute(data1)
        # Unpack rows directly instead of indexing via range(len(...)).
        for title, views in c.fetchall():
            print("%s--%d" % (title, views))
    finally:
        # Close the connection even if the query raises.
        db.close()
def popular_authors(data2):
    """Print authors and their total views as 'name--views', one per line.

    data2 -- SQL text returning (name, views) rows.
    """
    db = connects()
    try:
        c = db.cursor()
        c.execute(data2)
        # Unpack rows directly instead of indexing via range(len(...)).
        for name, views in c.fetchall():
            print("%s--%d" % (name, views))
    finally:
        # Close the connection even if the query raises.
        db.close()
def error_percent(query3):
    """Print days whose request-error percentage exceeded 1.0.

    query3 -- SQL text returning (date, err_prc) rows.

    BUG FIX: the parameter was ignored and the module-level `data3` was
    executed instead; since the only caller passes `data3`, using the
    parameter is behavior-preserving. Also adds the missing db.close().
    """
    db = connects()
    try:
        c = db.cursor()
        c.execute(query3)
        for date, err_prc in c.fetchall():
            print("%s--%.1f %%" % (date, err_prc))
    finally:
        db.close()
if __name__ == "__main__":
    # Run all three reports in sequence, separated by blank lines.
    print("THE LIST OF POPULAR ARTICLES ARE:")
    popular_article(data1)
    print("\n")
    print("THE LIST OF POPULAR AUTHORS ARE:")
    popular_authors(data2)
    print("\n")
    print("PERC ERROR MORE THAN 1.0:")
    error_percent(data3)
| Python | 51 | 25.411764 | 90 | /newsdata.py | 0.62426 | 0.60429 |
shrued/webscraping-playground | refs/heads/master | import requests
from bs4 import BeautifulSoup
# Fetch the Wikipedia page and print every <table> element found in it.
response = requests.get(
    url="https://en.wikipedia.org/wiki/Toronto_Stock_Exchange",
)
soup = BeautifulSoup(response.content, 'html.parser')
table = soup.find_all('table')
print(table)
JeyFernandez/Crud-en-python | refs/heads/main | from tkinter import ttk
from tkinter import *
import sqlite3
class Product:
    """Tkinter CRUD front-end over the 'matricula.db' SQLite database."""

    # SQLite database file, resolved relative to the working directory.
    db_name = 'matricula.db'

    def __init__(self, box):
        """Build the registration form, the result table and the buttons."""
        self.box=box
        self.box.title('Registro De Estudiante')
        frame = LabelFrame(self.box, text='Datos del estudiante')
        frame.grid(row = 0, column = 0, columnspan= 3, pady= 20)
        # Espacio nombres
        Label(frame, text= 'Nombres y apellidos: ').grid(row = 1, column = 0)
        self.nombre = Entry (frame)
        self.nombre.focus()
        self.nombre.grid(row = 1, column = 1)
        # Espacio edad
        Label(frame, text='NuCedula: ').grid(row=2,column=0)
        self.edad=Entry (frame)
        self.edad.grid(row=2,column=1)
        # Espacio Cedula
        Label(frame, text='Direccion: ').grid(row=3, column= 0)
        self.cedula = Entry(frame)
        self.cedula.grid(row=3, column=1)
        # Espacio Celular
        Label(frame, text='NuTelular: ').grid(row=4, column=0)
        self.celular = Entry(frame)
        self.celular.grid(row=4, column=1)
        # Boton agregar
        # NOTE(review): the buttons have no command= callbacks wired yet.
        ttk.Button(frame,text='Registrar').grid(row = 5,column = 0, columnspan=3, sticky = W+E)
        # mensaje
        self.menssage = Label(text='',fg='red')
        self.menssage.grid(row=3,column=0,columnspan=2,sticky=W+E)
        # Tabla
        self.tree = ttk.Treeview(height = 10,column= ('#1', '#2', '#3'))
        self.tree.grid(row= 4, column= 0, columnspan=3)
        self.tree.heading("#0", text = 'Nombre y Apellido', anchor = CENTER)
        self.tree.heading("#1", text= 'NUmero de Cedula', anchor= CENTER)
        self.tree.heading("#2", text= 'Direccion', anchor= CENTER)
        self.tree.heading("#3", text= 'Numero de Telefono', anchor= CENTER)
        # botones
        ttk.Button(text='Eliminar').grid(row=5,column=0,sticky=W+E)
        ttk.Button(text='Editar').grid(row=5, column=2,sticky=W+E)
        # BUG FIX: was self.get_Estudiante() -- no method of that name exists
        # (case mismatch with get_estudiante below), so __init__ raised
        # AttributeError.
        self.get_estudiante()

    def run_query(self, query, parameters=()):
        """Execute *query* against the database and return the cursor."""
        with sqlite3.connect(self.db_name) as conn:
            cursor = conn.cursor()
            result = cursor.execute(query, parameters)
            conn.commit()
            return result

    def get_estudiante(self):
        """Reload the Treeview with all rows from the Estudiante table."""
        # limpiar: drop every row currently shown
        records = self.tree.get_children()
        for element in records:
            self.tree.delete(element)
        # consultar datos
        # BUG FIX: query started with 'SELC', which is not valid SQL.
        query = 'SELECT * FROM Estudiante ORDER BY name DESC'
        db_rows = self.run_query(query)
        # Rellenar datos
        # BUG FIX: Treeview.insert takes text=, not txt= (TypeError before).
        # NOTE(review): values=row[3] fills only the first extra column;
        # probably should be a tuple covering columns #1-#3 -- confirm schema.
        for row in db_rows:
            self.tree.insert('', 0, text=row[1], values=row[3])
if __name__ == '__main__':
    # Create the root window, attach the application, and start the GUI loop.
    box = Tk()
    sistema = Product(box)
    box.mainloop()
garrettroth/Metaverse-Sicariis | refs/heads/main | import tweepy
from tweepy import OAuthHandler
import re
class TwitterClient(object):
    '''
    Twitter Class for grabbing Tweets
    '''
    def __init__(self):
        '''
        Initialization Method
        '''
        # Keys and Tokens from the Twitter Dev Console.
        # SECURITY NOTE(review): credentials are hardcoded in source and have
        # been committed -- they should be rotated and loaded from the
        # environment or a config file instead.
        consumer_key = 'osoPe1vbrjL6hi83pPaT99JcZ'
        consumer_secret = '72ePjiWIu8YGRFSTXJdUiww12J6UcR0bJL556VSx73hfd7dwW0'
        access_token = '1038587928967098368-uX8QbeIua1pXU33gzB5Tcy89qMPrgt'
        access_token_secret = 'AohvvdBfkYILiwEouMpAfyVDP2TBX6xdLcmfyvAJqojcj'
        # Attempt Authentication
        try:
            # Create OAuthhandler object
            self.auth = OAuthHandler(consumer_key,consumer_secret)
            # Set access token and secret
            self.auth.set_access_token(access_token,access_token_secret)
            # Create tweepy API object to fetch tweets
            self.api = tweepy.API(self.auth)
        except:
            # NOTE(review): bare except silently swallows every failure mode;
            # self.api is left undefined afterwards.
            print("Error Authentication Failed")

    def clean_tweet(self, tweet):
        '''
        Utility function to clean tweet text by removing links, special characters
        using simple regex statements
        '''
        # Strips @mentions, non-alphanumerics (except space/tab) and URLs,
        # then collapses runs of whitespace to single spaces.
        return ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())

    def get_tweets(self, query, count = 1):
        '''
        Main Function to fetch tweets and parse them
        '''
        # Empty list to store parsed tweets
        tweets = []
        try:
            # call twitter api to fetch tweets
            fetch_tweets = self.api.search(q=query,count = count)
            # parsing tweets one by one
            for tweet in fetch_tweets:
                print(tweet)
                # empty dictionary to store required params of tweet
                parsed_tweet = {}
                # saving text of tweet
                parsed_tweet['text'] = tweet.text
                # appending parsed tweet to tweets list
                if tweet.retweet_count > 0:
                    # if tweet has a retweet, ensure that it is appended only once
                    if parsed_tweet not in tweets:
                        tweets.append(parsed_tweet)
                else:
                    tweets.append(parsed_tweet)
            # return parsed tweets; implicitly returns None on TweepError
            return tweets
        except tweepy.TweepError as e:
            # print error
            print("Error : " + str(e))
def main():
    """Fetch one tweet matching 'Cryptocurrency' and print the parsed result."""
    # Creating Object of twitter client class
    api = TwitterClient()
    # calling function to get tweets
    tweets = api.get_tweets(query = 'Cryptocurrency', count = 1)
    # print tweets
    print(tweets)

# Run only when executed as a script -- previously main() ran unconditionally
# at import time, which made the module unusable as a library.
if __name__ == '__main__':
    main()
jerry5841314/Ensemble-Pytorch | refs/heads/master | import os
import time
import logging
def set_logger(log_file=None, log_console_level="info", log_file_level=None):
    """Bind the default (root) logger with console and file stream output.

    log_file          -- base name for a log file under ./logs, or None for
                         console-only logging
    log_console_level -- level name for the console handler
    log_file_level    -- level name for the file handler (DEBUG when None)

    Returns the configured root logger. Raises ValueError for an unknown
    level name.
    """
    def _get_level(level):
        """Translate a case-insensitive level name to a logging constant."""
        if level.lower() == 'debug':
            return logging.DEBUG
        elif level.lower() == 'info':
            return logging.INFO
        elif level.lower() == 'warning':
            return logging.WARN
        elif level.lower() == 'error':
            return logging.ERROR
        elif level.lower() == 'critical':
            return logging.CRITICAL
        else:
            msg = (
                "`log_console_level` must be one of {{DEBUG, INFO,"
                " WARNING, ERROR, CRITICAL}}, but got {} instead."
            )
            raise ValueError(msg.format(level.upper()))

    _logger = logging.getLogger()

    # Reset existing handlers.
    # BUG FIX: iterating `_logger.handlers` directly while calling
    # removeHandler mutates the list during iteration and skips every other
    # handler; iterate over a snapshot instead.
    for h in list(_logger.handlers):
        _logger.removeHandler(h)

    rq = time.strftime('%Y_%m_%d_%H_%M', time.localtime(time.time()))
    log_path = os.path.join(os.getcwd(), 'logs')

    ch_formatter = logging.Formatter(
        "%(asctime)s - %(levelname)s: %(message)s"
    )
    ch = logging.StreamHandler()
    ch.setLevel(_get_level(log_console_level))
    ch.setFormatter(ch_formatter)
    _logger.addHandler(ch)

    if log_file is not None:
        print('Log will be saved in \'{}\'.'.format(log_path))
        if not os.path.exists(log_path):
            os.mkdir(log_path)
            print('Create folder \'logs/\'')
        # File name carries a minute-resolution timestamp.
        log_name = os.path.join(log_path, log_file + '-' + rq + '.log')
        print('Start logging into file {}...'.format(log_name))
        fh = logging.FileHandler(log_name, mode='w')
        fh.setLevel(
            logging.DEBUG
            if log_file_level is None
            else _get_level(log_file_level)
        )
        fh_formatter = logging.Formatter(
            "%(asctime)s - %(filename)s[line:%(lineno)d] - "
            "%(levelname)s: %(message)s"
        )
        fh.setFormatter(fh_formatter)
        _logger.addHandler(fh)

    _logger.setLevel("DEBUG")

    return _logger
| Python | 65 | 31.276922 | 77 | /torchensemble/utils/logging.py | 0.557197 | 0.557197 |
jerry5841314/Ensemble-Pytorch | refs/heads/master | import torch
import pytest
import numpy as np
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from torchensemble import FastGeometricClassifier as clf
from torchensemble import FastGeometricRegressor as reg
from torchensemble.utils.logging import set_logger
# Route test logging to a dedicated file prefix.
set_logger("pytest_fast_geometric")

# Testing data: two 2-D samples with class labels (clf) and scalar
# regression targets reshaped to a column vector (reg).
X_test = torch.Tensor(np.array(([0.5, 0.5], [0.6, 0.6])))
y_test_clf = torch.LongTensor(np.array(([1, 0])))
y_test_reg = torch.FloatTensor(np.array(([0.5, 0.6])))
y_test_reg = y_test_reg.view(-1, 1)
# Base estimator
class MLP_clf(nn.Module):
    """Minimal two-layer MLP (2 -> 2 -> 2) used as the base classifier."""

    def __init__(self):
        super(MLP_clf, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 2)

    def forward(self, X):
        """Flatten each sample and pass it through both linear layers."""
        flattened = X.view(X.size()[0], -1)
        return self.linear2(self.linear1(flattened))
class MLP_reg(nn.Module):
    """Minimal two-layer MLP (2 -> 2 -> 1) used as the base regressor."""

    def __init__(self):
        super(MLP_reg, self).__init__()
        self.linear1 = nn.Linear(2, 2)
        self.linear2 = nn.Linear(2, 1)

    def forward(self, X):
        """Flatten each sample and pass it through both linear layers."""
        flattened = X.view(X.size()[0], -1)
        return self.linear2(self.linear1(flattened))
def test_fast_geometric_workflow_clf():
    """
    This unit test checks the error message when calling `predict` before
    calling `ensemble`.
    """
    model = clf(estimator=MLP_clf, n_estimators=2, cuda=False)
    model.set_optimizer("Adam")

    # Prepare data
    test = TensorDataset(X_test, y_test_clf)
    test_loader = DataLoader(test, batch_size=2, shuffle=False)

    # Evaluating before `ensemble` must fail with a descriptive message.
    with pytest.raises(RuntimeError) as excinfo:
        model.evaluate(test_loader)
    assert "Please call the `ensemble` method to build" in str(excinfo.value)
def test_fast_geometric_workflow_reg():
    """
    This unit test checks the error message when calling `predict` before
    calling `ensemble`.
    """
    model = reg(estimator=MLP_reg, n_estimators=2, cuda=False)
    model.set_optimizer("Adam")

    # Prepare data
    test = TensorDataset(X_test, y_test_reg)
    test_loader = DataLoader(test, batch_size=2, shuffle=False)

    # Evaluating before `ensemble` must fail with a descriptive message.
    with pytest.raises(RuntimeError) as excinfo:
        model.evaluate(test_loader)
    assert "Please call the `ensemble` method to build" in str(excinfo.value)
| Python | 85 | 26.435293 | 77 | /torchensemble/tests/test_fast_geometric.py | 0.653087 | 0.635935 |
fvicaria/fv-sectools | refs/heads/main | #!/usr/bin/env python
from distutils.core import setup
# Package metadata for fv-sectools.
# NOTE(review): distutils is deprecated (PEP 632, removed in Python 3.12);
# migrating to setuptools is advisable.
setup(name='fv-sectools',
      description='A set of IP-based security checks for websites and applications',
      long_description=open('README.rst').read(),
      version='0.1dev',
      author='F Vicaria',
      author_email='fvicaria@hotmail.com',
      url='http://www.vicaria.org/',
      packages=['fv-sectools', ],
      python_requires='>=3.6',
      license='MIT License',
      platforms=['Windows']
      )
| Python | 15 | 29.533333 | 84 | /setup.py | 0.617904 | 0.60917 |
ljbelenky/murphy | refs/heads/master | from math import cos, sin, tan, atan, radians
import matplotlib.pyplot as plt
import numpy as np
from copy import deepcopy
from Murphy.link import Link
from Murphy.bedframe import Bedframe
from Murphy.murphy import Murphy
import sys
import pickle
class MurphyBed():
    '''The MurphyBed Class represents a collection of Murphy objects, all of the same design, solved over the full range of angles from deployed (0) to stowed (90)'''
    def __init__(self, bed, desired_deployed_height, desired_stowed_height):
        # bed -- a Murphy assembly; heights are design targets in the same
        # units as the bedframe geometry.
        self.bed = bed
        self.desired_deployed_height, self.desired_stowed_height = desired_deployed_height, desired_stowed_height
        # angle -> deep-copied Murphy solved at that angle
        self.collected_solutions = {}

    def solve_over_full_range(self, steps):
        """Solve the assembly at `steps` evenly spaced angles in [0, 90]."""
        for angle in np.linspace(0,90, steps):
            self.bed.bedframe.angle = angle
            self.bed.assemble()
            self.collected_solutions[angle] = deepcopy(self.bed)

    @property
    def murphy_error(self):
        '''murphy_error is the sum of all differences between current design and optimal design. Used to optimize fixed, positions and rigid components.
        Calculation of Murphy Error requires collected_solutions for all angles between 0 and 90'''
        deployed = self.collected_solutions[0]
        stowed = self.collected_solutions[90]
        errors = []
        # Per-term weights dividing the squared errors below (11 terms).
        balance = np.array([5, 7, 2, 1, 1, 1, 50, 50, 1, 1,1])
        # When deployed, the bed should be at desired height
        errors.append((deployed.bedframe.y+deployed.bedframe.t-self.desired_deployed_height)**2)
        # When deployed, the head of the bed should be close to the wall
        errors.append(deployed.bedframe.x**2)
        # When stowed, the bed should be flat up against the wall
        errors.append((stowed.bedframe.x-stowed.bedframe.h_headboard)**2)
        # When stowed, the foot of the bed should be at desired height below the window
        errors.append((stowed.bedframe.y+stowed.bedframe.l - self.desired_stowed_height)**2)
        # No part of the assembly should ever extend outside of the house
        left_most = 0
        for murphy in self.collected_solutions.values():
            for component in [murphy.bedframe, murphy.A, murphy.B]:
                left_most = min(left_most, component.extents['left'])
        errors.append(left_most**2)
        # when stowed, no part of the links should extend forward of the bedframe if it is above the floor
        def stowed_encroachment(link):
            if (link.extents['top'] > 0) and (link.extents['right'] > stowed.bedframe.x):
                return (link.extents['right']-stowed.bedframe.x)**2
            else: return 0
        errors.append(max([stowed_encroachment(link) for link in [stowed.A, stowed.B]]))
        # when deployed, no part of the links should extend above/forward of the bedframe
        # NOTE(review): `top - y + t` is evaluated left-to-right; if the intent
        # was `top - (y + t)` this term is off by 2*t -- confirm.
        def deployed_encroachment(link):
            if (link.extents['right'] > deployed.bedframe.x) and (link.extents['top'] > (deployed.bedframe.y+deployed.bedframe.t)):
                return (link.extents['top'] - deployed.bedframe.y+deployed.bedframe.t)**2
            else: return 0
        errors.append(max([deployed_encroachment(link) for link in [deployed.A, deployed.B]]))
        # the floor opening should not be much larger than the thickness of the beframe
        floor_opening = 0
        for murphy in self.collected_solutions.values():
            for component in [murphy.bedframe, murphy.A, murphy.B]:
                floor_opening = max(floor_opening, component.floor_opening)
        if floor_opening > stowed.bedframe.x:
            error = floor_opening**2
        else:
            error = 0
        errors.append(error)
        # the bed should be buildable (ikea_error = assembly mismatch)
        errors.append(max([i.ikea_error for i in self.collected_solutions.values()])**2)
        # Link A,B Attachment point must be on the bedframe
        for i in [self.bed.A, self.bed.B]:
            x = i.attachment['x']
            y = i.attachment['y']
            if (0 < x < self.bed.bedframe.l) and (0 < y < self.bed.bedframe.t):
                errors.append(0)
            elif (0 < x < self.bed.bedframe.depth_of_headboard) and (0 < y < self.bed.bedframe.h_headboard):
                errors.append(0)
            else:
                # Penalize by squared distance to the bedframe's center of gravity.
                X,Y = self.bed.bedframe.CoG
                errors.append((X-x)**2 + (Y-y)**2)
        errors = (np.array(errors)/balance)
        return errors.sum(), errors
def plot_all(murphy_bed):
    """Overlay every solved pose (bedframe + both links) on a single axes."""
    ax = plt.figure().add_subplot(111)
    for i in murphy_bed.collected_solutions.values():
        for j in [i.bedframe, i.A, i.B]:
            j.plot(ax)
    plt.show()
def cycles(n=10):
    """Return the optimization iteration count.

    Uses sys.argv[1] when it parses as an integer, otherwise falls back to
    the default *n*.
    """
    if len(sys.argv) > 1:
        try:
            return int(sys.argv[1])
        # BUG FIX: was a bare `except:` -- int() on an argv string can only
        # raise ValueError, so catch exactly that.
        except ValueError:
            pass
    return n
def plot():
    """Plot the optimization history (globals) then the final design."""
    plt.plot(adjustments)
    plt.show()
    plt.plot(murphy_errors_history)
    plt.show()
    plot_all(murphy_bed)
if __name__ == '__main__':
    angle_steps = 5
    learning_rate = -.08

    # The basic components of a bed
    bedframe = Bedframe(10,4,10, 72, 12, 8)
    A_link = Link(x=0,y=0,length=10,width=4,angle=80, color = 'r', bedframe = bedframe, attachment = (5,2))
    B_link = Link(x=20, y = -1, length = 10, width = 4, angle = 110, color ='g', bedframe = bedframe, attachment = (18,2))

    # A bed assembled at a single position
    assembly = Murphy(bedframe, A_link, B_link)

    # The complete solution of a bed from deployed to stowed
    murphy_bed = MurphyBed(assembly, 15, 40)

    # with open('murphy.pkl','rb') as f:
    #     murphy_bed = pickle.load(f)

    murphy_bed.solve_over_full_range(angle_steps)
    print('Initial Murphy Error: ', murphy_bed.murphy_error[0])

    # initial_design = deepcopy(murphy_bed)
    murphy_error_history = []
    murphy_errors_history = []
    adjustments = []

    # Coordinate-descent loop: perturb one randomly chosen design variable,
    # estimate the error gradient by finite difference, then step against it.
    # NOTE(review): exec() on internally-built strings is used for dynamic
    # attribute access; getattr/setattr would be safer and clearer.
    for i in range(cycles()):
        print('#'*20+'\n'+str(i)+'\n'+'#'*20)
        murphy_bed.bed = murphy_bed.collected_solutions[0]
        variable = np.random.choice(np.array(['A.x','A.y', "A.attachment['x']", "A.attachment['y']", 'A.length', 'B.x','B.y','B.length', "B.attachment['x']", "B.attachment['y']"]))
        print(variable)
        errors = []
        # Evaluate at +0.5 and -0.5 around the current value (net -=1 after +=0.5).
        for step in ['+=0.5', '-=1']:
            exec('murphy_bed.bed.{variable}{step}'.format(variable = variable, step=step))
            murphy_bed.solve_over_full_range(angle_steps)
            errors.append(murphy_bed.murphy_error[0])
        partial_derivative = errors[0]-errors[1]
        # +0.5 restores the variable to its original value before stepping.
        adjustment = partial_derivative*learning_rate + 0.5
        exec('murphy_bed.bed.{variable}+={adjustment}'.format(variable = variable, adjustment = adjustment))
        adjustments.append(adjustment)
        murphy_bed.solve_over_full_range(angle_steps)
        print('Adjusted Murphy Error: ', murphy_bed.murphy_error[0])
        murphy_error_history.append(murphy_bed.murphy_error[0])
        murphy_errors_history.append(murphy_bed.murphy_error[1])
        # Periodic checkpoint of the whole optimizer state.
        if i%100==0:
            with open('murphy.pkl', 'wb') as f:
                pickle.dump(murphy_bed, f)

    with open('murphy.pkl', 'wb') as f:
        pickle.dump(murphy_bed, f)

    plot()
ljbelenky/murphy | refs/heads/master | import numpy as np
from math import radians, sin, cos
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression as LR
class Bedframe():
    """Rectangular bed panel: positioned at (x, y), rotated by `angle`.

    All corner/extent properties are derived from the head-lower corner
    (x, y), the thickness t, the length l, and the rotation angle.
    """
    def __init__(self, x,y, thickness, length, margin, angle):
        '''Design elements'''
        self.t = thickness
        self.l = length
        self.margin = margin # the distance from the edges to the point where a link can be connected
        '''Current Position'''
        self.x, self.y = x,y
        '''Angle in degrees, 0 is deployed, 90 is stowed'''
        self.angle = angle

    @property
    def lower_foot(self):
        """(x, y) of the foot end of the bottom face."""
        theta = radians(self.angle)
        return self.x + self.l*cos(theta), self.y + self.l*sin(theta)

    @property
    def upper_foot(self):
        """(x, y) of the foot end of the top face."""
        theta = radians(self.angle)
        x = self.x + self.l*cos(theta) - self.t*sin(theta)
        y = self.y + self.l*sin(theta) + self.t*cos(theta)
        return x, y

    @property
    def lower_head(self):
        """(x, y) of the head end of the bottom face (the anchor point)."""
        return self.x, self.y

    @property
    def upper_head(self):
        """(x, y) of the head end of the top face."""
        theta = radians(self.angle)
        x = self.x - self.t*sin(theta)
        y = self.y + self.t*cos(theta)
        return x,y

    @property
    def left_edge(self):
        """Leftmost x over all four corners (axis-aligned bound)."""
        return min(self.lower_foot[0], self.lower_head[0], self.upper_foot[0], self.upper_head[0])

    @property
    def right_edge(self):
        """Rightmost x over all four corners (axis-aligned bound)."""
        return max(self.lower_foot[0], self.lower_head[0], self.upper_foot[0], self.upper_head[0])

    @property
    def top(self):
        """Highest y over all four corners (axis-aligned bound)."""
        return max(self.lower_foot[1], self.lower_head[1], self.upper_foot[1], self.upper_head[1])

    @property
    def bottom(self):
        """Lowest y over all four corners (axis-aligned bound)."""
        return min(self.lower_foot[1], self.lower_head[1], self.upper_foot[1], self.upper_head[1])

    def _offset_point(self, p, p1, p2, offset):
        """Move corner *p* inward by *offset* along the unit directions toward
        its two adjacent corners *p1* and *p2*."""
        x, y = p
        x1, y1, = p1
        x2, y2 = p2
        #vector1
        d1 = (((x1-x)**2 + (y1-y)**2)**.5)/offset
        v1 = (x1-x)/d1, (y1-y)/d1
        #vector from (x,y) to (x2,y2)
        d2 = (((x2-x)**2 + (y2-y)**2)**.5)/offset
        v2 = (x2-x)/d2, (y2-y)/d2
        return x + v1[0] + v2[0], y + v1[1] + v2[1]

    @property
    def head_lower_margin(self):
        """Lower-head corner inset by `margin` (valid link attachment point)."""
        return self._offset_point(self.lower_head, self.lower_foot, self.upper_head, self.margin)

    @property
    def head_upper_margin(self):
        """Upper-head corner inset by `margin`.

        NOTE(review): the adjacent corners passed here are the *diagonal*
        neighbors (lower_head, upper_foot) -- confirm this is intended.
        """
        return self._offset_point(self.upper_head, self.lower_head, self.upper_foot, self.margin)

    @property
    def foot_lower_margin(self):
        """Lower-foot corner inset by `margin`."""
        return self._offset_point(self.lower_foot, self.upper_foot, self.lower_head, self.margin)

    @property
    def foot_upper_margin(self):
        """Upper-foot corner inset by `margin`."""
        return self._offset_point(self.upper_foot, self.upper_head, self.lower_foot, self.margin)

    # @property
    # def floor_opening(self):
    #     if (self.bottom >= 0) or (self.top <= 0):
    #         return 0
    #     #topside
    #     if np.sign(self.upper_head[1]) == np.sign(self.upper_foot[1]):
    #         topside = 0
    #     else:
    #         ys = np.array([[self.upper_head[1]], [self.upper_head[1]])
    #         xs = np.array([[self.upper_head[0]],[self.lower_head[0]]])
    #         topside = LR().fit(ys, xs).predict([[0]])[0]

    def plot(self, ax = None):
        """Draw the frame outline, its bounding box and the margin points.

        Creates (and shows) a new figure when *ax* is None; returns the axes.
        """
        color = 'k'
        plot_here = False
        if not ax:
            ax = plt.figure().add_subplot(111)
            ax.set_aspect('equal')
            plot_here = True
        xs = [self.lower_head[0], self.lower_foot[0], self.upper_foot[0], self.upper_head[0], self.lower_head[0]]
        ys = [self.lower_head[1], self.lower_foot[1], self.upper_foot[1], self.upper_head[1], self.lower_head[1]]
        ax.plot(xs, ys, color = color)
        bounding_xs = [self.left_edge, self.right_edge, self.right_edge, self.left_edge, self.left_edge]
        bounding_ys = [self.bottom, self.bottom, self.top, self.top, self.bottom]
        ax.plot(bounding_xs, bounding_ys, color = 'gray')
        ax.scatter(self.head_lower_margin[0], self.head_lower_margin[1])
        ax.scatter(self.head_upper_margin[0], self.head_upper_margin[1])
        ax.scatter(self.foot_upper_margin[0], self.foot_upper_margin[1])
        ax.scatter(self.foot_lower_margin[0], self.foot_lower_margin[1])
        # # ax.scatter(self.CoG[0], self.CoG[1], marker = 'X', color = color)
        # # ax.scatter(self.floor_opening, 0, marker = 'o', color = color)
        # ax.plot([self.extents['left'], self.extents['right'], self.extents['right'], self.extents['left'], self.extents['left']],
        #         [self.extents['bottom'], self.extents['bottom'], self.extents['top'], self.extents['top'], self.extents['bottom']],
        #         alpha = .1, color = color)
        if plot_here: plt.show()
        return ax
if __name__ == '__main__':
    # Quick visual smoke test: draw one frame at a 10-degree tilt.
    b = Bedframe(0,0, 15, 80, 2, 10)
    b.plot()
    plt.show()
| Python | 138 | 33.586956 | 131 | /src/Murphy/bedframe.py | 0.575199 | 0.552995 |
ljbelenky/murphy | refs/heads/master |
from math import sin, cos, radians, atan
import numpy as np
import matplotlib.pyplot as plt
class Link():
    """One rigid link of the murphy-bed mechanism.

    A link is a rounded-end bar of `length` x `width`, anchored at its
    proximal point (x, y) in room coordinates and rotated `angle` degrees
    counter-clockwise from the +x axis.  It may be pinned to the bedframe
    at `attachment`, a point given in bedframe-relative coordinates.
    """
    def __init__(self, x, y, length, width, angle, color, bedframe, attachment = None):
        self.x, self.y = x, y
        self.length, self.width = length, width
        self.angle = angle          # degrees CCW from +x
        self.color = color          # matplotlib color used by plot()
        self.bedframe = bedframe
        # Attachment point relative to the bedframe.  The original code
        # indexed `attachment` unconditionally, so the documented default
        # of None crashed; None now simply means "not attached".
        if attachment is None:
            self.attachment = None
        else:
            self.attachment = {'x': attachment[0], 'y': attachment[1]}
    @property
    def room_attachment(self):
        """Attachment point in room coordinates, or None if unattached."""
        if self.attachment:
            theta = radians(self.bedframe.angle)
            ax_, ay_ = self.attachment['x'], self.attachment['y']
            # Rotate the frame-relative vector by the frame angle, then
            # translate by the frame origin.  Equivalent to the previous
            # polar form l*cos(theta + atan(y/x)), but correct in every
            # quadrant and safe when the x component is zero (atan(y/x)
            # raised ZeroDivisionError and mis-placed points with x < 0).
            x = self.bedframe.x + ax_ * cos(theta) - ay_ * sin(theta)
            y = self.bedframe.y + ax_ * sin(theta) + ay_ * cos(theta)
            return {'x': x, 'y': y}
        else: return None
    @property
    def distal(self):
        """Far end of the link's centerline, in room coordinates."""
        x, y, l, theta = self.x, self.y, self.length, radians(self.angle)
        X = x + l * cos(theta)
        Y = y + l * sin(theta)
        return X, Y
    @property
    def edges(self):
        """The two long sides, each ((x0, y0), (x1, y1)), offset half a
        width perpendicular to the centerline."""
        x, y, w, theta = self.x, self.y, self.width/2, radians(self.angle)
        X, Y = self.distal
        x0 = x - w*sin(theta)
        x1 = X - w*sin(theta)
        y0 = y + w*cos(theta)
        y1 = Y + w*cos(theta)
        X0 = x + w*sin(theta)
        X1 = X + w*sin(theta)
        Y0 = y - w*cos(theta)
        Y1 = Y - w*cos(theta)
        return [((x0, y0), (x1, y1)), ((X0, Y0), (X1, Y1))]
    @property
    def extents(self):
        """Axis-aligned bounding box including the rounded ends."""
        left = min(self.x, self.distal[0]) - self.width/2
        right = max(self.x, self.distal[0]) + self.width/2
        top = max(self.y, self.distal[1]) + self.width/2
        bottom = min(self.y, self.distal[1]) - self.width/2
        return {'left': left, 'right': right, 'top': top, 'bottom': bottom}
    @property
    def floor_opening(self):
        """Rightmost x at which the link reaches the floor line y == 0.

        Considers the disc at each end and, when the two ends straddle the
        floor, the crossing of the long edge.  Returns 0 when the link is
        clear of the floor.
        """
        w = r = self.width/2
        theta = radians(self.angle)
        x, y = self.x, self.y
        X, Y = self.distal
        # proximal end disc
        if abs(y) < r:
            a0 = x + ((r**2) - y**2)**0.5
        else:
            a0 = 0
        # distal end disc
        if abs(Y) < w:
            a1 = X + ((r**2) - Y**2)**0.5
        else:
            a1 = 0
        # centerline crossing; y*Y < 0 guarantees Y != y and sin(theta) != 0
        if y * Y < 0:
            a2 = x - y*(X - x)/(Y - y) + abs(w/sin(theta))
        else:
            a2 = 0
        return max(a0, a1, a2)
    @property
    def CoG(self):
        """Center of gravity: midpoint of the centerline."""
        return (self.x + self.distal[0])/2, (self.y + self.distal[1])/2
    @property
    def ikea_error(self):
        '''Ikea error is the assembly error: the SQUARED distance from the
        distal point of a link to its intended attachment point.
        0 when the link has no attachment.'''
        if self.attachment:
            fit_error = ((self.distal[0]-self.room_attachment['x'])**2+(self.distal[1]-self.room_attachment['y'])**2)
        else: fit_error = 0
        return fit_error
    def plot(self, ax = None):
        """Draw the link (edges, end discs, extents box, floor-opening
        point and attachment) on *ax*, creating and showing a new figure
        when none is given.  Returns the axes."""
        plot_here = False
        if not ax:
            ax = plt.figure().add_subplot(111)
            ax.set_aspect('equal')
            plot_here = True
        r = self.width/2
        for edge in self.edges:
            ax.plot([edge[0][0], edge[1][0]], [edge[0][1], edge[1][1]], c = self.color)
        # rounded ends
        for x, y in zip([self.x, self.distal[0]], [self.y, self.distal[1]]):
            phi = np.radians(np.linspace(0, 360, 37))
            ax.plot(r*np.cos(phi)+x, r*np.sin(phi)+y, c = self.color)
        # Extents Box
        ax.plot([self.extents['left'], self.extents['right'], self.extents['right'], self.extents['left'], self.extents['left']],
                [self.extents['bottom'], self.extents['bottom'], self.extents['top'], self.extents['top'], self.extents['bottom']],
                alpha = .1, c = self.color)
        # Floor Opening Point
        ax.scatter(self.floor_opening, 0, c=self.color)
        # Attachment Point
        if self.attachment:
            ax.scatter(self.room_attachment['x'], self.room_attachment['y'], marker = 'x', c = self.color)
            ax.plot([self.distal[0], self.room_attachment['x']], [self.distal[1], self.room_attachment['y']], c = self.color, linestyle = 'dashed')
        if plot_here: plt.show()
        return ax
| Python | 119 | 34.722691 | 147 | /src/Murphy/link.py | 0.532926 | 0.511994 |
class Point:
    """A 2-D point supporting distance queries and vector arithmetic."""
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def distance(self, other):
        """Distance to another Point, or perpendicular distance to the
        infinite line through a LineSegment.

        Raises TypeError for any other argument type (the original raised
        a bare Exception; TypeError is a subclass, so existing handlers
        still catch it).
        """
        if isinstance(other, Point):
            return self._distance_to_point(other)
        elif isinstance(other, LineSegment):
            return self._distance_to_line(other)
        else:
            raise TypeError('expected Point or LineSegment, got %s'
                            % type(other).__name__)
    def _distance_to_point(self, other):
        # Euclidean distance.
        return ((self.x-other.x)**2+(self.y-other.y)**2)**.5
    def _distance_to_line(self, line):
        # Perpendicular distance from (x, y) to the infinite line through
        # line.p0 and line.p1.  FIX: the original read line.point1/.point2,
        # attributes LineSegment never defines (it stores p0/p1), so this
        # path always raised AttributeError.
        x, y = self.x, self.y
        x1, y1 = line.p0.x, line.p0.y
        x2, y2 = line.p1.x, line.p1.y
        numerator = abs((y2-y1)*x-(x2-x1)*y + x2*y1 - y2*x1)
        denominator = ((y2-y1)**2 + (x2-x1)**2)**.5
        return numerator/denominator
    def __sub__(self, other):
        x = self.x - other.x
        y = self.y - other.y
        return Point(x,y)
    def __add__(self, other):
        x = self.x + other.x
        y = self.y + other.y
        return Point(x,y)
    def __isub__(self, other):
        self.x -= other.x
        self.y -= other.y
        return self
    def __iadd__(self, other):
        self.x += other.x
        self.y += other.y
        return self
    def __repr__(self):
        return f'{self.x},{self.y}'
    def is_between_lines(self, line1, line2):
        '''Yields true if the point is in the interior of the rectangle
        defined by the two parallel, flush, lines'''
        # Not implemented yet; deliberately returns None.
        pass
class LineSegment:
    """A line segment between two Point endpoints p0 and p1."""
    def __init__(self, p0, p1):
        # Store the endpoints directly.  The original ran them through
        # list(set([p0, p1])), which reordered the endpoints arbitrarily
        # (id-based hashing) and raised IndexError whenever both arguments
        # were the same object (a degenerate, zero-length segment).
        self.p0 = p0
        self.p1 = p1
    @property
    def length(self):
        return self.p0.distance(self.p1)
    def distance_to_point(self, point):
        """Perpendicular distance from *point* to the line through p0/p1."""
        return point.distance(self)
    def __add__(self, point):
        # Translated copy; self is unchanged.
        return LineSegment(self.p0 + point, self.p1 + point)
    def __sub__(self, point):
        return LineSegment(self.p0 - point, self.p1 - point)
    def __iadd__(self, point):
        self.p0, self.p1 = self.p0 + point, self.p1 + point
        # FIX: the original returned None, so `seg += p` rebound seg to None.
        return self
    def __isub__(self, point):
        self.p0, self.p1 = self.p0 - point, self.p1 - point
        return self
    def __repr__(self):
        return f'({self.p0})<-->({self.p1})'
| Python | 88 | 24.511364 | 71 | /src/Murphy/geometric_objects.py | 0.524933 | 0.498219 |
ljbelenky/murphy | refs/heads/master | class Murphy():
'''The Murphy Object represents a bed assembly at a particular angle'''
learning_rate = -.2
threshold = .001
def __init__(self, bedframe, A_link, B_link):
''' Basic structure'''
self.bedframe = bedframe
self.A = A_link
self.B = B_link
@property
def ikea_error(self):
'''The total difference between actual positions and intended positions for fixed, rigid components.'''
return sum([component.ikea_error for component in [self.A, self.B]])
def plot(self):
ax = plt.figure().add_subplot(111)
ax.set_aspect('equal')
for component in [self.bedframe, self.A, self.B]:
ax = component.plot(ax)
ax.set_title(round(self.ikea_error,2))
plt.show()
def assemble(self, plot_here = False):
''' For a given structure and bed angle, adjust link angles and bed (x,y) to minimize ikea error.'''
# loop over the following variables, making small adjustments until ikea error is minimized (ideally zero):
# [bedframe.x, bedframe.y, A_link.angle, B_link.angle, C_link.angle]
# Note: it is necessary to reposition C_link (x,y) to B_link.distal after B_link.angle is adjusted.
# while True:
for i in range(1000):
for variable in ['A.angle', 'B.angle', 'bedframe.x', 'bedframe.y']:
errors = []
for step in ['+=0.5', '-=1']:
exec('self.{variable} {step}'.format(variable = variable, step = step))
errors.append(self.ikea_error)
partial_derivative = errors[0]-errors[1]
adjustment = self.learning_rate*partial_derivative + .5
exec('self.{variable} += {adjustment}'.format(variable = variable, adjustment = adjustment))
if (i%5000==0) and plot_here:
self.plot()
if self.ikea_error < 0.125: break
# print('Assembled in {} steps with Ikea error {}'.format(i,round(self.ikea_error,3)))
if plot_here: self.plot()
| Python | 43 | 47.162792 | 115 | /src/Murphy/murphy.py | 0.585707 | 0.572187 |
Asritha-Reddy/5TASK | refs/heads/master | str = input("Enter a string: ")
def Dictionary(i):
    """Return a mapping from each element of iterable *i* to the number of
    times it occurs."""
    counts = {}
    for item in i:
        counts[item] = counts.get(item, 0) + 1
    return counts
def most_frequent(str):
    """Print each alphabetic character of *str* (lower-cased) and its
    frequency, most frequent first; ties are broken by letter, descending."""
    letters = [ch.lower() for ch in str if ch.isalpha()]
    counts = Dictionary(letters)
    ranked = sorted(((freq, ch) for ch, freq in counts.items()), reverse=True)
    for freq, ch in ranked:
        print (ch, freq)
most_frequent(str)
| Python | 20 | 25 | 70 | /frequency.py | 0.625461 | 0.621771 |
kstandvoss/TFCA | refs/heads/master | from argparse import Namespace
import co2_dataset
import os
import time
# Settings
# Experiment configuration bundled into an argparse-style Namespace and
# handed to co2_dataset.main (mirrors that script's CLI options).
data_path = 'CO2/monthly_in_situ_co2_mlo.csv'
save_path = 'reg_params/params3'
epochs = 10000
minibatch_size = 100
mc_samples = 50
optimizer = 'adam'
learning_rate = 1e-1
momentum = 0.9
l2_weight = 1e-6
drop_p = 0.1
# LIF neuron parameters
tau_rc = 0.07
tau_ref = 0.0005
amplitude = 0.01
# train=False with continue_training=True: main() loads saved parameters
# instead of training.
train = False
continue_training = True
spiking = True
plot = True
comment = 'test run'
args = Namespace(data_path=data_path, epochs=epochs, minibatch_size=minibatch_size,
    optimizer=optimizer, learning_rate=learning_rate, l2_weight=l2_weight, momentum=momentum,
    mc_samples=mc_samples, tau_ref=tau_ref, tau_rc=tau_rc, train=train, continue_training=continue_training,
    save_path=save_path, amplitude=amplitude, drop_p=drop_p, spiking=spiking, plot=plot)
print('########################')
print(comment) # a comment that will be printed in the log file
print(args) # print all args in the log file so we know what we were running
print('########################')
start = time.time()
loss = co2_dataset.main(args)
print("The training took {:.1f} minutes with a loss of {:.3f}".format((time.time()-start)/60,loss)) # measure time
| Python | 41 | 37.439026 | 151 | /run.py | 0.528553 | 0.502538 |
kstandvoss/TFCA | refs/heads/master | # coding: utf-8
import nengo
import nengo_dl
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from scipy import signal
import argparse
import pdb
def main(args):
    """Train/evaluate a nengo_dl network on the Mauna Loa CO2 series and
    estimate predictive uncertainty with MC dropout.

    Trains (or loads) a dense SoftLIF-rate network on a detrended slice of
    the monthly CO2 record, samples it `args.mc_samples` times with dropout
    active to get a predictive mean/variance, and optionally repeats the
    evaluation with spiking LIF neurons.  Returns the final test loss
    (0 when no training was requested).
    """
    co2_data = pd.read_csv(args.data_path, usecols=[0,4,5,6,7,8,9])
    co2_data.columns = ['Date', 'standard', 'season_adjust', 'smoothed', 'smoothed_season', 'standard_no_missing', 'season_no_missing']
    # Detrend a 400-sample slice and scale it to roughly [-2, 2].
    detrended = signal.detrend(co2_data['standard_no_missing'][200:600])
    detrended /= np.max(detrended)
    detrended *= 2
    #if args.plot:
    # plt.plot(detrended)
    # plt.axvline(x=300, c='black', lw='1')
    # plt.ylim([-20,20])
    # plt.xlim([0,500])
    # # Training setup
    # leaky integrate and fire parameters
    lif_params = {
        'tau_rc': args.tau_rc,
        'tau_ref': args.tau_ref,
        'amplitude': args.amplitude
    }
    # training parameters
    drop_p = args.drop_p
    minibatch_size = args.minibatch_size
    n_epochs = args.epochs
    learning_rate = args.learning_rate
    momentum = args.momentum
    l2_weight = args.l2_weight
    # lif parameters
    lif_neurons = nengo.LIF(**lif_params)
    # softlif parameters (lif parameters + sigma)
    softlif_neurons = nengo_dl.SoftLIFRate(**lif_params,sigma=0.002)
    # ensemble parameters
    ens_params = dict(max_rates=nengo.dists.Choice([100]), intercepts=nengo.dists.Choice([0]))
    def build_network(neuron_type, drop_p, l2_weight, n_units=1024, num_layers=4, output_size=1):
        # Build a feed-forward stack of dense TensorNodes, each followed by
        # the given neuron nonlinearity and a dropout layer, plus a dense
        # output layer.  Returns (network, input node, output node).
        with nengo.Network() as net:
            use_dropout = False
            if drop_p:
                use_dropout = True
            #net.config[nengo.Connection].synapse = None
            #nengo_dl.configure_settings(trainable=False)
            # input node
            inp = nengo.Node([0])
            shape_in = 1
            x = inp
            # the regularizer is a function, so why not reuse it
            reg = tf.contrib.layers.l2_regularizer(l2_weight)
            class DenseLayer(object):
                # Fully connected layer as a nengo_dl TensorNode callable.
                # The class-level counter `i` gives each instance uniquely
                # named TF variables.
                i=0
                def pre_build(self, shape_in, shape_out):
                    self.W = tf.get_variable(
                        "weights" + str(DenseLayer.i), shape=(shape_in[1], shape_out[1]),
                        regularizer=reg)
                    self.B = tf.get_variable(
                        "biases" + str(DenseLayer.i), shape=(1, shape_out[1]), regularizer=reg)
                    DenseLayer.i+=1
                def __call__(self, t, x):
                    return x @ self.W + self.B
            for n in range(num_layers):
                # add a fully connected layer
                a = nengo_dl.TensorNode(DenseLayer(), size_in=shape_in, size_out=n_units, label='dense{}'.format(n))
                nengo.Connection(x, a, synapse=None)
                shape_in = n_units
                x = a
                # apply an activation function
                x = nengo_dl.tensor_layer(x, neuron_type, **ens_params)
                # add a dropout layer
                x = nengo_dl.tensor_layer(x, tf.layers.dropout, rate=drop_p, training=use_dropout)
            # add an output layer
            a = nengo_dl.TensorNode(DenseLayer(), size_in=shape_in, size_out=output_size)
            nengo.Connection(x, a, synapse=None)
        return net, inp, a
    do_train = args.train
    continue_training = args.continue_training
    param_path = args.save_path
    trainset_size = len(detrended)
    x = np.linspace(-2,2,trainset_size)
    y = detrended
    # # training on continuous soft leaky integrate and fire neurons
    # construct the network
    net, inp, out = build_network(softlif_neurons, drop_p, l2_weight)
    with net:
        in_p = nengo.Probe(inp, 'output')
        out_p = nengo.Probe(out, 'output')
    """
    # define training set etc.
    """
    #pdb.set_trace()
    #train_x = {inp: x.reshape((minibatch_size, trainset_size // minibatch_size))[..., None]}
    #train_y = {out_p: y.reshape((minibatch_size, trainset_size // minibatch_size))[..., None]}
    # First 300 samples train, the remaining 100 test.
    target = x[:,None,None]
    train_x = {inp: target[:300]}
    train_y = {out_p: y[:300,None,None]}
    test_x = {inp: target[300:]}
    test_y = {out_p: y[300:,None,None]}
    # construct the simulator
    with nengo_dl.Simulator(net, minibatch_size=minibatch_size, tensorboard='./tensorboard') as sim:
    #, tensorboard='./tensorboard')
        # define the loss function (We need to do this in the
        # context of the simulator because it changes the
        # tensorflow default graph to the nengo network.
        # That is, tf.get_collection won't work otherwise.)
        def mean_squared_error_L2_regularized(y, t):
            if not y.shape.as_list() == t.shape.as_list():
                raise ValueError("Output shape", y.shape, "differs from target shape", t.shape)
            e = tf.reduce_mean((t - y)**2) + tf.add_n(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
            return e
        # Summed Frobenius norms of all dense-layer weights, for TensorBoard.
        with tf.name_scope('sum_weights'):
            first = 0
            for node in net.nodes:
                if type(node) == nengo_dl.tensor_node.TensorNode:
                    if 'Dense' in str(node.tensor_func):
                        if not first:
                            sum_weights = tf.linalg.norm(node.tensor_func.W)
                            first = 1
                        else:
                            sum_weights += tf.linalg.norm(node.tensor_func.W)
            weight_summary = tf.summary.scalar('sum_weights', sum_weights)
        starter_learning_rate = args.learning_rate
        learning_rate = tf.train.exponential_decay(starter_learning_rate, sim.tensor_graph.training_step,
                                               1000, 0.96, staircase=True)
        # define optimiser
        if args.optimizer=='rmsprop':
            opt = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
        elif args.optimizer=='sgd':
            opt = tf.train.MomentumOptimizer(learning_rate=learning_rate, momentum=momentum, use_nesterov=True)
        elif args.optimizer=='adadelta':
            opt = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
        elif args.optimizer=='adam':
            opt = tf.train.AdamOptimizer(learning_rate=learning_rate)
        #pdb.set_trace()
        loss = 0
        # actual training loop
        if do_train:
            if continue_training:
                sim.load_params(path=param_path)
                loss = sim.loss(test_x, test_y, objective='mse')
                print("error before training: ", loss)
            sim.train(train_x, train_y, opt, n_epochs=n_epochs, shuffle=True, objective={out_p:mean_squared_error_L2_regularized}, summaries=['loss', weight_summary])
            loss = sim.loss(test_x, test_y, objective='mse')
            print("error after training:", loss)
            sim.save_params(path=param_path)
        else:
            sim.load_params(path=param_path)
        # MC dropout: T stochastic forward passes over the whole input.
        T = args.mc_samples
        outputs = np.zeros((T,target.size))
        for t in range(T):
            for i in range(0,target.size,minibatch_size):
                sim.run_steps(1,input_feeds={inp: target[i:i+minibatch_size]})
                sim.soft_reset(include_trainable=False, include_probes=False)
            outputs[t] = sim.data[out_p].transpose(1,0,2).reshape((len(target),))
            sim.soft_reset(include_trainable=False, include_probes=True)
    # Predictive distribution from the MC samples; tau adds the
    # dropout-as-Bayesian-approximation variance correction.
    predictive_mean = np.mean(outputs, axis=0)
    predictive_variance = np.var(outputs, axis=0)
    tau = (1 - args.drop_p) / (2 * len(predictive_variance) * args.l2_weight)
    predictive_variance += tau**-1
    target = np.squeeze(target)
    if args.plot:
        plt.plot(target,predictive_mean,label='out')
        plt.fill_between(target, predictive_mean-2*np.sqrt(predictive_variance), predictive_mean+2*np.sqrt(predictive_variance),
            alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848', linewidth=0, label='variance')
        plt.plot(target,detrended,label='target', color='blue',alpha=0.5)
        plt.axvline(x=x[300], c='black', lw='1')
        plt.ylim([-10,10])
        plt.xlim([-2,2])
        plt.legend(loc='upper right')
    if args.spiking:
        # # test on LIF neurons
        # timesteps
        # MC dropout samples
        MC_drop = T
        T = 100
        # we want to see if spiking neural networks
        # need dropout at all, so we disable it
        net, inp, out = build_network(lif_neurons, drop_p=0, l2_weight=l2_weight)
        with net:
            in_p = nengo.Probe(inp)
            out_p = nengo.Probe(out)
        # start a new simulator
        # T is the amount of MC dropout samples
        sim = nengo_dl.Simulator(net, minibatch_size=minibatch_size)#, unroll_simulation=10, tensorboard='./tensorboard')
        # load parameters
        sim.load_params(path=param_path)
        # copy the input for each MC dropout sample
        minibatched_target = np.tile(target[:, None], (1,T))[..., None]
        # run for T timesteps
        spiking_outputs = np.zeros((target.size,T))
        spiking_inputs = np.zeros((target.size,T))
        for i in range(0,target.size,minibatch_size):
            sim.soft_reset(include_trainable=False, include_probes=True)
            sim.run_steps(T,input_feeds={inp: minibatched_target[i:i+minibatch_size,:]})
            spiking_outputs[i:i+minibatch_size] = sim.data[out_p][...,0]
            spiking_inputs[i:i+minibatch_size] = sim.data[in_p][...,0]
        if args.plot:
            # plot
            plt.figure()
            plt.scatter(spiking_inputs.flatten(), spiking_outputs.flatten(), c='r', s=1, label="output")
            plt.plot()
            #plt.plot(target.flatten(), y(target).flatten(), label="target", linewidth=2.0)
            plt.legend(loc='upper right');
            plt.plot(x,y, label='train set')
            plt.axvline(x=x[300], c='black', lw='1')
            plt.ylim([-10,10])
            plt.xlim([-2,2])
        # print(sim.data[out_p].shape)
        # Average over the last MC_drop of the T simulated timesteps.
        predictive_mean = np.mean(spiking_outputs[:,-MC_drop:],axis=1)
        predictive_variance = np.var(spiking_outputs[:,-MC_drop:],axis=1)
        tau = (1 - args.drop_p) / (2 * len(predictive_variance) * args.l2_weight)
        predictive_variance += tau**-1
        # NOTE(review): this figure is created even when args.plot is False,
        # unlike every other plotting call in this function -- confirm intent.
        plt.figure()
        plt.plot(target,predictive_mean,label='out')
        #plt.plot(target,spiking_outputs[:,-1],label='out')
        plt.fill_between(np.squeeze(target), predictive_mean-2*np.sqrt(predictive_variance), predictive_mean+2*np.sqrt(predictive_variance),
            alpha=0.5, edgecolor='#CC4F1B', facecolor='#FF9848', linewidth=0, label='variance')
        plt.plot(x, y, c='blue', alpha=0.5, label='dataset')
        #plt.scatter(x,y, color='black', s=9, label='train set')
        plt.axvline(x=x[300], c='black', lw='1')
        plt.legend(loc='upper right',)
        plt.ylim([-10,10])
        plt.xlim([-2,2])
        sim.close()
    if args.plot:
        plt.show()
    return loss
if __name__=='__main__':
    # CLI mirror of the Namespace that run.py builds by hand.
    parser = argparse.ArgumentParser(description='Train spiking neural network to perform variational inference on co2 dataset')
    parser.add_argument('data_path', action='store',
                        help='Path to data')
    parser.add_argument('-e', '--epochs', action='store', dest='epochs', type=int, default=100,
                        help='Number of training epochs')
    parser.add_argument('-mb', action='store', dest='minibatch_size', type=int, default=25,
                        help='Size of training mini batches')
    parser.add_argument('-t', action='store', dest='mc_samples', type=int, default=20,
                        help='Number of MC forwardpasses and timesteps for spiking network')
    parser.add_argument('-o', '--optimizer', action='store', dest='optimizer', default='rmsprop', choices=('sgd', 'adadelta', 'adam', 'rmsprop'),
                        help='Optimization function')
    parser.add_argument('-r', '--learning_rate', action='store', dest='learning_rate', type=float,
                        help='Learning rate', default=1e-4)
    parser.add_argument('-m', '--momentum', action='store', dest='momentum', type=float,
                        help='Momentum', default=0.9)
    parser.add_argument('-l', '--l2_weight', action='store', dest='l2_weight', type=float,
                        help='Weight of l2 regularization', default=1e-6)
    parser.add_argument('-d', '--dropout', action='store', dest='drop_p', type=float,
                        help='Dropout probability', default=0.1)
    parser.add_argument('-rc', '--tau_rc', action='store', dest='tau_rc', type=float,
                        help='LIF parameter', default=0.07)
    parser.add_argument('-ref', '--tau_ref', action='store', dest='tau_ref', type=float,
                        help='LIF parameter', default=0.0005)
    parser.add_argument('-a', '--amplitude', action='store', dest='amplitude', type=float,
                        help='LIF parameter', default=0.05)
    parser.add_argument('--save_path', action='store', default='./reg_params/params')
    # NOTE(review): store_true with default=True means args.train is ALWAYS
    # True from the CLI -- the flag cannot disable training.  Probably the
    # default should be False (or the flag should be --no-train); confirm.
    parser.add_argument('--train', action='store_true', dest='train', default=True,
                        help='Train new network, else load parameters')
    parser.add_argument('--continue_training', action='store_true', dest='continue_training', default=False,
                        help='Continue training from previous parameters')
    parser.add_argument('--plot', action='store_true', dest='plot', default=False,
                        help='Plot results')
    parser.add_argument('--spiking', action='store_true', dest='spiking', default=False,
                        help='Test spiking model')
    args = parser.parse_args()
    main(args)
| Python | 350 | 39.488571 | 176 | /co2_dataset.py | 0.564595 | 0.550131 |
mclain98021/FredwareBinTools | refs/heads/master | #!/usr/local/bin/python
from os.path import expanduser
# Run the BRP wizard script in this interpreter's namespace.  execfile is
# Python 2-only; a Python 3 port would use exec(open(path).read()).
execfile(expanduser('~/python/evm2003/brp/wizard.py'))
mclain98021/FredwareBinTools | refs/heads/master | #!/usr/bin/python
'''
Apache log analysis script for Amazon Code challenge.
July 2nd 2016 by Fred McLain.
Copyright (C) 2016 Fred McLain, all rights reserved.
high level language of your choice (e.g. Python/Ruby/Perl)
The right fit language appears to be Python, so I'm going with that even though I'm a Java developer.
required:
* Top 10 requested pages and the number of requests made for each
* Percentage of successful requests (anything in the 200s and 300s range)
* Percentage of unsuccessful requests (anything that is not in the 200s or 300s range)
* Top 10 unsuccessful page requests
* The top 10 IPs making the most requests, displaying the IP address and number of requests made.
* Option parsing to produce only the report for one of the previous points (e.g. only the top 10 urls, only the percentage of successful requests and so on)
* A README file explaining how to use the tool, what its dependencies and any assumptions you made while writing it
optional:
* Unit tests for your code.
* The total number of requests made every minute in the entire time period covered by the file provided.
* For each of the top 10 IPs, show the top 5 pages requested and the number of requests for each.
Assumptions:
* Statistics for the number of pages and requesting IPs does not exceed available memory.
* Log file lines are of a uniform format
* Log records are in time order
Sample log lines:
10.0.68.207 - - [31/Oct/1994:14:00:17 +0000] "POST /finance/request.php?id=39319 HTTP/1.1" 200 56193
10.0.173.204 - - [31/Oct/1994:14:00:20 +0000] "GET /kernel/get.php?name=ndCLVHvbDM HTTP/1.1" 403 759
Records are new line separated.
Fields in record are whitespace separated:
IP - - [timestamp] "request path status ?
'''
from __future__ import division
import sys
from optparse import OptionParser
# Command-line options: -a selects every report, the other flags select
# individual reports; the default log file is ./apache.log.
parser = OptionParser()
parser.add_option("-f", "--file", dest="fileName", help="file to parse, default=%default", metavar="FILE", default="apache.log")
parser.add_option("-a", "--reportAll", action="store_true", dest="reportAll", help="show all reports", default=False)
parser.add_option("-t", "--top10", action="store_true", dest="reportTop10", help="Top 10 requested pages and the number of requests made for each", default=False)
parser.add_option("-s", "--success", action="store_true", dest="reportSucccessPercentReq", help="Percentage of successful requests (anything in the 200s and 300s range)", default=False)
parser.add_option("-u", "--unsuccess", action="store_true", dest="reportUnsucccessPercentReq", help="Percentage of unsuccessful requests (anything that is not in the 200s or 300s range)", default=False)
parser.add_option("-r", "--top10Unsuccess", action="store_true", dest="reportTop10Unsuccess", help="Top 10 unsuccessful page requests", default=False)
parser.add_option("-i", "--top10IpPages", action="store_true", dest="reportTop10IpPages", help="The top 10 IPs making the most requests, displaying the IP address and number of requests made", default=False)
#parser.add_option("-m", "--numReqPerMinute", action="store_true", dest="reportReqPerMinute", help="The total number of requests made every minute in the entire time period covered by the file provided.", default=False)
(options, args) = parser.parse_args()
# accumulators for report stats
#totalRequests = 0
#requestMap = 0
#successCount = 0
#failCount = 0
errorList = []      # human-readable I/O and parse problems collected during the run
totalPages = {}     # page -> total request count
failPages = {}      # page -> failed (non-2xx/3xx) request count
successPages = {}   # page -> successful (2xx/3xx) request count
ipPages={}          # client IP -> request count
def analizeFile(fileName):
print "Parsing file:", fileName
try:
f = open(fileName)
except IOError:
errorList.append("Can not read " + fileName)
return
lineno = 0
for line in f:
lineno += 1
try:
analizeLine(line)
except:
errorList.append("Error in " + fileName + " on line " + str(lineno))
return
def analizeLine(logLine):
    '''Tally one Apache log record into the module-level counters.

    Fields in a record are whitespace separated:
    IP - - [timestamp] "request path status ?
    Statuses in [200, 400) count as successes, everything else as failures.
    Raises on malformed lines (too few fields, non-numeric status); the
    caller records those in errorList.
    '''
    fields = logLine.split()
    ip = fields[0]
    # timestamp/method are parsed (and will raise on short lines, which the
    # caller relies on) but not otherwise used yet.
    timestamp = fields[3].lstrip('[')
    method = fields[5].lstrip('"')
    page = fields[6]
    stat = int(fields[8])
    def bump(counter, key):
        # increment counter[key], creating it at 1 on first sight
        counter[key] = counter.get(key, 0) + 1
    bump(totalPages, page)
    bump(ipPages, ip)
    if 200 <= stat < 400:
        # success
        bump(successPages, page)
    else:
        # failure
        bump(failPages, page)
    return
def reportTop10(dict):
    # Print the 10 highest-valued keys of `dict` (the parameter name
    # shadows the builtin), one "rank key count" line each, highest first.
    s=sorted(dict,key=dict.__getitem__,reverse=True)
    i = 1
    for k in s:
        print i,k,dict[k]
        if i == 10:
            break
        i += 1
def report():
if options.reportAll or options.reportTop10:
print "Most requested pages:"
reportTop10(totalPages)
''' not in spec but useful?
print "Most successful pages (page, count):"
reportTop10(successPages)
'''
if options.reportAll or options.reportSucccessPercentReq:
# print len(successPages),"/",len(totalPages),len(failPages)
print "Percentage of successful requests: ",str(len(successPages)/len(totalPages)*100.),"%"
if options.reportAll or options.reportUnsucccessPercentReq:
print "Most failed pages (page, count):"
reportTop10(failPages)
if options.reportAll or options.reportTop10IpPages:
print "The top 10 IPs making the most requests, (IP, count)"
reportTop10(ipPages)
return
def usage():
    # Show the option help and abort with a non-zero exit status.
    parser.print_help()
    exit(-1)
    return  # unreachable: exit() raises SystemExit above
def go():
    # Entry point: parse the log named by -f, print the selected reports,
    # then dump any problems collected along the way.
    print "Apache log file parser demonstration by Fred McLain, July 2nd 2016"
    if 1 == len(sys.argv):
        usage() # require command line arguments or show usage and bail out
    analizeFile(options.fileName)
    report()
    if len(errorList) > 0:
        print "Errors in input",errorList
    return
go()
| Python | 173 | 36.682079 | 219 | /Amazon_log_analizer/ApacheLogParse.py | 0.657923 | 0.621261 |
J4ME5s/guess-the-number | refs/heads/master | basic.show_string("Think of a number between 1 to 10")
basic.show_string("Input your answer here")
input.button_is_pressed(Button.A)
basic.show_string("The answer was...")
basic.show_number(randint(1, 10))
| Python | 5 | 40.200001 | 54 | /main.py | 0.747573 | 0.718447 |
prakharg24/review_classifier_non_neural | refs/heads/master | import json
import codecs
import random
from sklearn.feature_extraction.text import CountVectorizer
from nltk.stem.snowball import SnowballStemmer
from sklearn.naive_bayes import MultinomialNB
import numpy as np
from sklearn import metrics
import numpy as np
from sklearn import svm
from sklearn.feature_extraction.text import TfidfVectorizer
from scipy.sparse import hstack
from nltk.tokenize import RegexpTokenizer
import pickle
import sys
def getclassmore(a):
    """Map a 1-5 star rating to a 4-class label: 1&2 -> 0, 3 -> 1,
    4 -> 2, 5 -> 3."""
    if a > 3:
        return a - 2
    if a < 3:
        return 0
    return 1
def getclass(a):
    """Collapse a 1-5 star rating to negative (0), neutral (1) or
    positive (2)."""
    if a < 3:
        return 0
    return 2 if a > 3 else 1
def allupper(word):
    """True iff every character of *word* satisfies str.isupper().

    Note this deliberately differs from word.isupper(): any non-cased
    character (digit, punctuation) makes it False, and the empty string
    is True.
    """
    return all(c.isupper() for c in word)
def cleandoc(doc):
    # Clean one review for vectorization: drop words shorter than 3 chars,
    # repeat words found in the part-of-speech set `imp` 4x (extra weight),
    # duplicate ALL-CAPS words, prefix the 4 words following a "not" with
    # "NOT_", and lowercase everything.
    global imp
    unclean = doc.split()
    words = []
    for word in unclean:
        if len(word)>2:
            words.append(word)
            if word in imp:
                for i in range(0, 3):
                    words.append(word)
    # lng is frozen before the second pass: copies appended below land past
    # index lng, so they are never themselves NOT_-prefixed or re-scanned.
    lng = len(words)
    for i in range(0, lng):
        word = words[i]
        if allupper(word):
            words.append(word)
        if word=="not":
            for j in range(1, 5):
                if(i+j<lng):
                    words[i+j]="NOT_" + words[i+j]
    lower_words = [word.lower() for word in words]
    return ' '.join(lower_words)
print("Reading side files")
imp = set()
file = open('adjectives.txt', 'r')
for adj_en in file.readlines():
imp.add(adj_en.split()[0])
file = open('adverbs.txt', 'r')
for adj_en in file.readlines():
imp.add(adj_en.split()[0])
file = open('verbs.txt', 'r')
for adj_en in file.readlines():
imp.add(adj_en.split()[0])
print("Reading test json file")
test_data = []
test_it = 0
with codecs.open(sys.argv[1],'rU','utf-8') as f:
for line in f:
test_it = test_it + 1
test_data.append(json.loads(line))
print("Cleaning test sentences")
test_sentences = []
end = test_it
i = 0
while(i<end):
sent = test_data[i]['reviewText']
temp = ""
for j in range(0, 3):
temp = temp + test_data[i]['summary']
sent = sent + temp
test_sentences.append(cleandoc(sent))
i = i+1
with open('vect_uni.pkl', 'rb') as f:
vect_uni = pickle.load(f)
with open('vect_bi.pkl', 'rb') as f:
vect_bi = pickle.load(f)
print("Making Test matrix - Unigrams")
test_matrix_uni = vect_uni.transform(test_sentences)
print("Making Test matrix - Unigrams")
test_matrix_bi = vect_bi.transform(test_sentences)
test_matrix = hstack((test_matrix_uni, test_matrix_bi))
print("Predicting")
with open('model.pkl', 'rb') as f:
model = pickle.load(f)
y_pred = model.predict(test_matrix)
y_pred_class = []
for ele in y_pred:
if(ele==3):
y_pred_class.append(2)
else:
y_pred_class.append(ele)
file = open(sys.argv[2],'w')
for ele in y_pred_class:
if(ele==0):
file.write("1\n")
elif(ele==1):
file.write("3\n")
elif(ele==2):
file.write("5\n") | Python | 129 | 22.085272 | 59 | /final.py | 0.585507 | 0.573913 |
Powercoders-International/ft-web-dev | refs/heads/main | from json import loads
from django.http import JsonResponse
from django.http import HttpResponseNotAllowed
def view_articles(request):
    """Handle the articles collection endpoint.

    GET  -> JSON object with the list of article ids.
    POST -> echoes the submitted JSON back with an ``id`` assigned.
    Any other method receives a 405 response.

    curl --include \
        http://localhost:8000/shop/articles/

    curl --include \
        --request POST \
        --header "Content-Type: application/json" \
        --data '{"name":"test"}' \
        http://localhost:8000/shop/articles/
    """
    if request.method == 'POST':
        payload = loads(request.body)
        payload['id'] = 1
        return JsonResponse(payload)
    if request.method == 'GET':
        return JsonResponse({'ids': list(range(10))})
    return HttpResponseNotAllowed(['GET', 'POST'])
def view_article(request, id):
    """Handle a single-article endpoint.

    GET and DELETE -> JSON object holding the article id.
    PATCH          -> parsed JSON body echoed back with ``id`` set.
    Any other method receives a 405 response.

    curl --include \
        http://localhost:8000/shop/articles/1/

    curl --include \
        --request PATCH \
        --header "Content-Type: application/json" \
        --data '{"name":"test"}' \
        http://localhost:8000/shop/articles/1/

    curl --include \
        --request DELETE \
        http://localhost:8000/shop/articles/1/
    """
    method = request.method
    if method in ('GET', 'DELETE'):
        return JsonResponse({'id': id})
    if method == 'PATCH':
        payload = loads(request.body)
        payload['id'] = id
        return JsonResponse(payload)
    return HttpResponseNotAllowed(['GET', 'PATCH', 'DELETE'])
| Python | 60 | 24.700001 | 68 | /05-django/solutions/exercise-2-static/shop/views.py | 0.594034 | 0.577173 |
Powercoders-International/ft-web-dev | refs/heads/main | from django.db.models import Model
from django.db.models import CharField
class Article(Model):
    # Human-readable article name, capped at 50 characters.
    name = CharField(max_length=50)
| Python | 6 | 21.333334 | 38 | /05-django/solutions/exercise-3-models/shop/models.py | 0.768657 | 0.753731 |
Powercoders-International/ft-web-dev | refs/heads/main | from shop.views import ArticleViewSet
from rest_framework.routers import DefaultRouter
# Register the article viewset with a DRF router; the router generates the
# standard list/detail URL patterns (articles/ and articles/<pk>/).
router = DefaultRouter()
router.register('articles', ArticleViewSet)
urlpatterns = router.urls
| Python | 7 | 25.285715 | 48 | /05-django/solutions/exercise-5-filters/shop/urls.py | 0.826087 | 0.826087 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.