seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
39901615627 | from django.http import HttpResponse, HttpResponseNotAllowed, \
HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.template import loader
from django.contrib import messages
from django.core.urlresolvers import reverse
from django.core.exceptions import ValidationError
from new.utils import json_error, check_fields_in_data, MODEL_MAP, \
MODEL_FORM_MAP, get_template_for_model
from browse.models import ReviewVote, Report, Review
import json
import datetime
@login_required
def edit(request, page=None, id=None):
    """Edit an existing model instance.

    Validates that ``page`` names a known model and that ``id`` matches
    exactly one instance which the requesting user owns, then delegates to
    :func:`new` with ``type="edit"`` (create and edit share their logic).

    Args:
        request: The current HttpRequest (login_required guarantees auth).
        page: Key into MODEL_MAP naming the model type being edited.
        id: Primary key of the instance to edit.

    Returns:
        The HttpResponse produced by :func:`new`, or a JSON error response.
    """
    # Check that id exists for page.  (Membership test directly on the dict;
    # the old ``.keys()`` call was redundant.)
    if page not in MODEL_MAP:
        return json_error({"error": "Unknown page requested."})
    instances = MODEL_MAP[page].objects.filter(id=id)
    if len(instances) != 1:
        return json_error({"error": "Unknown {} id {} provided."
                                    .format(page, id)})
    # Only the owning user may edit.  Models expose the owner under either
    # "created_by" or "owner" depending on the model.
    owner = None
    instance = instances[0]
    if hasattr(instance, "created_by"):
        owner = instance.created_by
    elif hasattr(instance, "owner"):
        owner = instance.owner
    if owner and owner != request.user:
        return json_error({"error": "You do not own this instance."})
    # Functionality is so similar to new, just hand it off
    return new(request, page=page, id=id, type="edit")
def new(request, type="new", page=None, id=None):
    """Create ("new") or update ("edit") a model instance from JSON POST data.

    GET requests return the rendered form template for ``page``.  POST
    requests validate the submitted fields, build (or fetch and update) the
    instance, run ``full_clean()`` and save.

    Args:
        request: The current HttpRequest.
        type: "new" to create, "edit" to update an existing instance.
        page: Key into MODEL_MAP / MODEL_FORM_MAP naming the model type.
        id: Primary key of the instance to update (edit mode only).

    Returns:
        HttpResponse whose JSON body carries per-field error strings and,
        on success, the saved instance's id.
    """
    if not request.user.is_authenticated():
        if request.method == "POST":
            return json_error({"error": "Please login to add a {}."
                                        .format(page)})
        else:
            # Plain page visit: bounce back to the referrer (or home) with a
            # flash message rather than returning JSON.
            redir = request.META.get("HTTP_REFERER")
            if not redir:
                redir = reverse("home")
            messages.error(request,
                           "You must be logged in to add a {}.".format(page))
            return HttpResponseRedirect(redir)
    response = {"error": {"error": ""}}
    if request.method != "POST":
        return get_template_for_model(request, MODEL_FORM_MAP, page)
    data = json.loads(request.body.decode())
    if page not in MODEL_MAP:
        return json_error({"error": "Requested page type \"{}\" does not have"
                                    " a known model.".format(page)})
    if page not in MODEL_FORM_MAP:
        return json_error({"error": "Requested page type \"{}\" does not have"
                                    " a known form.".format(page)})
    model = MODEL_MAP[page]
    form = MODEL_FORM_MAP[page]
    # If model has an owner or created by field, add us
    if form.needs_owner:
        data["owner"] = request.user
    elif form.needs_created_by:
        data["created_by"] = request.user
    # FIXME: Is this necessary? It seems like it should autoresolve this
    if page == "reviewcomment":
        data["target"] = Review.objects.get(id=int(data["target"]))
    res = check_fields_in_data(data, model, form)
    if res:
        return res
    try:
        # Drop keys whose submitted value is empty so model defaults apply.
        data = {key: value for key, value in data.items() if value != ''}
        if type == "new":
            # Try to create it
            instance = model(**data)
        elif type == "edit":
            # We can assume it exists: edit() validated the id already.
            instance = model.objects.get(id=id)
            for k, v in data.items():
                setattr(instance, k, v)
            if hasattr(instance, "updated_ts"):
                instance.updated_ts = datetime.datetime.now()
        instance.full_clean()
    except ValidationError as e:
        print("ERROR: " + str(e))
        errorDict = {}
        for key, value in e.message_dict.items():
            if isinstance(value, list):
                errorDict[key] = " ".join(value).strip("[]/'")
        return HttpResponse(json_error(errorDict))
    for field in MODEL_FORM_MAP[page].Meta.fields:
        response["error"][field] = ""  # clear errors
    instance.save()
    response["id"] = instance.id  # return new id at top level.
    # Save and return all info
    return HttpResponse(json.dumps(response))
def addVote(request, wat=None):
    """Record, flip, or remove the requesting user's quality vote on a review.

    POST parameters:
        review-id: id of the Review being voted on.
        action: "up" or "down" (case-insensitive).

    Voting semantics: voting the same direction as an existing vote removes
    it; voting the opposite direction flips it; with no existing vote a new
    one is created.

    Returns:
        A JSON {"success": bool, ["error": str]} response; non-POST requests
        receive a 405.
    """
    # 'wat' is an unused URL-pattern capture kept for route compatibility.
    if request.method == "POST":
        if not request.user.is_authenticated():
            jsonResponse = {"success": False,
                            "error": "User not logged in"}
            return HttpResponse(json.dumps(jsonResponse),
                                content_type="application/json")
        review_id = request.POST.get("review-id")
        # Guard against a missing "action" parameter instead of crashing on
        # None.lower().
        action = (request.POST.get("action") or "").lower()
        user = request.user
        try:
            # Inside the try so an unknown review id yields the JSON error
            # below instead of an unhandled DoesNotExist (HTTP 500).
            review = Review.objects.get(id=review_id)
            vote = ReviewVote.objects.filter(target=review,
                                             owner=user)
            # If the vote exists, we need to change it based on input.
            # Currently votes are changed as such:
            # If the user presses the same direction as their current vote
            # then the vote is removed
            # If the user presses opposite their vote, the vote is changed
            # to the new direction
            if vote.exists():
                vote = vote[0]
                if (vote.quality and action == "up") or \
                        (not vote.quality and action == "down"):
                    vote.delete()
                else:
                    vote.quality = (action == "up")
                    vote.save()
            # vote doesn't exist yet, then it needs to be created.
            elif action in ("up", "down"):
                vote = ReviewVote(target=review,
                                  owner=user,
                                  quality=(action == "up"))
                vote.save()
        except Exception:
            # Narrowed from a bare ``except:`` so system-exiting exceptions
            # (KeyboardInterrupt, SystemExit) are no longer swallowed.
            jsonResponse = {"success": False,
                            "error": "Could not complete vote"}
            return HttpResponse(json.dumps(jsonResponse),
                                content_type="application/json")
        return HttpResponse(json.dumps({"success": True}),
                            content_type="application/json")
    else:
        return HttpResponseNotAllowed(["POST"])
@login_required
def report(request, model_name, id):
    """
    This view serves both the proper form page and the POST requests for
    the report form page.
    It's essentially a clone of new but with a few fixes since the model is
    mucked up with metamadness.
    """
    if model_name not in MODEL_MAP:
        if request.method != "POST":
            return HttpResponse("Unknown model name specified.")
        return json_error({"error": "Requested page type \"{}\" does not "
                                    "have a known model."
                                    .format(model_name)
                           })
    if model_name not in MODEL_FORM_MAP:
        if request.method != "POST":
            return HttpResponse("Unknown model name specified.")
        return json_error({"error": "Requested page type \"{}\" does not "
                                    "have a known form.".format(model_name)
                           })
    if request.method == "POST":
        res = {}
        data = json.loads(request.body.decode())
        target_model = MODEL_MAP[model_name]
        form = MODEL_FORM_MAP["report"]
        # filter().first() returns None for an unknown id; the previous
        # .get() raised DoesNotExist and the None-check below was dead code.
        inst = target_model.objects.filter(id=id).first()
        if not inst:
            # BUG FIX: this error response was built but never returned, so
            # execution used to fall straight through to Report.create().
            return json_error({"error": "Unknown model instance id for"
                                        " provided model ({} for '{}')."
                                        .format(id, model_name)})
        err = check_fields_in_data(data, Report, form)
        if err:
            return err
        new = Report.create(target_model, id, request.user, data["summary"],
                            data["text"])
        new.save()
        res["id"] = new.id
        messages.success(request, "Added report!")
        return HttpResponse(json.dumps(res),
                            content_type="application/json")
    else:
        inst = MODEL_MAP[model_name].objects.get(id=id)
        template = loader.get_template("new/report.html")
        context = {"instance": inst, "model": model_name, "id": id}
        return HttpResponse(template.render(context))
@login_required
def resolve_report(request, report_id):
    """
    This view serves both the proper form page and the POST requests for
    the resolve report form page.
    It's essentially a clone of report but with a few changes to make
    resolution better.
    """
    # TODO: Check if staff
    # filter().first() returns None for an unknown id; the previous .get()
    # raised DoesNotExist and the None-check below could never trigger.
    inst = Report.objects.filter(id=report_id).first()
    if not inst:
        # BUG FIX: the message previously formatted the builtin ``id``
        # function instead of the requested report_id.
        return json_error({"error": "Unknown report with id {}"
                                    .format(report_id)})
    if inst.handled:
        return json_error({"error": "Report has already been resolved."})
    if request.method == "POST":
        res = {}
        data = json.loads(request.body.decode())
        if "text" not in data:
            return json_error({"text": "Missing text field."})
        if "summary" not in data or data["summary"] == "":
            return json_error({"summary": "Missing action field."})
        inst.resolve(by=request.user, comment=data["text"])
        res["id"] = inst.id
        return HttpResponse(json.dumps(res),
                            content_type="application/json")
    else:
        template = loader.get_template("new/resolve_report.html")
        context = {"instance": inst, "id": report_id}
        return HttpResponse(template.render(context))
| brhoades/sweaters-but-with-peer-reviews | new/views.py | views.py | py | 9,719 | python | en | code | 1 | github-code | 36 |
class Solution:
    def permuteUnique(self, nums: List[int]) -> List[List[int]]:
        """Return all unique permutations of nums in lexicographic order.

        Uses the classic next-permutation algorithm, starting from sorted
        order and advancing until the sequence is fully descending, so each
        distinct permutation appears exactly once even with duplicates.

        BUG FIX vs. the previous version: the old pivot scan used ``>``
        instead of ``>=``, which stalled forever on adjacent equal elements
        (e.g. [1, 1, 2]); the old wrap-around handling also appended a
        duplicate of the first permutation before terminating.
        """
        order = sorted(nums)
        result = [order[:]]
        while True:
            # Pivot: rightmost index whose element is strictly smaller than
            # its successor.  ">=" is essential so runs of equal elements
            # are skipped over.
            i = len(order) - 2
            while i >= 0 and order[i] >= order[i + 1]:
                i -= 1
            if i < 0:
                # Fully descending: every permutation has been emitted.
                return result
            # Successor: rightmost element strictly greater than the pivot.
            j = len(order) - 1
            while order[j] <= order[i]:
                j -= 1
            order[i], order[j] = order[j], order[i]
            # The suffix after the pivot is descending; reversing it yields
            # the immediate lexicographic successor.
            order[i + 1:] = reversed(order[i + 1:])
            result.append(order[:])
| architjee/solutions | Leetcode/permutations II.py | permutations II.py | py | 1,155 | python | en | code | 0 | github-code | 36 |
17123809199 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
#
# SPDX-License-Identifier: GPL-3.0
#
##################################################
# GNU Radio Python Flow Graph
# Title: Gr Baseband Async O
# Generated: Tue Apr 16 11:20:50 2019
# GNU Radio version: 3.7.12.0
##################################################
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio import gr
from gnuradio.eng_option import eng_option
from gnuradio.filter import firdes
from optparse import OptionParser
import pmt
import red_pitaya
class gr_baseband_async_o(gr.top_block):
    """GRC-generated flow graph: stream a real baseband file to a Red Pitaya.

    Auto-generated by GNU Radio Companion -- manual edits will be lost if
    the flow graph is regenerated.  Signal path: float file source ->
    float_to_complex (imaginary input pinned to a constant 0) -> head block
    (stops the graph after 50M samples) -> Red Pitaya network sink on TCP
    port ``1000 + ch_port``.
    """
    def __init__(self, ch_port=1, tx_ip='192.168.5.100', samp_rate=250000, sig_in='./in.dat'):
        gr.top_block.__init__(self, "Gr Baseband Async O")
        ##################################################
        # Parameters
        ##################################################
        self.ch_port = ch_port
        self.tx_ip = tx_ip
        self.samp_rate = samp_rate
        self.sig_in = sig_in
        ##################################################
        # Blocks
        ##################################################
        self.red_pitaya_sink_0 = red_pitaya.sink(
            addr=tx_ip,
            port=1000 + ch_port,
            freq=0,
            rate=samp_rate,
            corr=0,
            ptt=True
        )
        # Limits the run to 50M complex samples so the graph terminates.
        self.blocks_head_0 = blocks.head(gr.sizeof_gr_complex*1, 50000000)
        self.blocks_float_to_complex_0 = blocks.float_to_complex(1)
        # File is read once (repeat=False).
        self.blocks_file_source_0 = blocks.file_source(gr.sizeof_float*1, sig_in, False)
        self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)
        # Constant 0 feeds the imaginary input of float_to_complex.
        self.analog_const_source_x_0 = analog.sig_source_f(0, analog.GR_CONST_WAVE, 0, 0, 0)
        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_const_source_x_0, 0), (self.blocks_float_to_complex_0, 1))
        self.connect((self.blocks_file_source_0, 0), (self.blocks_float_to_complex_0, 0))
        self.connect((self.blocks_float_to_complex_0, 0), (self.blocks_head_0, 0))
        self.connect((self.blocks_head_0, 0), (self.red_pitaya_sink_0, 0))
    # GRC-style accessors; setters propagate changes to live blocks where
    # the parameter affects them.
    def get_ch_port(self):
        return self.ch_port
    def set_ch_port(self, ch_port):
        self.ch_port = ch_port
    def get_tx_ip(self):
        return self.tx_ip
    def set_tx_ip(self, tx_ip):
        self.tx_ip = tx_ip
    def get_samp_rate(self):
        return self.samp_rate
    def set_samp_rate(self, samp_rate):
        self.samp_rate = samp_rate
        self.red_pitaya_sink_0.set_rate(self.samp_rate)
    def get_sig_in(self):
        return self.sig_in
    def set_sig_in(self, sig_in):
        self.sig_in = sig_in
        self.blocks_file_source_0.open(self.sig_in, False)
def argument_parser():
    """Build the OptionParser exposing the flow graph's CLI parameters.

    Auto-generated by GRC; each option mirrors one constructor parameter of
    gr_baseband_async_o.
    """
    parser = OptionParser(usage="%prog: [options]", option_class=eng_option)
    parser.add_option(
        "-p", "--ch-port", dest="ch_port", type="intx", default=1,
        help="Set ch_port [default=%default]")
    parser.add_option(
        "-w", "--tx-ip", dest="tx_ip", type="string", default='192.168.5.100',
        help="Set tx ip [default=%default]")
    parser.add_option(
        "-r", "--samp-rate", dest="samp_rate", type="eng_float", default=eng_notation.num_to_str(250000),
        help="Set sample rate [default=%default]")
    parser.add_option(
        "-i", "--sig-in", dest="sig_in", type="string", default='./in.dat',
        help="Set signal in [default=%default]")
    return parser
def main(top_block_cls=gr_baseband_async_o, options=None):
    """Instantiate the flow graph from CLI options and run it to completion."""
    if options is None:
        options, _ = argument_parser().parse_args()
    tb = top_block_cls(ch_port=options.ch_port, tx_ip=options.tx_ip, samp_rate=options.samp_rate, sig_in=options.sig_in)
    tb.start()
    # Blocks until the head block's 50M-sample limit stops the graph.
    tb.wait()
if __name__ == '__main__':
    main()
| fmagno/dsp | dsp/transmission_gr38/gr_baseband_async_o.py | gr_baseband_async_o.py | py | 3,933 | python | en | code | 1 | github-code | 36 |
71984366824 | import pandas as pd
from sklearn import metrics
from sklearn import preprocessing
from chapter5 import config
from chapter5 import model_dispatcher
from common import utils
def run(fold):
    """Train XGBoost on all folds except ``fold`` and print validation AUC."""
    df = pd.read_csv(config.CENSUS_FILE_FOLDS)
    # Convert the target variable to 0/1.
    target_mapping = {"<=50K": 0, ">50K": 1}
    df.loc[:, "income"] = df["income"].map(target_mapping)
    ftrs = utils.exclude_cols_from_df(df, ("kfold", "income"))
    # All feature columns are categorical, so impute missing values the same
    # way in every column.
    for col in ftrs:
        df.loc[:, col] = df[col].astype(str).fillna("NONE")
    # Label-encode the features.
    # (One-hot encoding is slow for tree-based models, so labels are used.)
    for col in ftrs:
        lbl = preprocessing.LabelEncoder()
        lbl.fit(df[col])
        df.loc[:, col] = lbl.transform(df[col])
    # Rows whose fold number differs from the argument are used for training,
    # the matching fold for validation.
    df_train = df[df.kfold != fold].reset_index(drop=True)
    df_valid = df[df.kfold == fold].reset_index(drop=True)
    x_train = df_train[ftrs].values
    x_valid = df_valid[ftrs].values
    # Train the model.
    mdl = model_dispatcher.models["xgb"](n_jobs=-1)
    mdl.fit(x_train, df_train.income.values)
    # Compute AUC.
    # predict_proba returns an array of [P(class 0), P(class 1)] rows.
    valid_preds = mdl.predict_proba(x_valid)[:, 1]
    auc = metrics.roc_auc_score(df_valid.income.values, valid_preds)
    print(f"Fold={fold}, AUC={auc}")
if __name__ == "__main__":
    # Run all five folds of the cross-validation.
    for i in range(5):
        run(i)
| YasudaKaito/aaamlp_transcription | project/src/chapter5/census_lbl_xgb.py | census_lbl_xgb.py | py | 1,606 | python | en | code | 0 | github-code | 36 |
38567827389 | '''
542. 01 Matrix
Given a matrix consists of 0 and 1, find the distance of the nearest 0 for each cell.
The distance between two adjacent cells is 1.
Example 1:
Input:
[[0,0,0],
[0,1,0],
[0,0,0]]
Output:
[[0,0,0],
[0,1,0],
[0,0,0]]
Example 2:
Input:
[[0,0,0],
[0,1,0],
[1,1,1]]
Output:
[[0,0,0],
[0,1,0],
[1,2,1]]
Note:
The number of elements of the given matrix will not exceed 10,000.
There are at least one 0 in the given matrix.
The cells are adjacent in only four directions: up, down, left and right.
'''
class Solution:
    def updateMatrix(self, matrix):
        """Replace each cell with its distance to the nearest 0 (4-adjacency).

        Multi-source BFS: every 0-cell starts in the queue at distance 0 and
        distances expand outward in waves, so each cell is finalized the
        first time a shorter distance reaches it.  The matrix is modified in
        place and also returned.

        Improvements over the previous version: list.pop(0) (O(n) per
        dequeue) replaced by deque.popleft() (O(1)), and the fragile 999
        sentinel replaced by infinity so correctness no longer depends on
        all distances staying below 999.
        """
        from collections import deque
        rows = len(matrix)
        cols = len(matrix[0])
        queue = deque()
        for r in range(rows):
            for c in range(cols):
                if matrix[r][c] == 0:
                    queue.append((r, c))
                else:
                    # Unknown distance; any real BFS distance beats it.
                    matrix[r][c] = float('inf')
        while queue:
            r, c = queue.popleft()
            distance = matrix[r][c] + 1
            for nr, nc in ((r + 1, c), (r - 1, c), (r, c + 1), (r, c - 1)):
                if 0 <= nr < rows and 0 <= nc < cols and matrix[nr][nc] > distance:
                    matrix[nr][nc] = distance
                    queue.append((nr, nc))
        return matrix
# Demo: expected output is [[0, 0, 0], [0, 1, 0], [1, 2, 1]].
matrix = [
    [0,0,0],
    [0,1,0],
    [1,1,1]]
sol = Solution()
print(sol.updateMatrix(matrix))
| archanakalburgi/Algorithms | Graphs/matrix01.py | matrix01.py | py | 1,343 | python | en | code | 1 | github-code | 36 |
902272609 | from multiprocessing import Process,Queue
import os,time
def write(q):
    """Producer: put the letters A-D on the queue, one per second.

    The prints report the writer child process starting and finishing.
    """
    print('启动写子进程%s' % os.getpid())
    # Renamed the loop variable: the original ``chr`` shadowed the builtin
    # chr() function.
    for letter in ["A", "B", "C", "D"]:
        q.put(letter)
        time.sleep(1)
    print('结束写子进程%s' % os.getpid())
def read(q):
    """Consumer: block on the queue forever, echoing every value received."""
    print('启动读子进程%s'% os.getpid())
    while True:
        item = q.get(True)
        print("value= " + item)
    # Unreachable: the loop above never exits (the parent terminates this
    # process instead).  Kept to mirror the writer's structure.
    print('结束读子进程%s'% os.getpid())
if __name__=='__main__':
    print('父进程开始')
    # The parent process creates the queue and hands it to both children.
    q = Queue()
    pw = Process(target=write,args=(q,))
    pr = Process(target=read,args=(q,))
    pw.start()
    pr.start()
    # Wait for the writer to finish all of its puts.
    pw.join()
    # The reader is an infinite loop and can never be joined; it has to be
    # terminated forcibly.
    pr.terminate()
    print('父进程结束') | hughgo/Python3 | 基础代码/进程/10 进程间通信.py | 10 进程间通信.py | py | 805 | python | en | code | 10 | github-code | 36
10598156961 | from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
class service_config_settings(osv.TransientModel):
    """Service settings wizard combining sale and fetchmail settings.

    BUG FIX: the class previously inherited from ``models.TransientModel``
    although only ``osv`` is imported in this module, which raised NameError
    at import time; ``osv.TransientModel`` is the equivalent base for this
    old (cr/uid) API style.
    """
    _name = 'service.config.settings'
    _inherit = ['sale.config.settings', 'fetchmail.config.settings']
    _columns = {
        'alias_prefix': fields.char('Default Alias Name for Notification'),
        'alias_domain': fields.char('Alias Domain'),
    }
    _defaults = {
        'alias_domain': lambda self, cr, uid, context: self.pool['mail.alias']._get_alias_domain(cr, SUPERUSER_ID, [1], None, None)[1],
    }

    def _find_default_lead_alias_id(self, cr, uid, context=None):
        """Return the id of the generic crm.lead catch-all alias, or False.

        First tries the crm.mail_alias_lead_info xmlid, then falls back to
        searching for a lead alias with no forced thread and no defaults.
        """
        alias_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'crm.mail_alias_lead_info')
        if not alias_id:
            alias_ids = self.pool['mail.alias'].search(
                cr, uid, [
                    ('alias_model_id.model', '=', 'crm.lead'),
                    ('alias_force_thread_id', '=', False),
                    ('alias_parent_model_id.model', '=', 'crm.case.section'),
                    ('alias_parent_thread_id', '=', False),
                    ('alias_defaults', '=', '{}')
                ], context=context)
            alias_id = alias_ids and alias_ids[0] or False
        return alias_id

    def get_default_alias_prefix(self, cr, uid, ids, context=None):
        """Default getter: current alias name of the default lead alias."""
        alias_name = False
        alias_id = self._find_default_lead_alias_id(cr, uid, context=context)
        if alias_id:
            alias_name = self.pool['mail.alias'].browse(cr, uid, alias_id, context=context).alias_name
        return {'alias_prefix': alias_name}

    def set_default_alias_prefix(self, cr, uid, ids, context=None):
        """Default setter: create or rename the default lead alias."""
        mail_alias = self.pool['mail.alias']
        for record in self.browse(cr, uid, ids, context=context):
            alias_id = self._find_default_lead_alias_id(cr, uid, context=context)
            if not alias_id:
                # ``context or {}`` guards against the default None context,
                # which dict() would reject.
                create_ctx = dict(context or {}, alias_model_name='crm.lead', alias_parent_model_name='crm.case.section')
                alias_id = self.pool['mail.alias'].create(cr, uid, {'alias_name': record.alias_prefix}, context=create_ctx)
            else:
                mail_alias.write(cr, uid, alias_id, {'alias_name': record.alias_prefix}, context=context)
        return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| dtorresxp/deltatech | deltatech_service_maintenance/res_config.py | res_config.py | py | 2,336 | python | en | code | 0 | github-code | 36 |
25451901336 | from flask import Blueprint
from marketplace import db, login_required
from marketplace.models import Item, Tag
tag_item = Blueprint('tag_item', __name__)
# NOTE(review): this mutates data over a GET route; a POST route would be
# more appropriate -- confirm before changing, since clients may rely on it.
@tag_item.route('/tag_item/<item_id>/<tag>')
@login_required
def tag_an_item(item_id, tag):
    """Attach tag (by name) to the item with id ``item_id``.

    Creates the Tag row if it does not exist yet.  Returns a plain-text
    status message in every case.
    """
    # Get matching item
    matching_items = db.session.query(Item).filter_by(id=item_id).all()
    if len(matching_items) == 0:
        return "No matching items found!"
    if len(matching_items) > 1:
        return "Too many items found!"
    # Get existing item tags and ensure not already there
    item_tags = matching_items[0].tags
    exists = False
    for existing_tag in item_tags:
        if existing_tag.name == tag:
            exists = True
    if exists:
        return "Already exists!"
    # If not, see if the tag is already in the tag database
    tag_t = ""
    matching_tags = db.session.query(Tag).filter_by(name=tag).all()
    if len(matching_tags) == 0:
        # No? Create tag
        tag_t = Tag(tag)
        db.session.add(tag_t)
        db.session.commit()
    else:
        # Add item to new/existing tag
        tag_t = matching_tags[0]
    # Pair up item with tag
    matching_items[0].tags.append(tag_t)
    db.session.commit()
    # NOTE(review): the "| adicu/marketplace | ..." text fused onto the next
    # line is a dataset-export artifact; in the real repository this line is
    # simply ``return "Added tag!"``.
    return "Added tag!" | adicu/marketplace | marketplace/routes/tag_item.py | tag_item.py | py | 1,239 | python | en | code | 3 | github-code | 36
10665778183 | import math
import os
from glumpy import glm
from PIL import Image, ImageTk
import numpy
import tkinter
import cv2
def load_image(file_name, size):
    """Load an image from disk, resize it, and return it twice.

    Returns:
        (PIL.Image, ImageTk.PhotoImage) -- the PIL image for pixel work and
        the Tk photo image for display on a canvas.
    """
    image = Image.open(file_name)
    image = numpy.array(image)
    # NOTE(review): assumes the file decodes to 4-channel BGRA (e.g. PNG
    # with alpha); cvtColor(..., COLOR_BGRA2BGR) raises on 3-channel input
    # -- TODO confirm all inputs carry an alpha channel.
    image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
    image = cv2.resize(image, size, interpolation=cv2.INTER_NEAREST)
    image = Image.fromarray(image)
    photo_image = ImageTk.PhotoImage(image)
    return image, photo_image
class ImgToChunk():
    """Tkinter tool for aligning a wireframe cube over a screenshot.

    The cube is translated/rotated with keyboard bindings; on every key
    press each cube face is perspective-warped back to a square tile and
    the closest-matching block texture from ``block_subset`` is shown next
    to it in a 6x2 grid of small canvases.
    """
    def __init__(self):
        super(ImgToChunk, self).__init__()
        # Main window plus keyboard bindings for cube translation (w/a/s/d,
        # q/e) and rotation (r/f roll, t/g yaw).
        self.window = tkinter.Tk(className='ImgToChunk')
        self.window.geometry('1600x900')
        self.window.bind('w', lambda event: self.key_fn('w'))
        self.window.bind('a', lambda event: self.key_fn('a'))
        self.window.bind('s', lambda event: self.key_fn('s'))
        self.window.bind('d', lambda event: self.key_fn('d'))
        self.window.bind('q', lambda event: self.key_fn('q'))
        self.window.bind('e', lambda event: self.key_fn('e'))
        self.window.bind('r', lambda event: self.key_fn('r'))
        self.window.bind('f', lambda event: self.key_fn('f'))
        self.window.bind('t', lambda event: self.key_fn('t'))
        self.window.bind('g', lambda event: self.key_fn('g'))
        # Source screenshot shown on the large canvas.
        self.image, self.photo_image = load_image('data/1.png', (1280, 720))
        self.image_canvas = tkinter.Canvas(self.window, width=1280, height=720)
        self.image_canvas.create_image((0, 0), image=self.photo_image, anchor='nw')
        self.image_canvas.place(x=10, y=10)
        self.image_canvas.bind("<Button-1>", self.image_click_fn)
        # Cube pose (position plus yaw/roll angles) and camera projection.
        self.cube_x = 0
        self.cube_y = 0
        self.cube_z = -5
        self.yaw = 0
        self.roll = 0
        self.projection = glm.perspective(70, float(1280) / 720, 0.1, 100)
        self.cube_image = self.image
        self.cube_image_tk = self.photo_image
        # 6x2 grid of small canvases: column 0 shows the warped cube face,
        # column 1 the best-matching block texture.
        self.grid_canvases = []
        self.grid_images = []
        self.grid_photo_images = []
        for row in range(6):
            self.grid_canvases.append([])
            self.grid_images.append([])
            self.grid_photo_images.append([])
            for col in range(2):
                grid_image, grid_photo_image = load_image('block/dirt.png', (125, 125))
                self.grid_images[row].append(grid_image)
                self.grid_photo_images[row].append(grid_photo_image)
                canvas = tkinter.Canvas(self.window, width=125, height=125)
                canvas.place(x=1300 + col * 125, y=0 + row * 125)
                canvas.create_image((0, 0), image=grid_photo_image, anchor='nw')
                self.grid_canvases[row].append(canvas)
        # Readouts of the current cube pose along the bottom of the window.
        self.x_label = tkinter.Label(self.window, text='x: ')
        self.x_label.config(font=('Courier', 12), width=20)
        self.x_label.place(x=10, y=740)
        self.y_label = tkinter.Label(self.window, text='y: ')
        self.y_label.config(font=('Courier', 12), width=20)
        self.y_label.place(x=210, y=740)
        self.z_label = tkinter.Label(self.window, text='z: ')
        self.z_label.config(font=('Courier', 12), width=20)
        self.z_label.place(x=410, y=740)
        self.roll_label = tkinter.Label(self.window, text='roll: ')
        self.roll_label.config(font=('Courier', 12), width=20)
        self.roll_label.place(x=610, y=740)
        self.yaw_label = tkinter.Label(self.window, text='yaw: ')
        self.yaw_label.config(font=('Courier', 12), width=20)
        self.yaw_label.place(x=810, y=740)
        # NOTE(review): the commented-out section below created attributes
        # (x_line/y_line, image_x_entry/image_y_entry, image_lines,
        # selected_entry_row/col, grid_entries, entry_grids) that
        # image_click_fn, entry_click_fn and update_button_fn still
        # reference -- invoking those handlers as the code stands raises
        # AttributeError.  Confirm whether this entry UI should be restored.
        # self.x_line = None
        # self.y_line = None
        #
        # self.image_x_label = tkinter.Label(self.window, text='image x:')
        # self.image_x_label.place(x=1300, y=10)
        # self.image_x_label.config(font=("Courier", 15))
        # self.image_x_entry = tkinter.Entry(self.window)
        # self.image_x_entry.place(x=1400, y=10)
        # self.image_x_entry.config(font=("Courier", 15), width=10)
        #
        # self.image_y_label = tkinter.Label(self.window, text='image y:')
        # self.image_y_label.place(x=1300, y=35)
        # self.image_y_label.config(font=("Courier", 15))
        # self.image_y_entry = tkinter.Entry(self.window)
        # self.image_y_entry.place(x=1400, y=35)
        # self.image_y_entry.config(font=("Courier", 15), width=10)
        #
        # self.image_lines = []
        #
        # self.update_button = tkinter.Button(self.window, text='Update', width=10, command=self.update_button_fn)
        # self.update_button.place(x=1400, y=65)
        #
        # self.selected_entry_row = 0
        # self.selected_entry_col = 0
        # self.grid_entries = []
        # self.entry_grids = {}
        # for row in range(10):
        #     self.grid_entries.append([])
        #     for col in range(2):
        #         entry = tkinter.Entry(self.window)
        #         entry.place(x=1300 + 50 * col, y=65 + 25 * row)
        #         entry.config(font=("Courier", 15), width=4)
        #         entry.bind('<1>', self.entry_click_fn)
        #         entry.bind('<Enter>', self.update_button_fn)
        #         self.grid_entries[row].append(entry)
        #         self.entry_grids[entry] = (row, col)
        # Candidate block textures for the nearest-texture lookup.  (The
        # loop variables ``dir``/``file`` shadow builtins; harmless here but
        # worth renaming on a future edit.)
        self.block_images = []
        for root, dir, files in os.walk('block_subset'):
            for file in files:
                path = os.path.join(root, file)
                block_image, block_image_tk = load_image(path, (200, 200))
                self.block_images.append(block_image)
        self.window.mainloop()
    def key_fn(self, key):
        """Apply one keystroke to the cube pose, then re-render everything."""
        if key == 'a': # Left
            self.cube_x -= 0.1
        elif key == 'd': # Right
            self.cube_x += 0.1
        elif key == 'q': # Up
            self.cube_y -= 0.1
        elif key == 'e': # Down
            self.cube_y += 0.1
        elif key == 'w': # Up
            self.cube_z -= 0.1
        elif key == 's': # Down
            self.cube_z += 0.1
        elif key == 'r': # Up
            self.roll += 1
        elif key == 'f': # Down
            self.roll += -1
        elif key == 't': # Up
            self.yaw += 1
        elif key == 'g': # Down
            self.yaw += -1
        self.x_label['text'] = 'x: {:2f}'.format(self.cube_x)
        self.y_label['text'] = 'y: {:2f}'.format(self.cube_y)
        self.z_label['text'] = 'z: {:2f}'.format(self.cube_z)
        self.roll_label['text'] = 'roll: {:2f}'.format(self.roll)
        self.yaw_label['text'] = 'yaw: {:2f}'.format(self.yaw)
        # Build view (rotation) and model (translation) matrices and project
        # the cube's 8 corners into screen space.
        view = numpy.eye(4, dtype=numpy.float32)
        glm.rotate(view, self.yaw, 0, 1, 0)
        glm.rotate(view, self.roll, 1, 0, 0)
        model = numpy.eye(4, dtype=numpy.float32)
        glm.translate(model, self.cube_x, self.cube_y, self.cube_z)
        vertices = numpy.array([[1, 1, 1], [0, 1, 1], [0, 0, 1], [1, 0, 1],
                                [1, 0, 0], [1, 1, 0], [0, 1, 0], [0, 0, 0]])
        vertices = numpy.column_stack((vertices, numpy.ones((vertices.shape[0], 1)))) @ model @ view @ self.projection
        # Perspective divide, then map NDC [-1, 1] to pixel coordinates.
        vertices = vertices[:, :2] / numpy.reshape(vertices[:, 3], (vertices.shape[0], 1))
        vertices = (vertices + 1) * numpy.array([1280 / 2, 720 / 2])
        # Vertex indices of the cube's six quadrilateral faces.
        indices = [[0, 1, 2, 3], [0, 1, 6, 5], [0, 5, 4, 3], [1, 6, 7, 2], [3, 4, 7, 2], [5, 6, 7, 4]]
        polygons = [numpy.array([vertices[indices[i]]]).astype(int) for i in range(len(indices))]
        # Redraw the screenshot with the wireframe cube overlaid.
        self.cube_image = numpy.array(self.image, numpy.uint8)
        cv2.polylines(self.cube_image, polygons, True, 255)
        self.cube_image = Image.fromarray(self.cube_image)
        self.cube_image_tk = ImageTk.PhotoImage(self.cube_image)
        self.image_canvas.delete('all')
        self.image_canvas.create_image((0, 0), image=self.cube_image_tk, anchor='nw')
        # Warp each face to a 125x125 tile and show it plus its closest
        # block texture in the side grid.
        dst_points = numpy.array([[125, 125], [0, 125], [0, 0], [125, 0]])
        for row, polygon in enumerate(polygons):
            homography, status = cv2.findHomography(polygon, dst_points)
            tile_image = numpy.array(self.image, numpy.uint8)
            tile_image = cv2.warpPerspective(tile_image, homography, (125, 125))
            self.grid_images[row][0] = Image.fromarray(tile_image)
            self.grid_images[row][1] = self.get_most_similar_image(self.grid_images[row][0])
            self.grid_photo_images[row][0] = ImageTk.PhotoImage(self.grid_images[row][0])
            self.grid_photo_images[row][1] = ImageTk.PhotoImage(self.grid_images[row][1])
            self.grid_canvases[row][0].delete('all')
            self.grid_canvases[row][0].create_image((0, 0), image=self.grid_photo_images[row][0], anchor='nw')
            self.grid_canvases[row][1].delete('all')
            self.grid_canvases[row][1].create_image((0, 0), image=self.grid_photo_images[row][1], anchor='nw')
    def image_click_fn(self, event):
        """Record a clicked image coordinate into the coordinate entries.

        NOTE(review): relies on self.image_x_entry / self.image_y_entry,
        self.selected_entry_row, self.grid_entries and self.x_line/y_line,
        which are only created in the commented-out part of __init__ --
        clicking the canvas as the code stands raises AttributeError.
        """
        self.image_x_entry.delete(0, tkinter.END)
        self.image_x_entry.insert(0, '{}'.format(event.x))
        self.image_y_entry.delete(0, tkinter.END)
        self.image_y_entry.insert(0, '{}'.format(event.y))
        if self.selected_entry_row is not None:
            self.grid_entries[self.selected_entry_row][0].delete(0, tkinter.END)
            self.grid_entries[self.selected_entry_row][0].insert(0, '{}'.format(event.x))
            self.grid_entries[self.selected_entry_row][1].delete(0, tkinter.END)
            self.grid_entries[self.selected_entry_row][1].insert(0, '{}'.format(event.y))
            # Advance to the next of the four corner rows, wrapping at 3.
            self.selected_entry_row += 1
            if self.selected_entry_row > 3:
                self.selected_entry_row = 0
        # Draw a small white crosshair at the click position.
        if self.x_line:
            self.image_canvas.delete(self.x_line)
        if self.y_line:
            self.image_canvas.delete(self.y_line)
        self.x_line = self.image_canvas.create_line(event.x, event.y - 10, event.x, event.y + 10, fill='white', width=2)
        self.y_line = self.image_canvas.create_line(event.x - 10, event.y, event.x + 10, event.y, fill='white', width=2)
        self.update_button_fn()
    def entry_click_fn(self, event: tkinter.Event):
        """Remember which grid entry was clicked (row/col selection).

        NOTE(review): depends on self.entry_grids from the commented-out
        __init__ section; see the note there.
        """
        if event.widget in self.entry_grids:
            self.selected_entry_row = self.entry_grids[event.widget][0]
            self.selected_entry_col = self.entry_grids[event.widget][1]
            print(self.selected_entry_row, self.selected_entry_col)
    def update_button_fn(self, event=None):
        """Redraw the selection quad and compute its homography.

        Reads four (x, y) corner points from the entry grid, outlines the
        quad on the image canvas, then fits a homography from those points
        to a 200x200 square.  Returns early if any entry is non-numeric.

        NOTE(review): depends on self.grid_entries and self.image_lines from
        the commented-out __init__ section; see the note there.
        """
        src_points = []
        for row in range(4):
            src_points.append([])
            for col in range(2):
                s_val = self.grid_entries[row][col].get()
                if s_val.isdigit():
                    src_points[row].append(int(s_val))
                else:
                    return
        for image_line in self.image_lines:
            self.image_canvas.delete(image_line)
        self.image_lines = [self.image_canvas.create_line(src_points[0][0],
                                                          src_points[0][1],
                                                          src_points[1][0],
                                                          src_points[1][1],
                                                          fill='white', width=2),
                            self.image_canvas.create_line(src_points[1][0],
                                                          src_points[1][1],
                                                          src_points[2][0],
                                                          src_points[2][1],
                                                          fill='white', width=2),
                            self.image_canvas.create_line(src_points[2][0],
                                                          src_points[2][1],
                                                          src_points[3][0],
                                                          src_points[3][1],
                                                          fill='white', width=2),
                            self.image_canvas.create_line(src_points[3][0],
                                                          src_points[3][1],
                                                          src_points[0][0],
                                                          src_points[0][1],
                                                          fill='white', width=2)
                            ]
        src_points = numpy.array(src_points)
        dst_points = numpy.array([[0, 0], [0, 200], [200, 200], [200, 0]])
        homography, status = cv2.findHomography(src_points, dst_points)
        # self.trans_tile_canvas.delete('all')
        # self.trans_tile_image = cv2.warpPerspective(numpy.array(self.image), homography, (200, 200))
        # self.trans_tile_image = Image.fromarray(self.trans_tile_image)
        # self.trans_tile_photo_image = ImageTk.PhotoImage(self.trans_tile_image)
        # self.trans_tile_canvas.create_image((0, 0), image=self.trans_tile_photo_image, anchor='nw')
        #
        # self.pred_tile_canvas.delete('all')
        # self.pred_tile_image = self.get_most_similar_image(self.trans_tile_image)
        # self.pred_tile_photo_image = ImageTk.PhotoImage(self.pred_tile_image)
        # self.pred_tile_canvas.create_image((0, 0), image=self.pred_tile_photo_image, anchor='nw')
    def get_most_similar_image(self, trans_tile_image):
        """Return the block texture with the smallest L1 distance to the tile.

        Both images are downsampled to 16x16 before comparison, making the
        match cheap and tolerant of small misalignments.
        """
        min_result = math.inf
        min_image = None
        for i, block_image in enumerate(self.block_images):
            trans_tile_image2 = cv2.resize(numpy.array(trans_tile_image), (16, 16))
            block_image2 = cv2.resize(numpy.array(block_image), (16, 16))
            # astype(int) before subtracting avoids uint8 wrap-around.
            result = trans_tile_image2.astype(int) - block_image2
            result = numpy.sum(numpy.abs(result))
            #result = result * result
            #result = numpy.sum(result)
            # cv2.imshow('abc', numpy.array(trans_tile_image))
            # cv2.waitKey()
            # cv2.imshow('abc', numpy.array(block_image))
            # cv2.waitKey()
            if result < min_result:
                min_result = result
                min_image = block_image
        return min_image
| chahyon-ku/ImgToChunk | ImgToChunk.py | ImgToChunk.py | py | 14,064 | python | en | code | 0 | github-code | 36 |
352049180 | import os
import sys
import subprocess
import shutil
from args import launch_parse_args
def main():
    """Spawn one training process per device and wait for them all.

    For each rank: recreates a fresh ``rank<i>`` working directory, exports
    RANK_SIZE / RANK_ID / DEVICE_ID into the child's environment, redirects
    the child's stdout/stderr into ``rank<i>/log<i>.log``, and finally
    raises CalledProcessError if any child exits non-zero.
    """
    print("start", __file__)
    args = launch_parse_args()
    print(args)
    visible_devices = args.visible_devices.split(',')
    assert os.path.isfile(args.training_script)
    assert len(visible_devices) >= args.nproc_per_node
    print('visible_devices:{}'.format(visible_devices))
    # spawn the processes
    processes = []
    cmds = []
    log_files = []
    env = os.environ.copy()
    env['RANK_SIZE'] = str(args.nproc_per_node)
    cur_path = os.getcwd()
    for rank_id in range(args.nproc_per_node):
        os.chdir(cur_path)
        device_id = visible_devices[rank_id]
        rank_dir = os.path.join(cur_path, 'rank{}'.format(rank_id))
        env['RANK_ID'] = str(rank_id)
        env['DEVICE_ID'] = str(device_id)
        # Recreate the per-rank working directory from scratch.
        if os.path.exists(rank_dir):
            shutil.rmtree(rank_dir)
        os.mkdir(rank_dir)
        os.chdir(rank_dir)
        cmd = [sys.executable, '-u']
        cmd.append(args.training_script)
        cmd.extend(args.training_script_args)
        log_file = open(f'{rank_dir}/log{rank_id}.log', 'w')
        process = subprocess.Popen(cmd, stdout=log_file, stderr=log_file, env=env)
        processes.append(process)
        cmds.append(cmd)
        log_files.append(log_file)
    for process, cmd, log_file in zip(processes, cmds, log_files):
        process.wait()
        # Close the log before any raise so output is flushed even when a
        # child fails.
        log_file.close()
        if process.returncode != 0:
            # BUG FIX: CalledProcessError was previously given the Popen
            # object as its returncode; it needs the integer exit status.
            raise subprocess.CalledProcessError(returncode=process.returncode, cmd=cmd)
if __name__ == "__main__":
    main()
| kungfu-team/mindspore | model_zoo/official/cv/mobilenetv2/src/launch.py | launch.py | py | 1,599 | python | en | code | 3 | github-code | 36 |
4035076126 | def binary_search(array, target, start, end):
while start <= end:
mid = (start + end) // 2
if array[mid] == target:
return mid
elif array[mid] > target:
end = mid - 1
else:
start = mid + 1
return None
def solution(N, items, R, r_items ):
    """For each requested part, report 'yes' if it is in stock, else 'no'."""
    items.sort()
    last_index = len(items) - 1
    results = []
    for wanted in r_items:
        found = binary_search(items, wanted, 0, last_index)
        results.append("no" if found is None else "yes")
    return results
print(solution(5, [8, 3, 7, 9 ,2], 3, [5, 7, 9]))
| kakaocloudschool/dangicodingtest | 006_이진탐색/001_이코테/003_부품찾기_이진탐색.py | 003_부품찾기_이진탐색.py | py | 596 | python | en | code | 0 | github-code | 36 |
from turtle import Turtle, Screen
# Create a turtle, style it, and move it forward 100 units.
timmy = Turtle()
print(timmy)
timmy.shape("turtle")
timmy.color("coral")
timmy.forward(100)
myScreen = Screen()
print(myScreen.canvheight) # canvheight is the height of the turtle canvas
print(myScreen.canvwidth)  # canvwidth is the width of the turtle canvas
myScreen.exitonclick() # keep the window open until the user clicks on it
| anchalsinghrajput/python | turtle/01 forward.py | 01 forward.py | py | 375 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python
# encoding: utf-8
from numpy.distutils.core import setup, Extension
# Fortran/C extension modules for the FLORIS wake model.
# adStack.c / adBuffer.f appear to be automatic-differentiation support
# sources shared by the first two extensions — TODO confirm.
module1 = Extension('_floris', sources=['src/FLORISSE3D/floris.f90', 'src/FLORISSE3D/adStack.c', 'src/FLORISSE3D/adBuffer.f'],
                    extra_compile_args=['-O2', '-c'])
module2 = Extension('_florisDiscontinuous', sources=['src/FLORISSE3D/florisDiscontinuous.f90', 'src/FLORISSE3D/adStack.c', 'src/FLORISSE3D/adBuffer.f'],
                    extra_compile_args=['-O2', '-c'])
module3 = Extension('_shellbuckling', sources=['src/FLORISSE3D/ShellBuckling.f90'],
                    extra_compile_args=['-O2', '-c'])
module4 = Extension('_axialShear', sources=['src/FLORISSE3D/Axial_Shear.f90'],
                    extra_compile_args=['-O2', '-c'])
# Package metadata and build configuration for FLORISSE3D.
setup(
    name='FLORISSE3D',
    version='0.0.0',
    description='differentiable floris wake model with cosine factor',
    install_requires=['openmdao>=1.5','akima>=1.0.0'],
    package_dir={'': 'src'},
    ext_modules=[module1, module2, module3, module4],
    dependency_links=['https://github.com/andrewning/akima/tarball/master#egg=akima'],
    packages=['FLORISSE3D'],
    license='Apache License, Version 2.0',
)
| byuflowlab/stanley2018-turbine-design | FLORISSE3D/setup.py | setup.py | py | 1,172 | python | en | code | 1 | github-code | 36 |
#!/usr/bin/env python
import typer
import logging
import os
# logging.basicConfig(level=logging.INFO, format="%(asctime)s %(filename)s: %(levelname)6s %(message)s")
#
# LOG = logging.getLogger(__name__)
from easul.driver import MemoryDriver
app = typer.Typer(help="EASUL tools to manage and extend the abilities of the library. Most of the tools are related to the running and monitoring the engine.", pretty_exceptions_enable=False)
@app.command(help="View visuals for a specific step")
def view_visual(plan_module, stepname:str):
from easul.util import create_package_class
plan = create_package_class(plan_module)
step = plan.steps[stepname]
driver = MemoryDriver.from_reference("VISUAL")
html = step.render_visual(driver, plan.steps)
import tempfile
fd = tempfile.NamedTemporaryFile(suffix=".html", delete=False)
fd.write(str(html).encode("utf8"))
fd.close()
os.system(f"open {fd.name}")
@app.command(help="Regenerate model algorithm and context data for EASUL tests.", epilog="NOTE: Only use this if files are lost or corrupted - it may require changes to tests.")
def regenerate_test_models():
from easul.manage.regenerate import generate_test_models
generate_test_models()
@app.command(help="Run EASUL engine according to provided configuration")
def run_engine(plan_module:str, engine_module:str):
from easul.util import create_package_class
plan = create_package_class(plan_module)()
engine = create_package_class(engine_module)()
engine.run(plan)
@app.command(help="Monitor EASUL broker for supplied plan/engine")
def monitor_broker(plan_module:str, engine_module:str):
from easul.util import create_package_class
plan = create_package_class(plan_module)()
engine = create_package_class(engine_module)()
from easul.manage.monitor import monitor_client
monitor_client(engine, plan)
if __name__ == "__main__":
app()
| rcfgroup/easul | manage.py | manage.py | py | 1,926 | python | en | code | 1 | github-code | 36 |
import logging
import airflow
from airflow.models import DAG
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
from utils.slugify import slugify
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(2)
}
categorias = [
{'actividad_id' : 11,
'actividad' : "Agricultura, ganaderia, aprovechamiento forestal, pesca y caza"},
{'actividad_id' : 21,
'actividad' : "Mineria"},
{'actividad_id' : 22,
'actividad' : "Electricidad, agua y suministro de gas por ductos al consumidor final"},
{'actividad_id' : 23,
'actividad' : "Construccion"},
{'actividad_id': 31,
'actividad' : "Industrias manufactureras"},
{'actividad_id' : 43,
'actividad' : "Comercio al por mayor"},
{'actividad_id' : 46,
'actividad' : "Comercio al por menor"},
{'actividad_id' : 48,
'actividad' : "Transporte, correos y almacenamiento"},
{'actividad_id' : 51,
'actividad' : "Informacion en medios masivos"},
{'actividad_id' : 52,
'actividad' : "Servicios financieros y de seguros"},
{'actividad_id' : 53,
'actividad' : "Servicios inmobiliarios y de alquiler de bienes muebles e intangibles"},
{'actividad_id' : 54,
'actividad' : "Servicios profesionales, cientificos y tecnicos"},
{'actividad_id' : 55,
'actividad' : "Direccion de corporativos y empresas"},
{'actividad_id' : 56,
'actividad' : "Apoyo a los negocios y manejo de desechos y serv. de remediacion"},
{'actividad_id' : 61,
'actividad' : "Servicios educativos"},
{'actividad_id' : 62,
'actividad' : "Servicios de salud y de asistencia social"},
{'actividad_id' : 71,
'actividad' : "Serv. de esparcimiento culturales y deportivos, y otros serv. recreativos"},
{'actividad_id' : 72,
'actividad' : "Servicios de alojamiento temporal y de preparacion de alimentos y bebidas"},
{'actividad_id' : 81,
'actividad' : "Otros servicios excepto actividades del gobierno"},
{'actividad_id' : 93,
'actividad' : "Actividades del gobierno y organismos internacionales extraterritoriales"},
]
# Monthly DAG that materializes one dummy task per business category.
dag = DAG(
    dag_id='03_siem_informacion_empresarial', default_args=args,
    schedule_interval='@monthly')
# sentinel tasks marking the beginning and end of the category chain
start_node = DummyOperator(task_id='inicio',
                          dag=dag)
end_node = DummyOperator(task_id='fin',
                        dag=dag)
def tareas_categorias(categorias):
    """Create one dummy task per category and chain them sequentially.

    The first task hangs off ``start_node``; each task is also wired
    directly to ``end_node``.
    """
    anterior = start_node
    for categoria in categorias:
        tarea = DummyOperator(task_id=slugify(categoria['actividad'])[:20],
                              dag=dag)
        anterior.set_downstream(tarea)
        anterior = tarea
        tarea.set_downstream(end_node)


tareas_categorias(categorias)
| erikriver/mixtli-etc | dags/03_siem_informacion_empresarial.py | 03_siem_informacion_empresarial.py | py | 2,866 | python | es | code | 2 | github-code | 36 |
from tkinter import Tk, Frame, Button, Text
from tkinter.ttk import Frame, Button
from tkinter.filedialog import askopenfile
class Application():
    """Tkinter GUI shell for a (mostly unimplemented) CSS re-ordering tool."""
    def __init__(self, root, title):
        """Build the button column and the read-only text area inside *root*."""
        self.root = root
        self.root.title(title)
        # Variable that stores the selected file handle (may be unnecessary)
        self.file_handle = ""
        master_frame = Frame(root)
        master_frame.pack(expand="yes", fill="both")
        # Create left button frame and its three action buttons
        button_frame = Frame(master_frame)
        self.open_button = Button(button_frame, text="Choose File", command=self.load_file)
        self.open_button.pack(expand="yes", fill="both")
        self.apply_button = Button(button_frame, text="Apply", command=self.apply_consistent, state="disabled")
        self.apply_button.pack(expand="yes", fill="both")
        self.save_button = Button(button_frame, text="Save File", command=self.save_file, state="disabled")
        self.save_button.pack(expand="yes", fill="both")
        # Create text frame and initialize the (disabled) text widget
        text_frame = Frame(master_frame)
        self.text_box = Text(text_frame, height=10, width=50, state="disabled")
        self.text_box.pack(side="top", expand="yes", fill="both")
        # Configure weights so the text column grows faster than the buttons
        master_frame.columnconfigure(0, weight=1)
        master_frame.columnconfigure(1, weight=5)
        for i in range(3):
            master_frame.rowconfigure(i, weight=1)
        # Position button and text frames in the grid
        button_frame.grid(row=0, column=0, rowspan=3, sticky="nsew")
        text_frame.grid(row=0, column=1, rowspan=3, sticky="nsew")
        self.root.minsize(500, 200)
    # Prompts the user to select a CSS file to open
    def load_file(self):
        """Ask for a .css file; on success enable the other buttons and show it."""
        fname = askopenfile(mode='r', filetypes=([("Cascading Style Sheet Document", "*.css")]))
        # NOTE(review): the file object is kept open and never closed —
        # consider a context manager once parse_file is implemented.
        self.file_handle = fname
        if fname:
            self.apply_button["state"] = "enabled" # Enables the other buttons
            self.save_button["state"] = "enabled"  # after a successful file load
            self.change_text(fname.read())
            self.parse_file(fname)
    # Stub: parse readlines into a more manageable form
    def parse_file(self, file_handle):
        return
    # Stub: apply the sorting scheme to the file contents
    def apply_consistent(self):
        return
    # Stub: called when the save button is pressed; will let the user save
    def save_file(self):
        return
    # Replaces the contents of the text field with *text*
    def change_text(self, text):
        self.text_box["state"] = "normal"   # Enables text widget for editing
        self.text_box.delete(1.0, "end")    # Clears current text
        self.text_box.insert("end", text)   # Enters file contents to text widget
        self.text_box["state"] = "disabled" # Re-disables text widget
def main():
    """Entry point: create the Tk root window and start the event loop."""
    root = Tk()
    Application(root, "Consistent CSS")
    root.mainloop()
if __name__ == '__main__':
main()
| Petetete/Consistent-CSS | consistent-css.py | consistent-css.py | py | 3,060 | python | en | code | 0 | github-code | 36 |
class Underscore:
    """A tiny re-implementation of a few underscore.js-style helpers."""

    def map(self, iterable, callback):
        """Replace every element in-place with callback(element); return the sequence."""
        for index, value in enumerate(iterable):
            iterable[index] = callback(value)
        return iterable

    def find(self, iterable, callback):
        """Return the first element for which callback is True, else None."""
        for value in iterable:
            if callback(value) == True:
                return value

    def filter(self, iterable, callback):
        """Return a new list of the elements for which callback is True."""
        return [value for value in iterable if callback(value) == True]

    def reject(self, iterable, callback):
        """Return a new list of the elements for which callback is False."""
        return [value for value in iterable if callback(value) == False]
# your code
# you just created a library with 4 methods!
# let's create an instance of our class
# yes we are setting our instance to a variable that is an underscore
# evens = _.filter([1, 2, 3, 4, 5, 6], lambda x: x % 2 == 0)
# should return [2, 4, 6] after you finish implementing the code above
arr = [1, 2, 3, 4]
Zolter = Underscore()
print(Zolter.map(arr, lambda x: x**2))
print(Zolter.reject([1, 2, 3, 4, 5, 6], lambda x: x % 2 == 0))
print(Zolter.filter([1, 2, 3, 4, 5, 6], lambda x: x % 2 == 0))
print(Zolter.find([1, 2, 3, 4, 5, 6], lambda x: x > 3))
string = "ABSCAASDA"
subString = "DA"
def count_substrings(string, substring):
    """Count the (possibly overlapping) occurrences of substring in string."""
    window = len(substring)
    matches = 0
    for start in range(len(string) - window + 1):
        if string.startswith(substring, start):
            matches += 1
    return matches
print(count_substrings(string, subString))
| Salman-Khatib/All_coding_dojo | python_stack/_python/python_fundementals/UnderScore/Underscore.py | Underscore.py | py | 1,681 | python | en | code | 0 | github-code | 36 |
def solution(a):
    """Return the smallest positive integer not present in *a*.

    This is the Codility demo task: e.g. [1, 3, 6, 4, 1, 2] -> 5 and
    [-1, -3] -> 1.  The original branch-heavy version returned wrong
    answers for all-positive inputs that do not start at 1 (e.g.
    [30, 50, 100] -> 31 instead of 1) and scanned the list in O(n) per
    candidate; a set gives O(1) membership tests.
    """
    present = set(a)
    candidate = 1
    while candidate in present:
        candidate += 1
    return candidate
print(solution([-5, 1, 3]))
print(solution([30, 50, 100]))
print(solution([0, 6]))
print(solution([90, 98]))
print(solution([1, 2, 3, -1, -2, -8]))
| briankiume/DemoCodility | DemoCodility.py | DemoCodility.py | py | 647 | python | en | code | 0 | github-code | 36 |
3883658721 |
# =============================================================
# Imports
# =============================================================
import logging
import smtplib
from server.utils import notification
# =============================================================
# Constant
# =============================================================
MAIL_SERVER = 'mail.haligonia.home.com'
ESXI_CONTROLLER_ADDRESS = 'esxicontroller@mail.haligonia.home.com'
# =============================================================
# Source
# =============================================================
class notificationDispatch(object):
    """
    Message dispatcher for the ESXiController framework.

    Composes a notification email from a template and sends it through
    the local mail server.
    """

    def __init__(self, log_level=logging.INFO):
        """
        Default constructor.

        :param log_level: logging level for the dispatcher's logger
        :return:
        """
        # State is kept per-instance (the original used shared class
        # attributes, which would leak between dispatcher instances).
        # The destination address
        self.__destination = None
        # The message type
        self.__msg_type = None
        # The message to send
        self.__message = None
        # Logger
        self.__logger = logging.getLogger("ESXiController - VmNotificationDispatch")
        self.__logger.setLevel(log_level)
        return

    def send_notification(self, dest_address, msg_type, reason, configs):
        """
        Compose and send one notification email.

        :param dest_address: the destination address
        :param msg_type: the message type to send
        :param reason: the reason to notify
        :param configs: the configs
        :return:
        """
        # Set internals
        self.__destination = dest_address
        self.__msg_type = msg_type
        # We create an smtp server on our mail server
        server = smtplib.SMTP(MAIL_SERVER)
        try:
            # Create the message
            self.__setup_message(reason, configs)
            # Send the message
            server.sendmail(ESXI_CONTROLLER_ADDRESS, self.__destination, self.__message.as_string())
        finally:
            # bug fix: always release the SMTP connection, even when
            # composing or sending the message fails
            server.quit()
        return

    def __setup_message(self, reason, configs):
        """
        Build self.__message from the notification template for __msg_type.

        :param reason: the reason
        :param configs: the vm configs
        :return:
        """
        # We get the message type
        self.__message = notification.get(self.__msg_type)
        self.__message = notification.format(self.__destination,
                                             self.__message,
                                             reason,
                                             configs)
return | CaptFrank/EsxiController | server/utils/notification/notificationdispatch.py | notificationdispatch.py | py | 2,687 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python
# coding=utf-8
import string, random
class LengthError(ValueError):
    """Raised when a value is longer than the permitted total length."""

    def __init__(self, arg):
        # bug fix: delegate to the base class so .args is the standard
        # one-tuple (the original assigned the string directly to .args,
        # which stores it as a tuple of single characters)
        super().__init__(arg)
def pad_zero_to_left(inputNumString, totalLength):
    """Left-pad *inputNumString* with zeros so it is *totalLength* characters.

    Used to build the fixed-width numeric suffix of an invitation code.

    :raises LengthError: if the input is already longer than totalLength
        (the original constructed the exception but never raised it and
        silently returned None).
    """
    if len(inputNumString) > totalLength:
        raise LengthError("The input is longer than the requested total length.")
    return inputNumString.rjust(totalLength, '0')
keySheed = string.ascii_letters + string.digits
#生成随即的字符串
random_key = lambda x, y:"".join([random.choice(x) for i in range(y)])
def invitation_code_generator(quantity, length_random, length_key):
    """Yield *quantity* invitation codes: ``<random part>L<zero-padded index>``.

    :param quantity: how many codes to generate
    :param length_random: length of the random prefix
    :param length_key: width of the zero-padded sequential suffix

    Indices that do not fit in *length_key* characters are reported and
    skipped.  (The unused ``tempString`` local from the original was
    removed.)
    """
    # 'L' separates the random prefix from the zero-padded sequential key
    placeHoldChar = "L"
    for index in range(quantity):
        try:
            yield random_key(keySheed, length_random) + placeHoldChar + \
                  pad_zero_to_left(str(index), length_key)
        except LengthError:
            print("Index exceeds the length of master key.")
for invitationCode in invitation_code_generator(200, 16, 4):
print(invitationCode)
| nbmyt/pythonPractice | 0001/0001v2.py | 0001v2.py | py | 1,187 | python | en | code | 0 | github-code | 36 |
from django.db import models
# null=True, blank=True это значит что данное поле может быть пустым, т.е. аватар не обязателен
NULLABLE = {'blank': True, 'null': True}
class Student(models.Model):
    """A student record managed by staff."""
    first_name = models.CharField(max_length=150, verbose_name='имя') # required
    last_name = models.CharField(max_length=150, verbose_name='фамилия') # required
    avatar = models.ImageField(upload_to='students/', verbose_name='аватар',
                               **NULLABLE) # optional because of **NULLABLE
    # a plain CharField is used here instead of the dedicated email field
    # on purpose, as an experiment
    email = models.CharField(max_length=150, verbose_name='@email', unique=True, **NULLABLE)
    comment = models.TextField(verbose_name='комментарий менеджера', **NULLABLE)
    is_active = models.BooleanField(default=True, verbose_name='активный')
    def __str__(self):
        return f'{self.first_name}, {self.last_name}'
    # def delete(self, *args, **kwargs):
    #     """Override of delete: deactivates the record instead of removing it"""
    #     self.is_active = False
    #     self.save()
    class Meta:
        verbose_name = 'студент'
        verbose_name_plural = 'студенты'
        ordering = ('last_name',)
class Subject(models.Model):
    """A subject (course) belonging to a single student."""
    title = models.CharField(max_length=150, verbose_name='название')
    description = models.TextField(verbose_name='описание')
    # deleting the student cascades to their subjects
    student = models.ForeignKey(Student, on_delete=models.CASCADE, verbose_name='студент')
    def __str__(self):
        return f'{self.title}'
    class Meta:
        verbose_name = 'предмет'
        verbose_name_plural = 'предметы'
| DSulzhits/06_3_20_1_django_ORM | main/models.py | models.py | py | 1,958 | python | ru | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
import keras
from keras.models import Model
from keras.layers import Dense,Activation,Input
from keras.callbacks import ModelCheckpoint
X = np.random.normal(0,1,(100,8))
Y = np.random.normal(0,1,(100,1))
X.shape
batch = 32
valX,valY = np.random.normal(0,1,(100,8)),np.random.normal(0,1,(100,1))
class LossHistory(keras.callbacks.Callback):
    """Keras callback that records losses and model weights after every epoch."""
    def on_train_begin(self, logs={}):
        # NOTE(review): mutable default argument for logs — Keras passes logs
        # explicitly, but logs=None would be safer.
        self.losses = []
        self.val_loss=[]
        self.weights= []
    def on_epoch_end(self, batch, logs={}):
        # NOTE(review): in on_epoch_end the second argument is the epoch
        # index, so 'batch' here is actually the epoch number (and it shadows
        # the module-level 'batch' constant) — confirm and rename.
        self.losses.append(logs.get('loss'))
        self.val_loss.append(logs.get('val_loss'))
        self.weights.append(self.model.get_weights())
        name = 'weights'+'_'+str(batch)+'.h5'
        self.model.save_weights(name)
def keras_models(X,Y,kernel_init = 'random_uniform',output_activation = 'tanh',input_activation = 'relu',
                 validation_data = [valX,valY]):
    """Build and train a small dense regression model; return (model, losses).

    NOTE(review): several smells to confirm/fix upstream —
    * mutable default argument for validation_data;
    * ``Model(inputs=ip, output=out)`` uses the deprecated singular keyword
      (newer Keras expects ``outputs=``);
    * the checkpoint monitors val_loss while its filename template references
      val_acc, and 'checkpoint' is created but deliberately left out of
      callbacks_list (commented out).
    """
    losses = LossHistory()
    ip = Input(batch_shape=(batch,X.shape[1]))
    layer1 = Dense(32, kernel_initializer=kernel_init)(ip)
    layer2 = Activation(input_activation)(layer1)
    out = Dense(Y.shape[1],activation = output_activation)(layer2)
    model = Model(inputs = ip,output = out)
    model.compile(optimizer='adam',loss = 'mean_squared_error')
    filepath="weights-improvement-{epoch:02d}-{val_acc:.2f}.hdf5"
    checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True)
    callbacks_list = [losses]#,checkpoint]
    model.fit(X,Y,validation_data=validation_data,batch_size=batch,epochs=100,callbacks=callbacks_list,verbose=1)
    return model,losses
model = keras_models(X,Y,kernel_init = 'random_uniform',output_activation = 'tanh',input_activation = 'relu',
validation_data = [valX,valY]) | avilin66/Pyspark_codes | keras_basic_model.py | keras_basic_model.py | py | 1,902 | python | en | code | 1 | github-code | 36 |
import datetime
# An object for representing a package to be delivered.
class Package():
def __init__(self, package_id, address, city, state, zip, delivery_deadline, mass, special_notes,
arrival_time="8:00 AM", required_truck=-1, deliver_with=[]):
# an integer which is unique to each package
self.package_id = package_id
# the address this package needs to be delivered to
self.address = address
# the city this package needs to be delivered to
self.city = city
# the sate this package needs to be delivered to
self.state = state
# the zip code this package needs to be delivered to
self.zip = zip
# the time by which this package must be delivered
self.delivery_deadline = delivery_deadline
# the weight of the package, in kilograms
self.mass = mass
# any special notes that may modify what needs to happen
# for this package
self.special_notes = special_notes
# the time that this package arrives to the hub
self.arrival_time = arrival_time
# the truck that this package is required to travel on
self.required_truck = required_truck
# other packages that must be delivered with this one
self.deliver_with = deliver_with
# if the package is "at the hub", "en route", or "delivered"
self.delivery_status = "at the hub"
# at what time the package is delivered
self.delivery_time = None
# the truck number the package was delivered on
self.delivered_on = -1
# the time the package was loaded onto a truck
self.loaded_at = None
# allows for packages to be sorted on the delivery deadline.
def delivery_deadline_for_sort(self):
if type(self.delivery_deadline) == type(""):
return datetime.datetime.now().replace(hour=23, minute=59, second=59, microsecond=99999)
else:
return self.delivery_deadline
# return a string representation of a package
def __str__(self):
return (
f'(package_id: "{str(self.package_id).zfill(2)}" | address: "{self.address}"'
f' | delivery_deadline: "{self.delivery_deadline}" | city: "{self.city}" | zipcode: "{self.zip}" | mass: "{self.mass}"'
f' | loaded_at: "{self.loaded_at}" | delivery_status: "{self.delivery_status}" | delivery_time: "{self.delivery_time}" | delivered_on truck: "{self.delivered_on}")'
)
# return a string representation of a package
def __repr__(self):
return self.__str__()
# two packages are equal if their package ids are equal
def __eq__(self, other):
if type(self) == type(other):
return self.package_id == other.package_id
else:
return False
| joshsizer/wgu_projects | wgu_data_structures_and_algorithms_2/package.py | package.py | py | 2,855 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
"""
Módulo ``PreProcWindow``
========================
Implementa uma janela com funcionalidades de pré-processamento dos dados.
.. raw:: html
<hr>
"""
import inspect
import numpy as np
import pyqtgraph as pg
from PyQt5 import QtCore
from framework import file_m2k, file_civa, file_omniscan, post_proc, pre_proc
from guiqt.Windows import PreProcWindowDesign
from guiqt.Windows.ErrorWindow import ErrorWindow
from guiqt.Utils.ParameterRoot import ParameterRoot
class PreProcWindow(PreProcWindowDesign.Ui_pre_proc_dialog):
    """ Window used to apply pre-processing algorithms to the data loaded by the main
    window. Algorithms are discovered automatically, as long as they are defined in
    ``framework/pre_proc.py``. Each must take at least two parameters: ``data_insp``,
    an instance of the ``DataInsp`` class, and ``shots``, a `numpy.ndarray` with the
    indices of the shots the algorithm will be applied to.
    """
    def __init__(self, dialog, main_window):
        """ Class constructor.
        Parameters
        ----------
        dialog : :class:`PyQt5.QtWidgets.QDialog`
            Dialog window.
        main_window :class:`guiqt.gui.MainWindow`
            Main window.
        """
        self.setupUi(dialog)
        self.dialog = dialog
        dialog.setModal(True)
        self.main_window = main_window
        # discover the algorithms available in the ``pre_proc`` module
        algs = [x[0] for x in inspect.getmembers(pre_proc, inspect.isfunction)]
        for i in range(len(algs)):
            self.combo_box_alg.addItem(algs[i])
        # create the root of the parameter tree
        self.parameters_root = ParameterRoot()
        # bound the spin boxes to the data dimensions
        self.spin_box_sequence.setRange(0, self.main_window.dados.ascan_data.shape[1] - 1)
        self.spin_box_channel.setRange(0, self.main_window.dados.ascan_data.shape[2] - 1)
        # connect the signals
        self.combo_box_alg.currentIndexChanged.connect(self.alg_changed)
        self.button_apply.clicked.connect(self.visualize)
        self.button_save.clicked.connect(self.save)
        self.button_reset.clicked.connect(self.reset)
        self.button_resetall.clicked.connect(self.reset_all)
        self.spin_box_channel.valueChanged.connect(self.redraw)
        self.spin_box_sequence.valueChanged.connect(self.redraw)
        self.spin_box_shot.valueChanged.connect(self.redraw)
        # disable the context menus
        self.plot_widget_ascan.setMenuEnabled(False)
        self.plot_widget_bscan.setMenuEnabled(False)
        self.alg_changed()
        try:
            self.draw_ascan(self.main_window.dados.ascan_data[:, 0, 0, self.main_window.spin_box_shot.value()])
            self.draw_bscan(self.main_window.dados.ascan_data[:, 0, :, self.main_window.spin_box_shot.value()])
        except Exception:  # the exception raised here is not specific
            return
        self.shot_pos = 0
        self.last_result = self.main_window.dados.ascan_data[:, :, :, :]
        shape = self.last_result.shape
        self.spin_box_sequence.setRange(0, shape[1] - 1)
        self.spin_box_channel.setRange(0, shape[2] - 1)
        self.spin_box_shot.setRange(0, shape[3] - 1)
        # remove the '?' button from the title bar
        dialog.setWindowFlags(dialog.windowFlags() ^ QtCore.Qt.WindowContextHelpButtonHint)
        dialog.exec_()
    def draw_ascan(self, data):
        """ Draw the A-scan of the selected slicing.
        Parameters
        ----------
        data : :class:`numpy.ndarray`
            A-scan to be drawn.
        """
        self.plot_widget_ascan.getPlotItem().clear()
        self.plot_widget_ascan.addItem(pg.PlotDataItem(data))
    def draw_bscan(self, img):
        """ Draw the B-scan from the data in the loaded ``DataInsp``.
        Parameters
        ----------
        img : :class:`numpy.ndarray`
            B-scan to be drawn.
        """
        img_bscan = pg.ImageView()  # create an imageview
        # put the image into the imageview, normalized symmetrically around 0
        max = np.max(np.abs(img))
        img_bscan.setImage(post_proc.normalize(img.T, image_max=max, image_min=-max), levels=(0, 1))
        img_bscan.getImageItem().setLookupTable(self.main_window.lut)
        # show the image
        self.plot_widget_bscan.getPlotItem().clear()
        self.plot_widget_bscan.addItem(img_bscan.getImageItem())
        # invert the direction of the y axis
        img_bscan.getImageItem().getViewBox().invertY()
        # compute the axes
        if img is not None:
            # if an image was given, do not compute the axes
            pass
        else:
            limits = QtCore.QRectF(self.main_window.img_rect_esq[0], self.main_window.img_rect_esq[1],
                                   self.main_window.img_rect_esq[2] - self.main_window.img_rect_esq[0],
                                   self.main_window.img_rect_esq[3] - self.main_window.img_rect_esq[1])
            img_bscan.getImageItem().setRect(limits)
        # center the image
        self.plot_widget_bscan.getPlotItem().autoRange()
    def alg_changed(self):
        """ Collect the parameters of the selected algorithm. Parameters whose default
        value is ``None`` are assumed to be of type ``float``.
        """
        alg_index = self.combo_box_alg.currentIndex()
        func_str = self.combo_box_alg.itemText(alg_index)
        func = getattr(pre_proc, func_str)
        func_params = inspect.signature(func)
        params = [key for key in func_params.parameters.keys()]
        defaults = [func_params.parameters[key].default for key in params]
        self.parametertree.clear()
        self.parameters_root = ParameterRoot()
        # TODO: Use ScalableGroup to add the optional arguments.
        for i in range(len(params)):
            if i == 0:
                continue  # the first parameter is always data_insp?
            if defaults[i] is inspect._empty:
                continue
            type_val = type(defaults[i]).__name__
            if type_val == 'NoneType':
                self.parameters_root.addChild({'name': params[i], 'type': 'float', 'value': 0, 'decimals': 12})
            elif params[i] == 'shots':
                self.parameters_root.addChild({'name': params[i], 'type': 'ndarray', 'value': defaults[i], 'limits':
                                              (0, self.main_window.dados.ascan_data.shape[3] - 1)})
            elif type_val == 'ndarray':
                self.parameters_root.addChild({'name': params[i], 'type': 'ndarray', 'value': defaults[i]})
            else:
                self.parameters_root.addChild({'name': params[i], 'type': type_val, 'value': defaults[i],
                                               'decimals': 12})
        self.parametertree.addParameters(self.parameters_root)
    def apply_alg(self):
        """ Execute the selected algorithm.
        """
        alg_index = self.combo_box_alg.currentIndex()
        func_str = self.combo_box_alg.itemText(alg_index)
        func = getattr(pre_proc, func_str)
        try:
            self.shot_pos = self.parameters_root.get_parameters()['shots'].astype(int)
        except KeyError:
            self.shot_pos = int(self.parameters_root.get_parameters()['shot'])
        self.last_result = np.copy(self.main_window.dados.ascan_data[:, :, :, self.shot_pos], order='F')
        try:
            out = func(self.main_window.dados, **self.parameters_root.get_parameters())
            self.spin_box_sequence.setRange(0, out.shape[1] - 1)
            self.spin_box_channel.setRange(0, out.shape[2] - 1)
            self.spin_box_shot.setRange(0, out.shape[3] - 1)
            self.main_window.spin_box_sequence.setMaximum(out.shape[1] - 1)
            self.main_window.spin_box_channel.setMaximum(out.shape[2] - 1)
            self.main_window.spin_box_shot.setMaximum(out.shape[3] - 1)
            self.main_window.ascan_max = np.max(np.abs(out))
            return out
        except Exception as e:
            ErrorWindow("Error during preprocessing: " + e.args[0])
            return None
    def visualize(self):
        """ Apply the selected algorithm. The result is expected to be saved
        by the algorithm itself.
        """
        out = self.apply_alg()
        if out is None:
            return
        seq = self.spin_box_sequence.value()
        chan = self.spin_box_channel.value()
        shot = self.spin_box_shot.value()
        self.draw_bscan(np.real(self.main_window.dados.ascan_data[:, seq, :, shot]))
        self.draw_ascan(np.real(self.main_window.dados.ascan_data[:, seq, chan, shot]))
    def save(self):
        """ Called when the save button is clicked. Since the algorithm itself
        saves the result, the window just closes.
        """
        # Just close the window
        self.dialog.close()
    def reset(self):
        """ Undo the last processing step.
        """
        if self.last_result.shape.__len__() == 3:
            self.main_window.dados.ascan_data[:, :, :, self.shot_pos] = self.last_result[:, :, :]
        else:
            self.main_window.dados.ascan_data = self.last_result
        self.redraw()
    def reset_all(self):
        """ Reload the A-scans by opening the file again.
        """
        if self.main_window.file[-4:] == ".m2k":
            d = {'filename': self.main_window.file, 'type_insp': "immersion", 'water_path': 0.0, 'freq_transd': 5.0,
                 'bw_transd': 0.6, 'tp_transd': "gaussian"}
            func = file_m2k.read
            self.main_window.readonly_params = False
        elif self.main_window.file[-5:] == ".civa":
            d = {'filename': self.main_window.file, 'sel_shots': None}
            func = file_civa.read
            self.main_window.readonly_params = True
        elif self.main_window.file[-4:] == ".opd":
            d = {'filename': self.main_window.file, 'sel_shots': 0, 'freq': 5.0, 'bw': 0.6,
                 'pulse_type': "gaussian"}
            func = file_omniscan.read
            self.main_window.readonly_params = False
        else:
            if self.main_window.file:
                ErrorWindow("Could not find file")
            return
        self.main_window.run_in_thread(func, d, self.reset_all_finished)
    def reset_all_finished(self, data_insp):
        """ Callback for reset_all: hand the reloaded data to the main window. """
        self.main_window.finished_open_dir(data_insp)
        self.last_result = self.main_window.dados.ascan_data
        self.redraw()
    def redraw(self):
        """ Redraw the A-scan and B-scan when a spin box changes.
        """
        seq = self.spin_box_sequence.value()
        chan = self.spin_box_channel.value()
        shot = self.spin_box_shot.value()
        self.draw_bscan(np.real(self.main_window.dados.ascan_data[:, seq, :, shot]))
        self.draw_ascan(np.real(self.main_window.dados.ascan_data[:, seq, chan, shot]))
| matheusfdario/role-finder | AUSPEX-smart_wedge/guiqt/Windows/PreProcWindow.py | PreProcWindow.py | py | 10,993 | python | pt | code | 0 | github-code | 36 |
import sqlite3
# Open (or create) the employee database and dump the employees table.
conn = sqlite3.connect('employee.db')
c = conn.cursor()
# One-time setup kept for reference: create the table and seed one row.
# c.execute("""CREATE TABLE employees (
#             first text,
#             last text,
#             pay integer
#             )""")
# c.execute("INSERT INTO employees VALUES ('Mary', 'oza', 70000)")
conn.commit()
# Read back and display every employee row.
c.execute("SELECT * FROM employees")
print(c.fetchall())
conn.commit()  # NOTE(review): no-op — the SELECT made no changes
conn.close()
| Parth-Ps/python | sqlite3_database/employees.py | employees.py | py | 347 | python | en | code | 0 | github-code | 36 |
10924502441 | """
版本:2.0
作者:sky
作用:判断密码强度
日期:20181008
"""
class PwdTool:
    """Scores the strength of a password on three rules.

    One point each for: length >= 8, at least one digit, at least one
    letter.  The score accumulates in ``pwdstrength`` (0..3) and a hint
    is printed for every failed rule.
    """

    def __init__(self, pwd):
        # the password under evaluation
        self.pwd_str = pwd
        # strength score, one point per satisfied rule
        self.pwdstrength = 0

    def check_num(self):
        """Return True if the password contains at least one numeric character."""
        return any(c.isnumeric() for c in self.pwd_str)

    def check_str(self):
        """Return True if the password contains at least one alphabetic character."""
        return any(c.isalpha() for c in self.pwd_str)

    def process(self):
        """Evaluate all three rules, updating pwdstrength and printing hints."""
        # rule 1: length >= 8
        if len(self.pwd_str) >= 8:
            self.pwdstrength += 1
        else:
            print('密码长度要大于8个字符或数字')
        # rule 2: contains a digit
        if self.check_num():
            self.pwdstrength += 1
        else:
            print('密码需要含有数字')
        # rule 3: contains a letter
        if self.check_str():
            self.pwdstrength += 1
        else:
            print('密码需要含有字符')
def main():
    """Prompt for a password up to five times, logging every attempt.

    Each attempt is scored with PwdTool and appended to pwd.txt; the loop
    stops early when a fully strong password (score 3) is entered.
    """
    trytimes = 5
    while trytimes > 0:
        pwd = input('请输入密码:')
        tool = PwdTool(pwd)
        tool.process()
        # bug fix: context manager guarantees the log file is closed even
        # if the write fails (the original re-opened and closed manually)
        with open('pwd.txt', 'a') as f:
            f.write('密码:{},强度:{}。\n'.format(pwd, tool.pwdstrength))
        if tool.pwdstrength == 3:
            print('密码强度合格')
            break
        else:
            print('密码强度不合格')
            trytimes -= 1
        if trytimes <= 0:
            print('尝试次数太多!')
main() | shenkeyu/panduanmima | trypwd2.0.py | trypwd2.0.py | py | 1,473 | python | en | code | 0 | github-code | 36 |
import pickle
from tqdm import tqdm
import os
import pandas as pd
import numpy as np
from statsmodels.tsa.arima.model import ARIMA
from pmdarima.arima import auto_arima
def arima_model(test_codes, csv_filename, folder_path, n_output):
    """Backtest an auto-tuned ARIMA forecast for each code and return MSE losses.

    For every code in ``test_codes`` whose summary row is not constant
    (min != max) and whose per-code CSV exists, the last ``n_output`` points
    are held out, an ARIMA order is picked with ``auto_arima`` on the rest,
    and the mean squared forecast error is recorded.

    Args:
        test_codes: iterable of code identifiers to evaluate.
        csv_filename: summary CSV with at least 'code', 'min' and 'max' columns.
        folder_path: directory/prefix where '<code>.csv' series files live.
        n_output: number of trailing observations to forecast (e.g. 12 months).

    Returns:
        tuple (mean_loss, losses): average MSE and the per-code MSE list.

    Side effects:
        Prints the losses and pickles them to 'my_list_ARIMA.pkl'.
    """
    df = pd.read_csv(csv_filename)
    # removed the no-op 'n_output = n_output' self-assignment
    loss = []
    for code in tqdm(test_codes):
        row = df[df['code'] == code].iloc[0]
        # NOTE(review): plain concatenation — folder_path must end with a separator.
        filename = folder_path + code + '.csv'
        # Skip constant series: fitting ARIMA is pointless when min == max.
        if row['min'] != row['max'] and os.path.isfile(filename):
            values = pd.read_csv(filename).iloc[:, 1]
            valid_values = values[:-n_output]    # training window
            actual_values = values[-n_output:]   # held-out ground truth
            order = auto_arima(valid_values, seasonal=False, stepwise=True, trace=False).order
            model_fit = ARIMA(valid_values, order=order).fit()
            predictions = model_fit.predict(start=len(valid_values), end=len(valid_values) + n_output - 1)
            loss.append(np.mean((predictions - actual_values) ** 2))
    mean_loss = np.mean(loss)  # hoisted: the original computed this twice
    print(mean_loss)
    print(loss)
    with open('my_list_ARIMA.pkl', 'wb') as file:
        pickle.dump(loss, file)
    return mean_loss, loss
| stergioa/masterThesis4 | src/forecasting_models/trash/test_ARIMA.py | test_ARIMA.py | py | 1,313 | python | en | code | 0 | github-code | 36 |
11171979751 | import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
# Teht 1: scatter of employee age vs salary.
df = pd.read_csv('emp-dep.csv')
df.plot.scatter('age', 'salary')
plt.title('Työntekijät ja palkat')
plt.xlabel('Palkat')
plt.show()

# Department head-counts as a vertical bar chart.
count = df['dname'].value_counts()
# kind="barh" would flip it to horizontal
count.plot(kind="bar")
plt.show()

# Same counts via seaborn, vertical then with the axes flipped.
count = pd.DataFrame(df['dname'].value_counts()).reset_index()
count.columns = ['dname', 'count']
sns.barplot(x='dname', y='count', data=count)
plt.show()
sns.barplot(x='count', y='dname', data=count)
plt.show()

# Teht 3: age-group distribution.
count_age = df['age_group'].value_counts()  # dropped a stray trailing semicolon
count_age.plot(kind="bar")
plt.show()

# Gender share as a pie chart.
gvc = df['gender'].value_counts()
# Fixed: startangle was a garbled hex literal (0xde4db3ef); 90 starts at 12 o'clock.
gvc.plot(kind='pie', ylabel='', labels=['miehet', 'naiset'],
         startangle=90, autopct = '%1.1f%%')
plt.show()

# Grouped bars: counts per age group split by gender.
cag = df.groupby(['age_group', 'gender']).size().unstack()
fig, ax = plt.subplots()
ax = cag.plot(kind='bar')
ax.legend(['miehet', 'naiset'])
plt.gca().yaxis.set_major_locator(plt.MultipleLocator(1))
plt.show()
| emilsto/Data-analytics-and-machinelearning | week37/t1/t1.py | t1.py | py | 1,019 | python | en | code | 0 | github-code | 36 |
28798444261 | #I pledge my honor that I have abided by the Stevens Honor System.
#Zachary Jones
#HW6 Problem 2
import datetime
def get_date():
    """Prompt the user for a date string in M/D/YYYY format and return it."""
    # input() already returns a str; the old str() wrapper was redundant.
    date = input('Enter date M/D/YYYY: ')
    return date
def validate_date(date):
    """Print whether *date* parses as a valid M/D/YYYY date.

    Returns None either way; validity is reported on stdout only.
    """
    fmt = '%m/%d/%Y'  # renamed from 'format' to stop shadowing the builtin
    try:
        datetime.datetime.strptime(date, fmt)
        print('{} is a valid date.'.format(date))
    except ValueError:
        print('{} is an invalid date.'.format(date))
validate_date(
get_date()
) | Eric-Wonbin-Sang/CS110Manager | 2020F_hw6_submissions/joneszachary/ZacharyJonesCH7P2.py | ZacharyJonesCH7P2.py | py | 464 | python | en | code | 0 | github-code | 36 |
74318817382 | try:
from urlparse import urljoin
except ImportError:
# python3 compatibility
from urllib.parse import urljoin
from zope.dottedname.resolve import resolve
def get_page_url(skin_name, page_mappings, page_id):
    """Return the mapped path for ``page_id``, or '/' when no page is given.

    ``skin_name`` is accepted for signature symmetry with the other helpers
    but does not influence the lookup.
    """
    if page_id is None:
        return '/'
    return page_mappings[page_id].get('path', '/')
def get_page_class(skin_name, page_mappings, page_id=None, fallback=None,
                   default_pages=None):
    """Return the page class for a given skin name and page mapping.

    Resolution order:
    * without ``page_id``: the given ``fallback``, or the skin's default page
      resolved from ``default_pages``;
    * with a ``page_id``: the dotted name registered for ``skin_name`` (or the
      mapping's own 'fallback' entry) resolved to a class, otherwise the
      fallback computed above.
    """
    # 'fallback and fallback or resolve(...)' replaced with the equivalent,
    # idiomatic short-circuit ('or' keeps resolve() lazy).
    fallback = fallback or resolve(default_pages[skin_name])
    if not page_id:
        return fallback
    page_class_mapping = page_mappings[page_id].get('page_class', None)
    if page_class_mapping is None:
        return fallback
    result = page_class_mapping.get(
        skin_name, page_class_mapping.get('fallback', None))
    # Conditional expression instead of 'x and y or z' (which silently fell
    # back whenever resolve() returned something falsy).
    return resolve(result) if result else fallback
def page_factory(base_url, browser, default_page_class, page_mappings,
                 skin_name, page_id=None, **kwargs):
    """Instantiate the page object for ``page_id`` (or the default page).

    Without a ``page_id`` the default class is bound to ``base_url``; with one,
    the class is looked up per skin and the mapping's path is joined onto
    ``base_url``. Extra keyword arguments are forwarded to the page class.
    """
    if page_id is None:
        # Removed the redundant pre-assignment of url; each branch sets it.
        page_class = default_page_class
        url = base_url
    else:
        page_class = get_page_class(
            skin_name,
            page_mappings,
            page_id=page_id, fallback=default_page_class)
        url = urljoin(base_url, page_mappings[page_id]['path'])
    return page_class(browser, base_url=url, **kwargs)
| davidemoro/pytest-pypom-navigation | pypom_navigation/util.py | util.py | py | 1,916 | python | en | code | 2 | github-code | 36 |
class Node:
    """Singly linked list node holding one queue value."""

    def __init__(self, value):
        self.value = value
        self.next = None


class Queue:
    """FIFO queue backed by a singly linked list with O(1) enqueue/dequeue.

    Fixes over the original implementation:
    * enqueue walked the whole list (O(n)) even though a tail pointer was
      maintained; it now links directly through ``self.tail``;
    * dequeue left a stale ``tail`` when the queue drained, making the next
      enqueue crash with AttributeError; ``tail`` is reset alongside ``head``.
    """

    def __init__(self):
        self.head = None
        self.tail = None
        self.no_of_elements = 0

    def enqueue(self, value):
        """Append ``value`` at the tail in O(1)."""
        node = Node(value)
        if self.tail is None:
            self.head = self.tail = node
        else:
            self.tail.next = node
            self.tail = node
        self.no_of_elements += 1

    def size(self):
        """Return the number of stored elements."""
        return self.no_of_elements

    def is_empty(self):
        """Return True when the queue holds no elements."""
        return self.no_of_elements == 0

    def dequeue(self):
        """Remove and return the head value, or None when empty."""
        if self.is_empty():
            return None
        node = self.head
        self.head = node.next
        if self.head is None:
            self.tail = None  # keep tail consistent once the queue drains
        self.no_of_elements -= 1
        return node.value

    def peak(self):
        """Return the head value without removing it, or None when empty."""
        if self.is_empty():
            return None
        return self.head.value

    def reverse(self):
        """Reverse the queue in place (iteratively) and announce it."""
        if self.size() > 1:
            prev = None
            node = self.head
            self.head, self.tail = self.tail, self.head
            while node is not None:
                node.next, prev, node = prev, node, node.next
            print("Queue reversed")
# Demo: enqueue 1..10, reverse, then drain while reporting peak/size.
q=Queue()
print("peak : "+str(q.peak()))
print("size : "+str(q.size()))
for i in range(1,10+1):
    print("enqueue : "+str(i))
    q.enqueue(i)
print("peak : "+str(q.peak()))
q.reverse()
print("peak :"+str(q.peak()))
for i in range(10):
    print("dequeue :"+str(q.dequeue()))
print("peak :"+str(q.peak()))
print("size : "+str(q.size()))
| sripriya-potnuru/implementations-of-algorithms-and-datastructures | python/queue/queue_using_linked_list.py | queue_using_linked_list.py | py | 1,443 | python | en | code | 0 | github-code | 36 |
14151407552 | import logging
from datetime import datetime
from pythonjsonlogger import jsonlogger
from src.config import LOG_LEVEL
import os
# NOTE(review): 'path' alias appears unused in this module — confirm before removing.
path = os.path
# Root logger with a console handler plus a file handler for the journal.
logger = logging.getLogger()
logHandler = logging.StreamHandler()
fileHandler = logging.FileHandler("logger/journals/log_file.log")
class CustomJsonFormatter(jsonlogger.JsonFormatter):
    """JSON formatter that injects a UTC timestamp and an upper-cased level."""

    def add_fields(self, log_record, record, message_dict):
        super().add_fields(log_record, record, message_dict)
        if not log_record.get('timestamp'):
            # Derived from utcnow(), not record.created, so it is slightly off.
            log_record['timestamp'] = datetime.utcnow().strftime('%Y-%m-%dT%H:%M:%S.%fZ')
        level = log_record.get('level')
        log_record['level'] = level.upper() if level else record.levelname
formatter = CustomJsonFormatter('%(timestamp)s %(level)s %(name)s %(message)s')
# Attach the file journal handler to the logger.
logger.addHandler(fileHandler)
# NOTE(review): the JSON formatter is set on the stream handler only; the file
# handler emits unformatted records — confirm this is intentional.
logHandler.setFormatter(formatter)
logger.addHandler(logHandler)
logger.setLevel(LOG_LEVEL)
| Safonovdv91/web_gymkhana_bot_server | logger/logger.py | logger.py | py | 1,168 | python | en | code | 1 | github-code | 36 |
22015297058 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 14 12:17:00 2021
@author: paradeisios
"""
import cv2
def get_video_secs(video):
    """Return the duration of *video* in whole seconds (frame count / fps)."""
    capture = cv2.VideoCapture(video)
    frames_per_second = capture.get(cv2.CAP_PROP_FPS)
    total_frames = capture.get(cv2.CAP_PROP_FRAME_COUNT)
    capture.release()
    # VideoCapture.get already yields floats; truncate to whole seconds.
    return int(total_frames / frames_per_second)
| paradeisios/luminance | utils/get_video_secs.py | get_video_secs.py | py | 358 | python | en | code | 0 | github-code | 36 |
25125476 | n, m = map(int, input().split())
nums = sorted(list(map(int, input().split())))
visited = [False] * n
temp = []  # current partial sequence
def dfs():
    """Print every length-m sequence drawn from nums, skipping duplicate outputs."""
    if len(temp) == m:
        print(*temp)
        return
    remember_me = 0
    for i in range(n):
        # Skip used slots and values equal to the one just tried at this depth.
        if not visited[i] and remember_me != nums[i]:
            visited[i] = True
            temp.append(nums[i])
            remember_me = nums[i]
            dfs()
            visited[i] = False
            temp.pop()
dfs()
# Compared to the basic N-and-M solutions, a couple of extra guards are added:
# remember_me stops duplicate sequences from being printed, and
# visited로 방문해야 될 숫자를 구별한다. | kmgyu/baekJoonPractice | bruteForce/N과M 시리즈/(9).py | (9).py | py | 685 | python | ko | code | 0 | github-code | 36 |
22354196775 | import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision = "64d90a1a69bc"
down_revision = "e5594ed3ab53"
branch_labels = None
depends_on = None
def upgrade():
    """Create the background_tasks table (unique on name + project)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table(
        "background_tasks",
        sa.Column("id", sa.Integer(), nullable=False),
        sa.Column("name", sa.String(length=255), nullable=False),
        sa.Column("project", sa.String(length=255), nullable=False),
        sa.Column("created", sa.TIMESTAMP(), nullable=True),
        sa.Column("updated", sa.TIMESTAMP(), nullable=True),
        sa.Column("state", sa.String(length=255), nullable=True),
        sa.Column("timeout", sa.Integer(), nullable=True),
        sa.PrimaryKeyConstraint("id"),
        sa.UniqueConstraint("name", "project", name="_background_tasks_uc"),
    )
    # ### end Alembic commands ###
def downgrade():
    """Drop the background_tasks table."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table("background_tasks")
    # ### end Alembic commands ###
| mlrun/mlrun | server/api/migrations_sqlite/versions/64d90a1a69bc_adding_background_tasks_table.py | 64d90a1a69bc_adding_background_tasks_table.py | py | 1,069 | python | en | code | 1,129 | github-code | 36 |
15361552534 | from conans import ConanFile, CMake
import os
class StringIdConan(ConanFile):
    """Conan recipe for foonathan/string_id (hashed string identifiers)."""
    name = "string_id"
    version = "2.0-2"
    description = "A small C++ library to handle hashed strings serving as identifiers."
    license="Modified BSD License (3-Clause BSD license)"
    settings = "os", "compiler", "build_type", "arch"
    options = {"compiler_version": ["11", "14"]}
    url = "https://github.com/pjohalloran/conan-stringid"
    default_options = "compiler_version=14",
    def source(self):
        """Clone the upstream repo and check out the tag matching self.version."""
        self.run("git clone https://github.com/foonathan/string_id")
        os.chdir("string_id")
        self.run("git checkout v%s" % self.version)
    def build(self):
        """Configure and build with CMake inside string_id/build."""
        # NOTE(review): os.chdir is process-global and not restored afterwards.
        os.makedirs("string_id/build")
        os.chdir("string_id/build")
        self.run("cmake ..")
        self.run("cmake --build .")
    def package(self):
        """Flatten headers into include/ and static libraries into lib/."""
        self.copy("*.hpp", dst="include", keep_path=False)
        self.copy("*.hpp.in", dst="include", keep_path=False)
        self.copy("*.lib", dst="lib", keep_path=False)
        self.copy("*.a", dst="lib", keep_path=False)
    def package_info(self):
        """Expose link/compile flags for the selected C++ standard."""
        self.cpp_info.sharedlinkflags = ["-std=c++%s" % self.options.compiler_version]
        self.cpp_info.exelinkflags = ["-std=c++%s" % self.options.compiler_version]
        self.cpp_info.libs = ["foonathan_string_id", "stdc++"]
        self.cpp_info.cppflags = ["-std=c++%s" % self.options.compiler_version, "-stdlib=libc++"]
| pjohalloran/conan-stringid | conanfile.py | conanfile.py | py | 1,343 | python | en | code | 0 | github-code | 36 |
22024978373 | # Assignment: Draw Stars
# Karen Clark
# 2018-06-04
# Assignment: Stars
# Write the following functions.
# Part I
# Create a function called draw_stars() that takes a list of numbers and
# prints out *.
from __future__ import print_function
from colorama import init, Fore
from termcolor import colored
def draw_stars(x):
    """Print one red row of '*' per entry in x (each entry is the row length)."""
    init()  # enable ANSI colours on Windows (colorama)
    # String repetition replaces the manual while-loop accumulation.
    for count in x:
        print(colored("*" * count, 'red'))
# Part II
# Modify the function above. Allow a list containing integers and strings
# to be passed to the draw_stars() function. When a string is passed,
# instead of # displaying *, display the first letter of the string
# according to the # example below.
def draw_stars2(x):
    """Print rows for a mixed list of ints and strings.

    Ints produce a red row of that many '*'; strings produce their first
    letter, lower-cased, repeated once per character of the string.
    """
    init()
    for item in x:
        if isinstance(item, int):
            print(colored("*" * item, 'red'))
        elif isinstance(item, str):
            if item:
                print(item[0].lower() * len(item))
            else:
                # The original indexed item[0] and crashed on empty strings.
                print("")
| clarkkarenl/codingdojo_python_track | draw-stars.py | draw-stars.py | py | 1,388 | python | en | code | 0 | github-code | 36 |
17102455910 | # https://edabit.com/challenge/xG2KB9T7mHgycGCSz
def valid(pin):
    """Return True for a PIN of exactly 4 or 6 characters, all digits.

    Bug fixed: the original 'len == 4 or len == 6 and pin.isdigit()' bound
    'and' tighter than 'or', so any 4-character string passed regardless of
    content.
    """
    return len(pin) in (4, 6) and pin.isdigit()
'''print(valid("1234"))
print(valid("45135"))
print(valid("89abc1"))
print(valid("900876"))
print(valid(" 4983"))
print(valid(" "))'''
# Fixture: (pin, expected validity) pairs — both lengths, bad chars, whitespace.
tests = [
    ['123456', True],
    ['4512a5', False],
    ['', False],
    ['21904', False],
    ['9451', True],
    ['213132', True],
    [' 4520', False],
    ['15632 ', False],
    ['000000', True]
]
for test in tests:
if valid(test[0]) == test[1]:
print(test[1])
else:
print("Issue found") | amrmabdelazeem/edabit | Python/Validate Pin.py | Validate Pin.py | py | 631 | python | en | code | 0 | github-code | 36 |
39005966519 | n = input()
# Lucky-ticket check: compare the digit sums of the front and back halves of n.
div_idx = len(n) // 2
sum1 = 0  # digit sum of the front half
sum2 = 0  # digit sum of the back half
for i in range(div_idx):
    sum1 += int(n[i])
    sum2 += int(n[-(i+1)])
if sum1 == sum2:
    print("LUCKY")
else:
    print("READY")
| daeyoungshinme/algorithm | 백준/구현/boj18406.py | boj18406.py | py | 190 | python | en | code | 0 | github-code | 36 |
35609284688 | from dataclasses import dataclass
from queue import Empty
import queue
import cv2, time, os
import numpy as np
import torch.multiprocessing as mp
from ..util.profiler import Profiler
from .twitch_realtime_handler import (
TwitchAudioGrabber,
TwitchImageGrabber
)
from .youtube_recoder.image_recoder import YoutubeImageRecoder
# Stream URLs used as capture targets for testing/demo runs.
TW_SHARK = 'https://twitch.tv/tizmtizm'
TW_MARU = 'https://www.twitch.tv/maoruya'
TW_PIANOCAT = 'https://www.twitch.tv/pianocatvr'
TW_RUMYONG = 'https://www.twitch.tv/lumyon3'
TW_MAOU = 'https://www.twitch.tv/mawang0216'
TW_DALTA = 'https://www.twitch.tv/dalta_23'
TW_VIICHAN = 'https://www.twitch.tv/viichan6'
TW_ZURURU = 'https://www.twitch.tv/cotton__123'
TW_SHYLILY = 'https://www.twitch.tv/shylily'
TW_DANCINGSANA = 'https://www.twitch.tv/dancingshana'
@dataclass
class RecoderEntry:
    """One captured batch: an audio segment plus the matching video frames."""
    index: int
    # presumably (samples, channels) — see the '(22000,2)' note in proc_main
    audio_segment: np.ndarray
    # stacked frames, first axis is frame index; None signals reader EOF
    frames: np.ndarray
    fps: float
    profiler: Profiler
class TwitchRecoder:
    """Captures a live Twitch/YouTube stream in a child process.

    Every ``batch_sec`` seconds of video frames plus the matching audio
    segment are bundled into a RecoderEntry and either handed to the
    ``on_queue`` callback or pushed onto ``self.queue`` for ``get()``.
    """

    def __init__(self, target_url=TW_MARU, batch_sec=1, fps=24, on_queue=None, quality='1080p', buffer_size=1, audio_skip=0):
        assert isinstance(batch_sec, int)
        self.url = target_url
        self.batch_sec = batch_sec
        self.fps = fps
        # Bounded output queue: the producer drops batches when the consumer lags.
        self.queue = mp.Queue(maxsize=buffer_size)
        self.cmd_queue = mp.Queue()
        self.on_queue = on_queue
        self.output_shape = None
        self.frame_count = 0
        self.quality = quality
        self.audio_skip = audio_skip
        if(audio_skip > 0):
            # Delay line: audio lags video by audio_skip segments.
            self.audio_queue = mp.Queue(maxsize=audio_skip)

    def __getstate__(self):
        """Drop the child-process handle so the object stays picklable."""
        state = self.__dict__.copy()
        if 'proc' in state:
            del state["proc"]
        return state

    def proc_main(self):
        """Child-process loop: grab frames + audio until 'exit' arrives or EOF."""
        print('TwitchRecoder: TwitchImageGrabber init')
        if 'youtube' in self.url:
            image_grabber = YoutubeImageRecoder(
                url=self.url,
                quality=self.quality,
                rate=self.fps,
            )
        else:
            image_grabber = TwitchImageGrabber(
                twitch_url=self.url,
                quality=self.quality, # quality of the stream could be ["160p", "360p", "480p", "720p", "720p60", "1080p", "1080p60"]
                blocking=True,
                rate=self.fps # frame per rate (fps)
            )
        # change to a stream that is actually online
        print('TwitchRecoder: TwitchAudioGrabber init')
        audio_grabber = TwitchAudioGrabber(
            twitch_url=self.url,
            blocking=True, # wait until a segment is available
            segment_length=int(self.batch_sec), # segment length in seconds
            rate=44100, # sampling rate of the audio
            channels=2, # number of channels
            dtype=np.float32 # quality of the audio could be [np.int16, np.int32, np.float32, np.float64]
        )
        t = time.time()
        t_sum = []
        index = 0
        while True:
            # Non-blocking poll for the 'exit' command from the parent.
            try:
                cmd = self.cmd_queue.get_nowait()
                if cmd == 'exit':
                    print('TwitchRecoder: Get exit')
                    self.cmd_queue.close()
                    break
                else: raise Exception()
            except Empty:
                pass
            #print('ff')
            # Grab one batch worth of frames; a None frame marks stream EOF.
            frames = []
            reader_eof = False
            for i in range(self.batch_sec * self.fps):
                frame = image_grabber.grab()
                if frame is None:
                    print('frame recoded none EOF')
                    reader_eof = True
                    break
                #raise Exception('frame recodered None!')
                # print(f'grabbed {self.frame_count}, {frame[0,0,0]}')
                if self.output_shape is not None:
                    frame = cv2.resize(frame, dsize=[self.output_shape[1], self.output_shape[0]], interpolation=cv2.INTER_AREA)
                frame = cv2.putText(frame, f"Received: {self.frame_count} frames", (10, 32), cv2.FONT_HERSHEY_PLAIN, 0.5, (255,0,0), 1)
                self.frame_count += 1
                frames.append(frame)
            if reader_eof:
                # EOF sentinel entry: both payload fields are None.
                entry = RecoderEntry(
                    index=index,
                    audio_segment=None, #(22000,2)
                    frames=None, #(24, 1080, 1920,3) -> (24, 2160, 3840, 3)
                    fps=self.fps,
                    profiler=Profiler()
                )
                entry.profiler.start('recoder.output')
                if self.on_queue is not None:
                    self.on_queue(entry)
                else:
                    try:
                        self.queue.put_nowait(entry)
                    except queue.Full:
                        print(f'TwitchRecoder: output queue is full. Is consumer too slow?')
                break
            if len(frames) == 0:
                print(f'TwitchRecoder: frame does not recorded...')
                continue
            #print('f')
            audio_segment = audio_grabber.grab()
            if self.audio_skip > 0:
                # Prime the delay queue, then emit the oldest buffered segment.
                while self.audio_queue.qsize() < self.audio_skip:
                    self.audio_queue.put(audio_segment.copy())
                audio_segment = self.audio_queue.get()
            frames = np.stack(frames, axis=0)
            # Rolling average of batch capture time over the last 100 batches.
            t_sum.append(time.time()-t)
            if len(t_sum) > 100:
                t_sum.pop(0)
            t_avg = sum(t_sum)/len(t_sum)
            print(f'TwitchRecoder: batch[{index}] captured took average {t_avg:.2f} sec. Audio[{audio_segment.shape}] Video[{frames.shape}]')
            t = time.time()
            entry = RecoderEntry(
                index=index,
                audio_segment=audio_segment, #(22000,2)
                frames=frames, #(24, 1080, 1920,3) -> (24, 2160, 3840, 3)
                fps=self.fps,
                profiler=Profiler()
            )
            entry.profiler.start('recoder.output')
            if self.on_queue is not None:
                self.on_queue(entry)
            else:
                try:
                    self.queue.put_nowait(entry)
                except queue.Full:
                    print(f'TwitchRecoder: output queue is full. Is consumer too slow?')
            index += 1
        print('TwitchRecoder: try term img')
        image_grabber.terminate()
        print('TwitchRecoder: try term audio')
        audio_grabber.terminate()
        print('TwitchRecoder: exit subproc')
        os.kill(os.getpid(), 9)

    def start(self):
        """Launch proc_main in a daemon child process."""
        self.proc = mp.Process(target=self.proc_main, daemon=True)
        self.proc.start()

    def get(self) -> RecoderEntry:
        """Block until the next captured batch is available."""
        return self.queue.get()

    def stop(self):
        """Ask the child to exit, close the output queue and wait for it."""
        self.cmd_queue.put("exit")
        self.queue.close()
        print('TwitchRecoder: joining all subprocs')
        self.join()
        print('TwitchRecoder: joined subprocs')

    def join(self):
        """Wait for the capture process to finish (valid after start())."""
        self.proc.join()
# Smoke test: record ten batches from a live channel and dump frames as PNGs.
if __name__ == '__main__':
    print('asdf')
    recoder = TwitchRecoder(target_url=TW_MAOU, quality='1080p60')
    recoder.start()
    time.sleep(3)
    if not os.path.exists('./saves/frames/'): os.mkdir('./saves/frames/')
    j = 0
    for i in range(10):
        batch = recoder.queue.get(timeout=30) #type: RecoderEntry
        for k in range(batch.frames.shape[0]):
            # Frames are RGB in memory; convert to BGR for cv2.imwrite.
            cv2.imwrite(f"saves/frames/{j:04}.png", cv2.cvtColor(batch.frames[k], cv2.COLOR_RGB2BGR))
            j += 1
        print(f"{i} batch get. {batch.frames.shape}")
recoder.stop() | gmlwns2000/sharkshark-4k | src/stream/recoder.py | recoder.py | py | 7,577 | python | en | code | 14 | github-code | 36 |
32805043142 | # henlo.py
# created on November 13, 2018
# by Gull
def hello():
    """Greet the user by the name they type in."""
    # input() already returns a str; the old str() wrapper was redundant.
    fren = input("what is your name, friend? ")  # get a name for variable fren
    print("hello,", fren, ", and welcome to henlo.py!")  # say hello to fren
    print("how you doing today?")
hello() #hello!
| gullwv/pyprojects | henlo.py | henlo.py | py | 266 | python | en | code | 1 | github-code | 36 |
36570521283 | import json
import math
import re
import os
import boto
import tinys3
import random
from django.shortcuts import render, redirect
from django.http.response import HttpResponse, HttpResponseRedirect
from django.contrib.auth.decorators import login_required
from django.conf import settings
from django.utils import timezone
from datetime import date, datetime
from django.db.models import Q, F, Case, When, Value
from django.urls import reverse
from django.template import loader, Template, Context
from django.db.models import Count
from django.core import serializers
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
from django.contrib.auth import authenticate, login
from django.utils.crypto import get_random_string
from django.template.defaultfilters import slugify
from django.http import QueryDict
from django.contrib.auth import load_backend
from mpcomp.views import (
jobseeker_login_required,
get_prev_after_pages_count,
get_valid_skills_list,
get_meta_data,
get_valid_locations_list,
get_social_referer,
get_resume_data,
handle_uploaded_file,
get_valid_qualifications,
get_meta,
get_ordered_skill_degrees,
get_404_meta,
)
from peeldb.models import (
JobPost,
AppliedJobs,
MetaData,
User,
City,
Industry,
Skill,
Subscriber,
VisitedJobs,
State,
TechnicalSkill,
Company,
UserEmail,
Qualification,
)
from pjob.calendar_events import (
create_google_calendar_event,
get_calendar_events_list,
delete_google_calendar_event,
get_service,
)
from psite.forms import (
SubscribeForm,
UserEmailRegisterForm,
UserPassChangeForm,
AuthenticationForm,
ForgotPassForm,
)
from .refine_search import refined_search
from django.db.models import Prefetch
from django.core.cache import cache
from dashboard.tasks import save_search_results, send_email
# Calendar months used by the archive/pagination helpers below
# (1-based ids matching datetime month numbers).
months = [
    {"Name": "Jan", "id": 1},
    {"Name": "Feb", "id": 2},
    {"Name": "Mar", "id": 3},
    {"Name": "Apr", "id": 4},
    {"Name": "May", "id": 5},
    {"Name": "Jun", "id": 6},
    {"Name": "Jul", "id": 7},
    {"Name": "Aug", "id": 8},
    {"Name": "Sep", "id": 9},
    {"Name": "Oct", "id": 10},
    {"Name": "Nov", "id": 11},
    {"Name": "Dec", "id": 12},
]
def get_page_number(request, kwargs, no_pages):
    """Return the requested page as an int, or False when it is invalid.

    The page comes from POST (falling back to the ``page_num`` URL kwarg,
    defaulting to 1). Page 1 is always accepted; any other value must fall
    within 1..no_pages.
    """
    raw = request.POST.get("page") or kwargs.get("page_num", 1)
    try:
        page = int(raw)
    except (TypeError, ValueError):  # narrowed from a bare except
        return False
    if page == 1 or 0 < page <= no_pages:
        return page
    return False
def get_next_year(year, current_year):
    """Next archive year, or '' once we are already one year past current."""
    return "" if year == current_year + 1 else year + 1
def get_prev_year(year, current_year):
    """Previous archive year, or '' once we are already one year before current."""
    return "" if year == current_year - 1 else year - 1
def get_next_month(month, year, current_year):
    """Month dict following *month*; wraps Dec->Jan only when a next year exists."""
    current_id = month["id"]
    if current_id == 12:
        if not get_next_year(year, current_year):
            return ""
        target = 1
    else:
        target = current_id + 1
    return next((entry for entry in months if entry["id"] == target), None)
def get_prev_month(month, year, current_year):
    """Month dict preceding *month*; wraps Jan->Dec only when a previous year exists."""
    current_id = month["id"]
    if current_id == 1:
        if not get_prev_year(year, current_year):
            return ""
        target = 12
    else:
        target = current_id - 1
    return next((entry for entry in months if entry["id"] == target), None)
def _unique_subscriber_code(field):
    """Return a random 15-char code unique for the given Subscriber field."""
    while True:
        code = get_random_string(length=15)
        if not Subscriber.objects.filter(**{field + "__iexact": code}):
            return code


def subscribers_creation_with_skills(email, skill, user):
    """Create Subscriber rows for (email, skill), attaching them to *user*.

    Anonymous subscriptions (user=None) matching the email/skill are migrated
    to the authenticated user and deleted. Returns the subscribe_code of the
    last subscriber created, or None when nothing was created (previously this
    path raised NameError on an unbound 'sub').

    Bug fixed: the subscribe_code uniqueness loop used to check the freshly
    generated *unsubscribe* code against the subscribe_code field, so duplicate
    subscribe codes could slip through.
    """
    sub = None
    anonymous = Subscriber.objects.filter(email=email, user=None, skill=skill)
    if anonymous:
        for each in anonymous:
            if user:
                sub = Subscriber.objects.create(
                    email=each.email, skill=each.skill, user=user
                )
                sub.unsubscribe_code = _unique_subscriber_code("unsubscribe_code")
                sub.subscribe_code = _unique_subscriber_code("subscribe_code")
                sub.save()
                each.delete()
    else:
        if user:
            sub = Subscriber.objects.create(email=email, skill=skill, user=user)
        else:
            sub = Subscriber.objects.create(email=email, skill=skill)
        sub.unsubscribe_code = _unique_subscriber_code("unsubscribe_code")
        sub.subscribe_code = _unique_subscriber_code("subscribe_code")
        sub.save()
    return sub.subscribe_code if sub else None
def jobs_applied(request):
    """Job seeker's applied-jobs listing, paginated 15 per page.

    Falls back to skill/location-based suggestions when the user has not
    applied to anything yet. Non job-seekers are redirected home.
    """
    if request.user.is_authenticated and request.user.user_type == "JS":
        request.session["formdata"] = ""
        # Excludes rows with empty ip/user_agent — presumably legacy or
        # synthetic entries; confirm against how AppliedJobs rows are created.
        applied_jobs = AppliedJobs.objects.filter(user=request.user).exclude(
            ip_address="", user_agent=""
        )
        suggested_jobs = []
        if not applied_jobs:
            user_skills = Skill.objects.filter(
                id__in=request.user.skills.all().values("skill")
            )
            suggested_jobs = JobPost.objects.filter(
                Q(skills__in=user_skills) | Q(location__in=[request.user.current_city])
            )
            suggested_jobs = list(suggested_jobs.filter(status="Live"))
            # Pad the suggestions with the latest live posts.
            suggested_jobs = suggested_jobs + list(
                JobPost.objects.filter(status="Live").order_by("-published_on")[:10]
            )
        items_per_page = 15
        no_of_jobs = applied_jobs.count()
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        if (
            "page" in request.GET
            and bool(re.search(r"[0-9]", request.GET.get("page")))
            and int(request.GET.get("page")) > 0
        ):
            # Out-of-range pages bounce back to the first listing page.
            if int(request.GET.get("page")) > (no_pages + 2):
                page = 1
                return HttpResponseRedirect(reverse("jobs:jobs_applied"))
            else:
                page = int(request.GET.get("page"))
        else:
            page = 1
        ids = applied_jobs.values_list("job_post", flat=True)
        applied_jobs = JobPost.objects.filter(id__in=ids)
        applied_jobs = applied_jobs[(page - 1) * items_per_page : page * items_per_page]
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        data = {
            "applied_jobs": applied_jobs,
            "year": date.today().year,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "suggested_jobs": suggested_jobs[:10],
        }
        template = "candidate/applied_jobs.html"
        return render(request, template, data)
    else:
        return HttpResponseRedirect("/")
def job_detail(request, job_title_slug, job_id):
    """Render the detail page for one job post.

    Non-numeric/missing ids 404; a slug mismatch redirects to the canonical
    URL; live jobs record visit + social-referrer view counts; disabled jobs
    redirect to a related skill page or the job index.
    """
    if not job_id or bool(re.search(r"[A-Za-z]", job_id)):
        reason = "The URL may be misspelled or the page you're looking for is no longer available."
        template = "404.html"
        return render(
            request,
            template,
            {"message": "Sorry, No Jobs Found", "job_search": True, "reason": reason},
            status=404,
        )
    job = (
        JobPost.objects.filter(id=job_id)
        .select_related("company", "user")
        .prefetch_related(
            "location",
            "skills",
            "industry",
            "functional_area",
            "job_interview_location",
        )
        .first()
    )
    if job:
        # Canonicalise the URL when the slug in the request doesn't match.
        if str(job.get_absolute_url()) != str(request.path):
            return redirect(job.get_absolute_url(), permanent=False)
        if job.status == "Live":
            if request.user.is_authenticated:
                visited_jobs = VisitedJobs.objects.filter(
                    user=request.user, job_post=job
                )
                if not visited_jobs:
                    VisitedJobs.objects.create(user=request.user, job_post=job)
            # Attribute the view to the social network that referred it.
            field = get_social_referer(request)
            if field == "fb":
                job.fb_views += 1
            elif field == "tw":
                job.tw_views += 1
            elif field == "ln":
                job.ln_views += 1
            else:
                job.other_views += 1
            job.save()
        elif job.status == "Disabled":
            if job.major_skill and job.major_skill.status == "Active":
                return HttpResponseRedirect(job.major_skill.get_job_url())
            elif job.skills.filter(status="Active").exists():
                return HttpResponseRedirect(
                    job.skills.filter(status="Active").first().get_job_url()
                )
            return HttpResponseRedirect(reverse("jobs:index"))
        else:
            template = "404.html"
            return render(
                request,
                template,
                {
                    "message": "Sorry, No Jobs Found",
                    "job_search": True,
                    "reason": "The URL may be misspelled or the page you're looking for is no longer available.",
                },
                status=404,
            )
        # Only reachable for Live jobs, so 'field' is always bound here.
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        meta_title = meta_description = ""
        meta = MetaData.objects.filter(name="job_detail_page")
        if meta:
            meta_title = Template(meta[0].meta_title).render(Context({"job": job}))
            meta_description = Template(meta[0].meta_description).render(
                Context({"job": job})
            )
        template = "jobs/detail.html"
        data = {
            "job": job,
            "show_pop_up": show_pop,
            "meta_title": meta_title,
            "meta_description": meta_description,
        }
        return render(request, template, data)
    else:
        # Unknown but plausible (older) ids go back to the index permanently.
        latest = JobPost.objects.order_by("id").last().id
        if int(job_id) < latest:
            return redirect(reverse("jobs:index"), permanent=True)
        message = "Sorry, no jobs available"
        reason = "Unfortunately, we are unable to locate the job you are looking for"
        template = "404.html"
        return render(
            request,
            template,
            {"message": message, "reason": reason, "job_search": True},
            status=404,
        )
def recruiter_profile(request, recruiter_name, **kwargs):
    """Render a recruiter's profile with their live job posts, 10 per page.

    Query-string pagination and the legacy /jobs/recruiter/ path are
    permanently redirected to their canonical URL forms.
    """
    current_url = reverse(
        "recruiter_profile", kwargs={"recruiter_name": recruiter_name}
    )
    # Page 1 is the canonical bare URL.
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    # Legacy URL scheme: /jobs/recruiter/<name>/ -> /recruiters/<name>/
    if re.match(
        r"^/jobs/recruiter/(?P<recruiter_name>[a-zA-Z0-9_-]+)/", request.get_full_path()
    ):
        url = (
            request.get_full_path()
            .replace("jobs/", "")
            .replace("recruiter", "recruiters")
        )
        return redirect(url, permanent=True)
    job_list = (
        JobPost.objects.filter(user__username__iexact=recruiter_name, status="Live")
        .select_related("company", "user")
        .prefetch_related("location", "skills", "industry")
        .order_by("-published_on")
        .distinct()
    )
    no_of_jobs = job_list.count()
    user = User.objects.filter(username__iexact=recruiter_name).prefetch_related(
        "technical_skills", "functional_area", "industry"
    )
    if user:
        items_per_page = 10
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        job_list = job_list[(page - 1) * items_per_page : page * items_per_page]
        # SEO meta fields come from editable templates stored in MetaData.
        meta_title = meta_description = h1_tag = ""
        meta = MetaData.objects.filter(name="recruiter_profile")
        if meta:
            meta_title = Template(meta[0].meta_title).render(
                Context({"current_page": page, "user": user[0]})
            )
            meta_description = Template(meta[0].meta_description).render(
                Context({"current_page": page, "user": user[0]})
            )
            h1_tag = Template(meta[0].h1_tag).render(
                Context({"current_page": page, "user": user[0]})
            )
        template = "jobs/recruiter_profile.html"
        return render(
            request,
            template,
            {
                "user": user[0],
                "job_list": job_list,
                "aft_page": aft_page,
                "after_page": after_page,
                "prev_page": prev_page,
                "previous_page": previous_page,
                "current_page": page,
                "last_page": no_pages,
                "no_of_jobs": no_of_jobs,
                "current_url": current_url,
                "meta_title": meta_title,
                "meta_description": meta_description,
                "h1_tag": h1_tag,
            },
        )
    else:
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Sorry, Recruiter profile unavailable",
                "data_empty": True,
                "reason": "Unfortunately, we are unable to locate the recruiter you are looking for",
            },
            status=404,
        )
def recruiters(request, **kwargs):
    """List recruiters ordered by job-post count, 45 per page.

    Supports an optional POSTed 'alphabet_value' first-letter filter; page-1
    and query-string pagination URLs are permanently canonicalised.
    """
    if kwargs.get("page_num") == "1":
        return redirect(reverse("recruiters"), permanent=True)
    if "page" in request.GET:
        url = reverse("recruiters") + "page/" + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    # NOTE(review): '&' binds tighter than '|' in Q expressions, so the
    # is_active/mobile_verified filter applies only to the "AA" branch here —
    # confirm whether it was meant to cover all three user types.
    recruiters_list = (
        User.objects.filter(
            Q(user_type="RR")
            | Q(user_type="AR")
            | Q(user_type="AA") & Q(is_active=True, mobile_verified=True)
        )
        .annotate(num_posts=Count("jobposts"))
        .prefetch_related("company")
        .order_by("-num_posts")
    )
    if request.POST.get("alphabet_value"):
        recruiters_list = recruiters_list.filter(
            username__istartswith=request.POST.get("alphabet_value")
        )
    items_per_page = 45
    no_pages = int(math.ceil(float(len(recruiters_list)) / items_per_page))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        return HttpResponseRedirect("/recruiters/")
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    recruiters_list = recruiters_list[
        (page - 1) * items_per_page : page * items_per_page
    ]
    meta_title, meta_description, h1_tag = get_meta("recruiters_list", {"page": page})
    template = "jobs/recruiters_list.html"
    return render(
        request,
        template,
        {
            "recruiters": recruiters_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "current_url": reverse("recruiters"),
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
        },
    )
def index(request, **kwargs):
    """Job-list landing page: live jobs, 20 per page.

    Canonicalises pagination URLs first (``?page=N`` and ``/1/`` redirect
    permanently), then delegates filtering to ``refined_search`` — with
    the POSTed refinements when the refine form was submitted, otherwise
    with an empty query (which is expected to match every live job —
    NOTE(review): confirm against refined_search's contract).
    """
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(reverse("jobs:index"), permanent=True)
    if "page" in request.GET:
        url = reverse("jobs:index") + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    # jobs_list = JobPost.objects.filter(
    #     status='Live').select_related('company', 'user').prefetch_related(
    #     'location', 'skills', 'industry').distinct()
    searched_locations = (
        searched_skills
    ) = searched_industry = searched_edu = searched_states = ""
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    else:
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search({})
    no_of_jobs = jobs_list.count()
    items_per_page = 20
    no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        # Out-of-range or invalid page number: bounce to the first page.
        return HttpResponseRedirect(reverse("jobs:index"))
    jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    # Show the social pop-up only to visitors arriving from Facebook,
    # Twitter or LinkedIn.
    field = get_social_referer(request)
    show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
    meta_title, meta_description, h1_tag = get_meta("jobs_list_page", {"page": page})
    data = {
        "job_list": jobs_list,
        "aft_page": aft_page,
        "after_page": after_page,
        "prev_page": prev_page,
        "previous_page": previous_page,
        "current_page": page,
        "last_page": no_pages,
        "no_of_jobs": no_of_jobs,
        "is_job_list": True,
        "current_url": reverse("jobs:index"),
        "show_pop_up": show_pop,
        "searched_skills": searched_skills,
        "searched_locations": searched_locations,
        "searched_industry": searched_industry,
        "searched_experience": request.POST.get("experience"),
        "searched_edu": searched_edu,
        "searched_states": searched_states,
        "searched_job_type": request.POST.get("job_type"),
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    template = "jobs/jobs_list.html"
    return render(request, template, data)
def job_locations(request, location, **kwargs):
    """Live jobs for a city or state slug, 20 per page.

    Resolution order: POSTed refine form > state slug match > valid city
    list derived from the slug > empty result. When nothing matches, a
    404-style page is rendered (HTTP 200 if the location itself was
    valid, 404 otherwise).
    """
    current_url = reverse("job_locations", kwargs={"location": location})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    request.session["formdata"] = ""
    final_location = get_valid_locations_list(location)
    state = State.objects.filter(slug__iexact=location)
    if request.POST.get("refine_search") == "True":
        (
            job_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
        # Fold any refined state names into the location list used for
        # meta-tag generation below.
        final_location = final_location + list(
            searched_states.values_list("name", flat=True)
        )
    elif state:
        # Slug matched a state: search by state name.
        final_location = [state[0].name]
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_state", [state[0].name])
        (
            job_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    elif final_location:
        # Slug resolved to one or more known cities.
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_location", final_location)
        if request.POST.get("experience"):
            search_dict.update(
                {"refine_experience_min": request.POST.get("experience")}
            )
        (
            job_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    else:
        # Unknown location: plain list so the falsy checks below work.
        job_list = []
    if request.POST.get("location"):
        # Log the search asynchronously (Celery task).
        save_search_results.delay(
            request.META["REMOTE_ADDR"],
            request.POST,
            job_list.count() if job_list else 0,
            request.user.id,
        )
    if job_list:
        items_per_page = 20
        # NOTE(review): these wipe the values returned by refined_search
        # before they are put in the template context — presumably
        # intentional for this page, but confirm.
        searched_industry = searched_skills = searched_edu = ""
        if request.GET.get("job_type"):
            job_list = job_list.filter_and(job_type__in=[request.GET.get("job_type")])
        no_of_jobs = job_list.count()
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            return HttpResponseRedirect(current_url)
        jobs_list = job_list[(page - 1) * items_per_page : page * items_per_page]
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        # Social pop-up only for Facebook/Twitter/LinkedIn referrals.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        meta_title, meta_description, h1_tag = get_meta_data(
            "location_jobs",
            {
                "locations": searched_locations,
                "final_location": set(final_location),
                "page": page,
                "state": bool(state),
            },
        )
        data = {
            "job_list": jobs_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "current_url": current_url,
            "skill_jobs": True,
            "show_pop_up": show_pop,
            "searched_skills": searched_skills,
            "searched_locations": searched_locations,
            "searched_states": searched_states,
            "searched_industry": searched_industry,
            "searched_experience": request.POST.get("experience"),
            "searched_edu": searched_edu,
            "searched_job_type": request.POST.get("job_type"),
            "searched_functional_area": request.POST.get("functional_area"),
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
            "state": state.first(),
        }
        template = "jobs/jobs_list.html"
        return render(request, template, data)
    else:
        # No jobs: valid location gets a soft empty page (200), an
        # unrecognised one a real 404.
        if final_location:
            search = final_location
            status = 200
            meta_title, meta_description = get_404_meta(
                "location_404", {"city": search}
            )
        else:
            search = [location]
            status = 404
            meta_title = meta_description = ""
        reason = "Only Cities/States names are accepted in location field"
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Unfortunately, we are unable to locate the jobs you are looking for",
                "meta_title": meta_title,
                "meta_description": meta_description,
                "job_search": True,
                "reason": reason,
                "searched_locations": search,
                "data_empty": status != 200,
            },
            status=status,
        )
def list_deserializer(key, value, flags):
    """pymemcache deserializer: turn a cached list literal back into a
    list of stripped, non-empty strings.

    ``key`` and ``flags`` are part of the pymemcache deserializer
    signature and are ignored here.
    """
    import ast

    literal = ast.literal_eval(value.decode("utf-8"))
    return [item.strip() for item in literal if item.strip()]
def job_skills(request, skill, **kwargs):
    """Live jobs for a skill (or qualification) slug, 20 per page.

    Resolving the slug into valid skill/qualification name lists is
    cached in memcached for 24 hours. Meta tags are picked depending on
    whether the match was by education, by skill, or both. When nothing
    matches, a 404-style page is rendered (HTTP 200 if the slug itself
    was valid, 404 otherwise).
    """
    # from pymemcache.client.base import Client
    # from pymemcache import serde
    # client = Client(('127.0.0.1', 11211),
    # serializer=serde.python_memcache_serializer,
    # deserializer=serde.python_memcache_deserializer)
    from pymemcache.client.base import Client
    client = Client(("localhost", 11211), deserializer=list_deserializer)
    current_url = reverse("job_skills", kwargs={"skill": skill})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    # Resolve the slug to valid skill names (24h memcached cache).
    final_skill = client.get("final_skill" + skill)
    if not final_skill:
        final_skill = get_valid_skills_list(skill)
        client.set("final_skill" + skill, final_skill, expire=60 * 60 * 24)
    # NOTE(review): a cached empty list apparently comes back as the raw
    # bytes b"[]" rather than [] — normalise; confirm the deserializer's
    # behaviour for empty values.
    if final_skill == b"[]":
        final_skill = []
    final_edu = client.get("final_edu" + skill)
    if not final_edu:
        final_edu = get_valid_qualifications(skill)
        client.set("final_edu" + skill, final_edu, expire=60 * 60 * 24)
    if final_edu == b"[]":
        final_edu = []
    if request.POST.get("refine_search") == "True":
        (
            job_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    else:
        search_dict = QueryDict("", mutable=True)
        if final_skill or final_edu:
            search_dict.setlist("refine_skill", final_skill)
            search_dict.setlist("refine_education", final_edu)
        else:
            # Unknown slug: search for it verbatim as a skill name.
            search_dict.setlist("refine_skill", [skill])
        if request.POST.get("experience"):
            search_dict.update(
                {"refine_experience_min": request.POST.get("experience")}
            )
        (
            job_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    # Human-readable list of the matched skill/degree names, ordered to
    # mirror the slug.
    searched_text = get_ordered_skill_degrees(
        skill,
        searched_skills.filter(name__in=final_skill),
        searched_edu.filter(name__in=final_edu),
    )
    if request.POST.get("q"):
        # Log the search asynchronously (Celery task).
        save_search_results.delay(
            request.META["REMOTE_ADDR"], request.POST, job_list.count(), request.user.id
        )
    if job_list.count() > 0:
        if request.GET.get("job_type"):
            job_list = job_list.filter_and(job_type__in=[request.GET.get("job_type")])
        no_of_jobs = job_list.count()
        no_pages = int(math.ceil(float(no_of_jobs) / 20))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            return HttpResponseRedirect(current_url)
        jobs_list = job_list[(page - 1) * 20 : page * 20]
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        # Social pop-up only for Facebook/Twitter/LinkedIn referrals.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        meta_title = meta_description = h1_tag = ""
        final_edu = ", ".join(final_edu)
        # Meta tags depend on whether the match was education-only,
        # skill+education, skill-only, or a verbatim (unresolved) slug.
        if searched_edu and not searched_skills:
            meta = MetaData.objects.filter(name="education_jobs")
            if meta:
                meta_title = Template(meta[0].meta_title).render(
                    Context({"current_page": page, "degree": final_edu})
                )
                meta_description = Template(meta[0].meta_description).render(
                    Context({"current_page": page, "degree": final_edu})
                )
                h1_tag = Template(meta[0].h1_tag).render(
                    Context({"current_page": page, "degree": final_edu})
                )
        elif searched_edu and searched_skills:
            meta = MetaData.objects.filter(name="skill_education_jobs")
            if meta:
                search = ", ".join(searched_text)
                meta_title = Template(meta[0].meta_title).render(
                    Context({"current_page": page, "search": search})
                )
                meta_description = Template(meta[0].meta_description).render(
                    Context({"current_page": page, "search": search})
                )
                h1_tag = Template(meta[0].h1_tag).render(
                    Context({"current_page": page, "search": search})
                )
        elif searched_skills:
            meta_title, meta_description, h1_tag = get_meta_data(
                "skill_jobs",
                {"skills": searched_skills, "final_skill": final_skill, "page": page},
            )
        else:
            meta_title, meta_description, h1_tag = get_meta_data(
                "skill_jobs", {"final_skill": [skill], "page": page}
            )
            searched_text = [skill]
        data = {
            "job_list": jobs_list,
            "current_url": current_url,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "show_pop_up": show_pop,
            "location_jobs": True,
            "searched_skills": searched_skills,
            "searched_locations": searched_locations,
            "searched_industry": searched_industry,
            "searched_edu": searched_edu,
            "searched_states": searched_states,
            "experience": request.POST.get("experience"),
            "searched_job_type": request.POST.get("job_type")
            or request.GET.get("job_type"),
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
            "searched_text": searched_text,
        }
        template = "jobs/jobs_list.html"
        return render(request, template, data)
    else:
        # No jobs: valid skill/degree gets a soft empty page (200), an
        # unrecognised slug a real 404. final_edu is still a list here
        # (the join above only happens on the jobs branch).
        if final_skill or final_edu:
            search = final_skill + final_edu
            status = 200
            meta_title, meta_description = get_404_meta("skill_404", {"skill": search})
        else:
            search = [skill]
            status = 404
            meta_title = meta_description = ""
        reason = "Only valid Skills/Qualifications names are accepted"
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Unfortunately, we are unable to locate the jobs you are looking for",
                "meta_title": meta_title,
                "meta_description": meta_description,
                "job_search": True,
                "reason": reason,
                "searched_skills": search,
                "data_empty": status != 200,
            },
            status=status,
        )
def job_industries(request, industry, **kwargs):
    """Live jobs belonging to an industry slug, 20 per page.

    Canonicalises pagination URLs, then searches live jobs restricted to
    the industry (or to the POSTed refinements when the refine form was
    submitted). Unknown slugs now fall through to the "no jobs" page
    instead of raising.
    """
    current_url = reverse("job_industries", kwargs={"industry": industry})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    searched_locations = searched_skills = searched_edu = searched_states = ""
    searched_industry = Industry.objects.filter(slug=industry)
    search_dict = QueryDict("", mutable=True)
    # BUGFIX: indexing an empty queryset raised IndexError for unknown
    # slugs before the "no jobs available" branch below could render.
    if searched_industry:
        search_dict.setlist("refine_industry", [searched_industry[0].name])
    if request.POST.get("refine_search") == "True":
        (
            job_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    else:
        (
            job_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    if job_list:
        no_of_jobs = job_list.count()
        items_per_page = 20
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            return HttpResponseRedirect(current_url)
        jobs_list = job_list[(page - 1) * items_per_page : page * items_per_page]
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        # Social pop-up only for Facebook/Twitter/LinkedIn referrals.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        meta_title = meta_description = h1_tag = ""
        meta = MetaData.objects.filter(name="industry_jobs")
        # Guard searched_industry too: refined_search may return an empty
        # industry queryset, and [0] would raise.
        if meta and searched_industry:
            meta_title = Template(meta[0].meta_title).render(
                Context({"current_page": page, "industry": searched_industry[0].name})
            )
            meta_description = Template(meta[0].meta_description).render(
                Context({"current_page": page, "industry": searched_industry[0].name})
            )
            h1_tag = Template(meta[0].h1_tag).render(
                Context({"current_page": page, "industry": searched_industry[0].name})
            )
        data = {
            "job_list": jobs_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "show_pop_up": show_pop,
            "current_url": current_url,
            "searched_skills": searched_skills,
            "searched_locations": searched_locations,
            "searched_industry": searched_industry,
            "searched_edu": searched_edu,
            "searched_states": searched_states,
            "searched_experience": request.POST.get("experience"),
            "searched_job_type": request.POST.get("job_type"),
            "searched_functional_area": request.POST.get("functional_area"),
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
        }
        template = "jobs/jobs_list.html"
        return render(request, template, data)
    else:
        # Known industry with zero jobs renders a soft empty page (200);
        # an unknown industry is a genuine 404.
        if searched_industry:
            reason = "No Jobs available with searched industry"
            meta_title, meta_description = get_404_meta(
                "industry_404", {"industry": industry}
            )
        else:
            reason = "Unable to locate the industry you are looking for"
            meta_title = meta_description = ""
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Unfortunately, we are unable to locate the jobs you are looking for",
                "meta_title": meta_title,
                "meta_description": meta_description,
                "job_search": True,
                "reason": reason,
                "data_empty": False if searched_industry else True,
            },
            status=200 if searched_industry else 404,
        )
def user_applied_job(request):
    """Remember the applied-for job id in the session and acknowledge with JSON."""
    request.session["job_id"] = request.POST.get("job_id")
    return HttpResponse(
        json.dumps({"error": False, "response": "User successfully applied for a job"})
    )
@login_required
def job_apply(request, job_id):
    """Apply the logged-in job seeker to a live job post.

    Returns a JSON payload with ``error``/``response`` keys in every
    case. On success, records the application, renders a notification
    email and sends it to the recruiter via Amazon SES, attaching the
    applicant's resume (downloaded from S3) when one exists.
    """
    # Only active (or explicitly "apply_now") job-seeker accounts may apply.
    if (
        request.user.is_active or request.GET.get("apply_now")
    ) and request.user.user_type == "JS":
        job_post = JobPost.objects.filter(id=job_id, status="Live").first()
        if job_post:
            if not AppliedJobs.objects.filter(user=request.user, job_post=job_post):
                if (
                    request.user.resume
                    or request.user.profile_completion_percentage >= 50
                ):
                    # need to check user uploaded a resume or not
                    AppliedJobs.objects.create(
                        user=request.user,
                        job_post=job_post,
                        status="Pending",
                        ip_address=request.META["REMOTE_ADDR"],
                        user_agent=request.META["HTTP_USER_AGENT"],
                    )
                    message = (
                        "Your Application successfully sent for "
                        + str(job_post.title)
                        + " at "
                        + job_post.company_name
                    )
                    # Render the recruiter-notification email body.
                    t = loader.get_template("email/applicant_apply_job.html")
                    c = {
                        "user": request.user,
                        "recruiter": job_post.user,
                        "job_post": job_post,
                    }
                    rendered = t.render(c)
                    if request.user.resume:
                        import urllib.request
                        # NOTE(review): downloads the resume into the
                        # process working directory, keyed by email —
                        # concurrent applies by the same user could
                        # collide; consider tempfile.
                        urllib.request.urlretrieve(
                            "https://peeljobs.s3.amazonaws.com/"
                            + str(
                                request.user.resume.encode("ascii", "ignore").decode(
                                    "ascii"
                                )
                            ),
                            str(request.user.email) + ".docx",
                        )
                    msg = MIMEMultipart()
                    msg["Subject"] = "Resume Alert - " + job_post.title
                    msg["From"] = settings.DEFAULT_FROM_EMAIL
                    msg["To"] = job_post.user.email
                    part = MIMEText(rendered, "html")
                    msg.attach(part)
                    if request.user.resume and os.path.exists(
                        str(request.user.email) + ".docx"
                    ):
                        # NOTE(review): file handle opened without a
                        # context manager; relies on GC to close it.
                        part = MIMEApplication(
                            open(str(request.user.email) + ".docx", "rb").read()
                        )
                        part.add_header(
                            "Content-Disposition",
                            "attachment",
                            filename=str(request.user.email) + ".docx",
                        )
                        msg.attach(part)
                        os.remove(str(request.user.email) + ".docx")
                    # NOTE(review): this first connection's return value is
                    # discarded — only the regional connection below is used.
                    boto.connect_ses(
                        aws_access_key_id=settings.AM_ACCESS_KEY,
                        aws_secret_access_key=settings.AM_PASS_KEY,
                    )
                    conn = boto.ses.connect_to_region(
                        "eu-west-1",
                        aws_access_key_id=settings.AM_ACCESS_KEY,
                        aws_secret_access_key=settings.AM_PASS_KEY,
                    )
                    # and send the message
                    conn.send_raw_email(
                        msg.as_string(), source=msg["From"], destinations=[msg["To"]]
                    )
                    data = {
                        "error": False,
                        "response": message,
                        "url": job_post.get_absolute_url(),
                    }
                    return HttpResponse(json.dumps(data))
                # else:
                #     data = {'error': True, 'response': 'Jobpost is already expired'}
                #     return HttpResponse(json.dumps(data))
                else:
                    data = {
                        "error": True,
                        "response": "Please complete your profile to apply for this job",
                        "url": reverse("my:profile"),
                    }
                    return HttpResponse(json.dumps(data))
            else:
                data = {"error": True, "response": "User already applied for this job"}
                return HttpResponse(json.dumps(data))
        data = {"error": True, "response": "Job you are searching not found"}
        return HttpResponse(json.dumps(data))
    # Non-job-seeker accounts: explain why the application was rejected.
    if request.user.user_type == "RR":
        data = {"error": True, "response": "Recruiter not allowed to apply for jobs"}
        return HttpResponse(json.dumps(data))
    if request.user.is_staff:
        data = {"error": True, "response": "Admin not allowed to apply for jobs"}
        return HttpResponse(json.dumps(data))
    data = {
        "error": True,
        "response": "You need to verify your e-mail to apply for this job",
    }
    return HttpResponse(json.dumps(data))
def unsubscribe(request, email, job_post_id):
    """Unsubscribe an email address from alerts tied to a job post's skills.

    GET renders a confirmation page listing the matching subscriptions;
    POST with ``is_delete == "True"`` removes them and answers with JSON.
    Unknown job posts render a 404 page.
    """
    job_post = JobPost.objects.filter(id=job_post_id)
    if not job_post:
        message = "Sorry, no jobs available"
        reason = "Unfortunately, we are unable to locate the job you are looking for"
        template = "404.html"
        return render(
            request, template, {"message": message, "reason": reason}, status=404
        )
    subscribers = Subscriber.objects.filter(
        email=email, skill__in=job_post[0].skills.all()
    )
    if request.method == "POST":
        confirmed = str(request.POST["is_delete"]) == "True"
        if confirmed:
            subscribers.delete()
        data = {
            "error": not confirmed,
            "response": "Please update your profile to apply for a job ",
        }
        return HttpResponse(json.dumps(data))
    return render(
        request, "unsubscribe.html", {"email": email, "subscribers": subscribers}
    )
# def year_calendar(request, year):
# if request.POST.get("year"):
# year = int(request.POST.get("year"))
# jobs_list = JobPost.objects.filter(status="Live")
# month = {"Name": "Jan", "id": 1}
# year = int(year)
# calendar_events = []
# # if request.user.is_authenticated:
# # calendar_events = get_calendar_events_list()
# meta_title, meta_description, h1_tag = get_meta("year_calendar", {"page": 1})
# return render(
# request,
# "calendar/year_calendar.html",
# {
# "months": months,
# "year": year,
# "prev_year": get_prev_year(year, year),
# "next_year": get_next_year(year, year),
# "post_data": "true" if request.POST else "false",
# "jobs_list": jobs_list,
# "calendar_type": "year",
# "month": month,
# "calendar_events": calendar_events,
# "meta_title": meta_title,
# "h1_tag": h1_tag,
# "meta_description": meta_description,
# },
# )
# def month_calendar(request, year, month):
# current_year = datetime.now().year
# year = current_year
# month = next((item for item in months if item["id"] == int(month)), None)
# calendar_events = []
# if request.user.is_authenticated:
# calendar_events = get_calendar_events_list(request)
# if request.method == "POST":
# if request.POST.get("year"):
# year = int(request.POST.get("year"))
# if request.POST.get("month"):
# month = next(
# (
# item
# for item in months
# if item["id"] == int(request.POST.get("month"))
# ),
# None,
# )
# # return HttpResponseRedirect(reverse('week_calendar',
# # kwargs={'year': year, 'month': month['id'], 'week':
# # request.POST.get('week')}))
# post_data = False
# if "status" in request.POST.keys():
# post_data = True
# meta_title, meta_description, h1_tag = get_meta("month_calendar", {"page": 1})
# jobs_list = JobPost.objects.filter(status="Live")
# return render(
# request,
# "calendar/year_calendar.html",
# {
# "requested_month": request.POST.get("month")
# if request.POST.get("month")
# else None,
# "months": months,
# "year": year,
# "month": month,
# "prev_year": get_prev_year(year, current_year),
# "next_year": get_next_year(year, current_year),
# "prev_month": get_prev_month(month, year, current_year),
# "next_month": get_next_month(month, year, current_year),
# "jobs_list": jobs_list,
# "calendar_type": "month",
# "post_data": post_data,
# "calendar_events": calendar_events,
# "meta_title": meta_title,
# "h1_tag": h1_tag,
# "meta_description": meta_description,
# },
# )
# def week_calendar(request, year, month, week):
# current_year = datetime.now().year
# year = current_year
# month = {"Name": "Jan", "id": 1}
# calendar_events = []
# if request.user.is_authenticated:
# calendar_events = get_calendar_events_list(request)
# if request.POST.get("year"):
# year = int(request.POST.get("year"))
# if request.POST.get("month"):
# month = next(
# (item for item in months if item["id"] == int(request.POST.get("month"))),
# None,
# )
# if request.POST.get("week"):
# week = int(request.POST.get("week"))
# jobs_list = JobPost.objects.filter(status="Live")
# meta_title, meta_description, h1_tag = get_meta("week_calendar", {"page": 1})
# return render(
# request,
# "calendar/year_calendar.html",
# {
# "months": months,
# "year": year,
# "prev_year": get_prev_year(year, year),
# "next_year": get_next_year(year, year),
# "post_data": "true" if request.POST else "false",
# "calendar_type": "week",
# "week": week,
# "month": month,
# "requested_month": month,
# "jobs_list": jobs_list,
# "calendar_events": calendar_events,
# "meta_title": meta_title,
# "h1_tag": h1_tag,
# "meta_description": meta_description,
# },
# )
def jobposts_by_date(request, year, month, date, **kwargs):
    """Calendar day view: live jobs whose last date falls on year/month/date.

    Also surfaces the user's Google Calendar events that overlap the day
    (matched back to JobPost rows by title). 404s when no job ends on
    that day; otherwise paginates 20 results per page.
    """
    current_url = reverse(
        "jobposts_by_date", kwargs={"year": year, "month": month, "date": date}
    )
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    import datetime
    day = datetime.date(int(year), int(month), int(date))
    results = JobPost.objects.filter(status="Live", last_date=day).order_by(
        "-published_on"
    )
    events = get_calendar_events_list(request) if request.user.is_authenticated else []
    event_titles = []
    # Collect calendar events whose date range covers the requested day.
    # NOTE(review): comparison is on ISO-formatted strings, which orders
    # correctly only if start/end dates are ISO strings — confirm the
    # event payload format.
    for event in events:
        if event.get("start_date") and event.get("end_date"):
            if str(day) >= str(event["start_date"]) and str(day) <= str(
                event["end_date"]
            ):
                event_titles.append(event["summary"])
    events = JobPost.objects.filter(title__in=event_titles)
    if not results:
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Sorry, no jobs available",
                "job_search": True,
                "data_empty": True,
                "reason": "Unfortunately, we are unable to locate the job you are looking for",
            },
            status=404,
        )
    no_pages = int(math.ceil(float(len(results)) / 20))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        return HttpResponseRedirect(current_url)
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    meta_title = meta_description = h1_tag = ""
    meta = MetaData.objects.filter(name="day_calendar")
    if meta:
        meta_title = Template(meta[0].meta_title).render(
            Context({"date": date, "searched_month": day.strftime("%B"), "year": year})
        )
        meta_description = Template(meta[0].meta_description).render(
            Context({"date": date, "searched_month": day.strftime("%B"), "year": year})
        )
        h1_tag = Template(meta[0].h1_tag).render(
            Context({"date": date, "month": day.strftime("%B"), "year": year})
        )
    return render(
        request,
        "calendar/calendar_day_results.html",
        {
            "no_of_jobs": len(results),
            "results": results[(page - 1) * 20 : page * 20],
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "month_num": day.month,
            "month": day.strftime("%B"),
            "year": year,
            "date": date,
            "current_url": current_url,
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
            "events": events,
        },
    )
def job_add_event(request):
    """Add a job post's last date to the user's Google Calendar.

    A POST stores the job id in the session so the flow can resume after
    the Google OAuth round-trip; anonymous users are redirected to Google
    login first. A follow-up request with the session flag set builds the
    calendar event and creates it via the Google Calendar API.
    """
    is_connected = True
    if request.POST:
        request.session["job_event"] = request.POST.get("job_id")
        if request.user.is_authenticated:
            service, is_connected = get_service(request)
        else:
            return HttpResponseRedirect(reverse("social:google_login"))
        if not is_connected:
            # get_service returned a redirect/error response instead of a
            # service handle; pass it straight through.
            return service
    elif request.session.get("job_event"):
        jobpost = JobPost.objects.get(id=request.session.get("job_event"))
        msg = ""
        # Use the venue details of the last interview location that is
        # marked as visible.
        for location in jobpost.job_interview_location.all():
            if location.show_location:
                msg = location.venue_details
        event = {
            "summary": str(jobpost.title),
            "location": str(msg),
            "description": str(jobpost.title),
            "start": {
                "date": str(jobpost.last_date),
                "timeZone": "Asia/Calcutta",
            },
            "end": {
                "date": str(jobpost.last_date),
                "timeZone": "Asia/Calcutta",
            },
            "recurrence": ["RRULE:FREQ=DAILY;COUNT=2"],
            "attendees": [
                {"email": str(request.user.email)},
            ],
            "reminders": {
                "useDefault": False,
                "overrides": [
                    {"method": "email", "minutes": 60 * 15},
                    {"method": "popup", "minutes": 60 * 15},
                ],
            },
        }
        response, created = create_google_calendar_event(request, request.user, event)
        if created == "redirect":
            return response
        elif created:
            # BUGFIX: this branch previously tested `elif redirect:` —
            # the imported Django `redirect` function, which is always
            # truthy — so the "?event=error" branch below was unreachable.
            request.session["job_event"] = ""
            return redirect(
                jobpost.get_absolute_url() + "?event=success", permanent=False
            )
        else:
            return redirect(
                jobpost.get_absolute_url() + "?event=error", permanent=False
            )
# def calendar_add_event(request):
# if request.method == "GET":
# return render(request, "calendar/add_calendar_event.html", {})
# start_date = datetime.strptime(
# str(request.POST.get("start_date")), "%m/%d/%Y"
# ).strftime("%Y-%m-%d")
# last_date = datetime.strptime(
# str(request.POST.get("to_date")), "%m/%d/%Y"
# ).strftime("%Y-%m-%d")
# event = {
# "summary": request.POST.get("title"),
# "location": request.POST.get("location"),
# "description": request.POST.get("description"),
# "start": {"date": str(start_date), "timeZone": "Asia/Calcutta",},
# "end": {"date": str(last_date), "timeZone": "Asia/Calcutta",},
# "recurrence": ["RRULE:FREQ=DAILY;COUNT=2"],
# "attendees": [{"email": str(request.user.email)},],
# "reminders": {
# "useDefault": False,
# "overrides": [
# {"method": "email", "minutes": 24 * 60},
# {"method": "popup", "minutes": 10},
# ],
# },
# }
# response = create_google_calendar_event(request.user, event)
# if response:
# data = {"error": False, "response": "Event successfully added"}
# else:
# data = {"error": True, "response": "Please Try again after some time"}
# return HttpResponse(json.dumps(data))
# def calendar_event_list(request):
# if request.method == "POST":
# event_id = request.POST.get("event_id")
# response = delete_google_calendar_event(event_id)
# if response:
# data = {"error": False, "response": "Event successfully Deleted"}
# else:
# data = {"error": True, "response": "Please Try again after some time"}
# return HttpResponse(json.dumps(data))
# events = get_calendar_events_list(request)
# return render(request, "calendar/calendar_event_list.html", {"events": events})
def jobs_by_location(request, job_type):
    """Render the browse-jobs-by-location page.

    Lists enabled states (with their enabled, non-child cities prefetched
    as ``active_cities``) plus the first ten active qualifications; a
    POSTed ``location`` narrows the state list by name.
    """
    all_degrees = Qualification.objects.filter(status="Active").order_by("name")
    states = (
        State.objects.annotate(
            num_locations=Count("state"),
            is_duplicate=Count(Case(When(state__name=F("name"), then=Value(1)))),
        )
        .filter(num_locations__gte=1, status="Enabled")
        .prefetch_related(
            Prefetch(
                "state",
                queryset=City.objects.filter(status="Enabled", parent_city=None),
                to_attr="active_cities",
            )
        )
    )
    if request.method == "POST":
        states = states.filter(name__icontains=request.POST.get("location"))
    meta_title = meta_description = h1_tag = ""
    meta = MetaData.objects.filter(name="jobs_by_location")
    if meta:
        ctx = Context({"job_type": job_type})
        meta_title = Template(meta[0].meta_title).render(ctx)
        meta_description = Template(meta[0].meta_description).render(ctx)
        h1_tag = Template(meta[0].h1_tag).render(ctx)
    context = {
        "states": states,
        "job_type": job_type,
        "all_degrees": all_degrees[:10],
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    return render(request, "jobs/jobs_by_location.html", context)
def jobs_by_skill(request):
    """Render the browse-jobs-by-skill index page.

    A POSTed ``alphabet_value`` other than "all" filters skills by their
    first letter; ``sorting_value == "descending"`` flips the name order.
    Ordering is only applied on POST, matching the original behaviour.
    """
    all_skills = Skill.objects.filter(status="Active")
    if request.method == "POST":
        letter = request.POST.get("alphabet_value")
        if str(letter) != "all":
            all_skills = all_skills.filter(name__istartswith=letter)
        sorting = request.POST.get("sorting_value")
        order = "-name" if sorting and str(sorting) == "descending" else "name"
        all_skills = all_skills.order_by(order)
    meta_title, meta_description, h1_tag = get_meta("jobs_by_skills", {"page": 1})
    context = {
        "all_skills": all_skills,
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    return render(request, "jobs/jobs_by_skills.html", context)
def fresher_jobs_by_skills(request, job_type):
    """Render the fresher (``job_type``) browse-by-skill page.

    A POSTed ``alphabet_value`` filters skills by their first letter;
    ``sorting_value == "descending"`` flips the name order. Ordering is
    only applied on POST, matching the original behaviour.
    """
    all_skills = Skill.objects.filter(status="Active")
    if request.method == "POST":
        letter = request.POST.get("alphabet_value")
        if letter:
            all_skills = all_skills.filter(name__istartswith=letter)
        sorting = request.POST.get("sorting_value")
        order = "-name" if sorting and str(sorting) == "descending" else "name"
        all_skills = all_skills.order_by(order)
    meta_title = meta_description = h1_tag = ""
    meta = MetaData.objects.filter(name="fresher_jobs_by_skills")
    if meta:
        ctx = Context({"job_type": job_type})
        meta_title = Template(meta[0].meta_title).render(ctx)
        meta_description = Template(meta[0].meta_description).render(ctx)
        h1_tag = Template(meta[0].h1_tag).render(ctx)
    context = {
        "all_skills": all_skills,
        "job_type": job_type,
        "h1_tag": h1_tag,
        "meta_title": meta_title,
        "meta_description": meta_description,
    }
    return render(request, "jobs/fresher_jobs_by_skills.html", context)
def jobs_by_industry(request):
    """Render the browse-jobs-by-industry page.

    Industries are ranked by job-post count by default; a POSTed
    ``industry`` narrows by name and ``sorting_value`` switches to
    alphabetical (ascending or descending) ordering.
    """
    all_industries = (
        Industry.objects.filter(status="Active")
        .annotate(num_posts=Count("jobpost"))
        .order_by("-num_posts")
    )
    if request.method == "POST":
        all_industries = all_industries.filter(
            name__icontains=request.POST.get("industry")
        )
        sorting = request.POST.get("sorting_value")
        order = "-name" if sorting and str(sorting) == "descending" else "name"
        all_industries = all_industries.order_by(order)
    meta_title, meta_description, h1_tag = get_meta("jobs_by_industry", {"page": 1})
    context = {
        "all_industries": all_industries,
        "h1_tag": h1_tag,
        "meta_title": meta_title,
        "meta_description": meta_description,
    }
    return render(request, "jobs/jobs_by_industries.html", context)
def jobs_by_degree(request):
    """Render the browse-jobs-by-degree page.

    A POSTed ``alphabet_value`` other than "all" filters qualifications
    by their first letter; ``sorting_value == "descending"`` flips the
    name order.
    """
    all_degrees = Qualification.objects.filter(status="Active").order_by("name")
    if request.method == "POST":
        letter = request.POST.get("alphabet_value")
        if str(letter) != "all":
            all_degrees = all_degrees.filter(name__istartswith=letter)
        sorting = request.POST.get("sorting_value")
        order = "-name" if sorting and str(sorting) == "descending" else "name"
        all_degrees = all_degrees.order_by(order)
    meta_title, meta_description, h1_tag = get_meta("jobs_by_degree", {"page": 1})
    context = {
        "all_degrees": all_degrees,
        "h1_tag": h1_tag,
        "meta_title": meta_title,
        "meta_description": meta_description,
    }
    return render(request, "jobs/jobs_by_degree.html", context)
def full_time_jobs(request, **kwargs):
    """Paginated listing of live full-time jobs.

    URL canonicalisation: page 1 (path segment or ``?page=1``) redirects
    permanently to the bare URL, and any other ``?page=N`` query
    redirects to the path-style ``.../N/`` URL.  A POST with
    ``refine_search == "True"`` re-runs the search with the user's
    refinement filters; otherwise a default search for job_type
    "full-time" is performed.
    """
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(reverse("full_time_jobs"), permanent=True)
    if "page" in request.GET:
        url = reverse("full_time_jobs") + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    request.session["formdata"] = ""
    # Default every "searched_*" template value to an empty string.
    searched_locations = (
        searched_industry
    ) = searched_skills = searched_edu = searched_states = ""
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    else:
        # No refinement posted: search with only the job-type filter.
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("job_type", ["full-time"])
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    no_of_jobs = jobs_list.count()
    items_per_page = 20
    no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        # Falsy page number means the requested page was invalid.
        return HttpResponseRedirect(reverse("full_time_jobs"))
    jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    # Show a pop-up when the visitor arrived from a social referrer.
    field = get_social_referer(request)
    show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
    meta_title, meta_description, h1_tag = get_meta("full_time_jobs", {"page": page})
    data = {
        "job_list": jobs_list,
        "aft_page": aft_page,
        "after_page": after_page,
        "prev_page": prev_page,
        "previous_page": previous_page,
        "current_page": page,
        "last_page": no_pages,
        "no_of_jobs": no_of_jobs,
        "current_url": reverse("full_time_jobs"),
        "show_pop_up": show_pop,
        "searched_skills": searched_skills,
        "searched_locations": searched_locations,
        "searched_industry": searched_industry,
        "searched_edu": searched_edu,
        "searched_states": searched_states,
        "experience": request.POST.get("experience"),
        "searched_job_type": "full-time",
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    template = "jobs/jobs_list.html"
    return render(request, template, data)
def internship_jobs(request, **kwargs):
    """Landing page for internship jobs.

    Renders ``internship.html`` with at most the first slice of live
    internship posts plus all enabled cities.
    """
    request.session["formdata"] = ""
    # NOTE(review): the queryset is capped at 9 rows here, yet the
    # render below slices ``[:10]`` — confirm whether the 9-row cap is
    # intentional.
    jobs_list = (
        JobPost.objects.filter(status="Live", job_type="internship")
        .select_related("company")
        .prefetch_related("location", "skills")[:9]
    )
    no_of_jobs = jobs_list.count()
    no_pages = int(math.ceil(float(no_of_jobs) / 20))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        return HttpResponseRedirect(reverse("internship_jobs"))
    # NOTE(review): the pagination values computed below (sliced list,
    # prev/after pages, h1_tag) are never passed to the template —
    # likely dead code left over from a list-style view.
    jobs_list = jobs_list[(page - 1) * 20 : page * 20]
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    # Show a pop-up when the visitor arrived from a social referrer.
    field = get_social_referer(request)
    show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
    meta_title, meta_description, h1_tag = get_meta("internship_jobs", {"page": page})
    return render(
        request,
        "internship.html",
        {
            "jobs_list": jobs_list[:10],
            "cities": City.objects.filter(status="Enabled"),
            "show_pop_up": show_pop,
            "meta_title": meta_title,
            "meta_description": meta_description,
        },
    )
def city_internship_jobs(request, location, **kwargs):
    """Paginated listing of live internship jobs in one city.

    ``location`` arrives as a city slug; page-1 URLs and ``?page=N``
    queries are permanently redirected to their canonical forms, as in
    the sibling listing views.
    """
    current_url = reverse("city_internship_jobs", kwargs={"location": location})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    request.session["formdata"] = ""
    # Rebind ``location`` from slug string to a City queryset.
    # NOTE(review): an unknown slug yields an empty queryset, and
    # ``location[0]`` below will raise IndexError (HTTP 500) — consider
    # a 404 guard.
    location = City.objects.filter(slug=location)
    # Default every "searched_*" template value to an empty string.
    searched_locations = (
        searched_industry
    ) = searched_skills = searched_edu = searched_states = ""
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    else:
        # Default search: internships in the resolved city.
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("job_type", ["internship"])
        search_dict.setlist("refine_location", [location[0].name])
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    no_of_jobs = jobs_list.count()
    items_per_page = 20
    no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        return HttpResponseRedirect(current_url)
    jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    # Show a pop-up when the visitor arrived from a social referrer.
    field = get_social_referer(request)
    show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
    meta_title, meta_description, h1_tag = get_meta_data(
        "location_internship_jobs",
        {
            "searched_locations": [location],
            "final_location": [location[0].name],
            "page": page,
        },
    )
    data = {
        "job_list": jobs_list,
        "aft_page": aft_page,
        "after_page": after_page,
        "prev_page": prev_page,
        "previous_page": previous_page,
        "current_page": page,
        "last_page": no_pages,
        "no_of_jobs": no_of_jobs,
        "internship_location": location,
        "current_url": current_url,
        "show_pop_up": show_pop,
        "searched_skills": searched_skills,
        "searched_locations": searched_locations,
        "searched_industry": searched_industry,
        "searched_edu": searched_edu,
        "searched_states": searched_states,
        "searched_experience": request.POST.get("experience"),
        "searched_job_type": "internship",
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    template = "jobs/jobs_list.html"
    return render(request, template, data)
def walkin_jobs(request, **kwargs):
    """Paginated listing of live walk-in jobs.

    URL canonicalisation matches the sibling views (``full_time_jobs``,
    ``government_jobs``): page 1 redirects permanently to the bare URL
    and any other ``?page=N`` query redirects to the path-style URL.
    A POST with ``refine_search == "True"`` replaces the default
    queryset with the user's refined search.
    """
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(reverse("walkin_jobs"), permanent=True)
    # BUG FIX: this redirect previously inspected ``request.POST`` for
    # the ``page`` key, unlike every sibling listing view which reads
    # the query string — so ``?page=N`` URLs were never canonicalised.
    if "page" in request.GET:
        url = reverse("walkin_jobs") + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    request.session["formdata"] = ""
    jobs_list = (
        JobPost.objects.filter(status="Live", job_type="walk-in")
        .select_related("company", "user")
        .prefetch_related("location", "skills", "industry")
    )
    # Default every "searched_*" template value to an empty string.
    searched_locations = (
        searched_industry
    ) = searched_skills = searched_edu = searched_states = ""
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    no_of_jobs = jobs_list.count()
    items_per_page = 20
    no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        # Falsy page number means the requested page was invalid.
        return HttpResponseRedirect(reverse("walkin_jobs"))
    jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    # Passed to the template as "current_date" for walk-in scheduling.
    current_date = datetime.now()
    # Show a pop-up when the visitor arrived from a social referrer.
    field = get_social_referer(request)
    show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
    meta_title, meta_description, h1_tag = get_meta("walkin_jobs", {"page": page})
    data = {
        "job_list": jobs_list,
        "aft_page": aft_page,
        "after_page": after_page,
        "prev_page": prev_page,
        "previous_page": previous_page,
        "current_page": page,
        "last_page": no_pages,
        "no_of_jobs": no_of_jobs,
        "current_url": reverse("walkin_jobs"),
        "show_pop_up": show_pop,
        "current_date": current_date,
        "searched_skills": searched_skills,
        "searched_locations": searched_locations,
        "searched_industry": searched_industry,
        "searched_edu": searched_edu,
        "searched_states": searched_states,
        "searched_experience": request.POST.get("experience"),
        "searched_job_type": "walk-in",
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    template = "jobs/jobs_list.html"
    return render(request, template, data)
def government_jobs(request, **kwargs):
    """Paginated listing of live government jobs.

    URL canonicalisation matches the sibling views: page 1 redirects
    permanently to the bare URL, and ``?page=N`` redirects to the
    path-style ``.../N/`` URL.
    """
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(reverse("government_jobs"), permanent=True)
    if "page" in request.GET:
        url = reverse("government_jobs") + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    request.session["formdata"] = ""
    jobs_list = (
        JobPost.objects.filter(status="Live", job_type="government")
        .select_related("company", "user")
        .prefetch_related("location", "skills", "industry")
    )
    no_of_jobs = jobs_list.count()
    items_per_page = 20
    # PERF FIX: previously ``len(jobs_list)`` evaluated the entire
    # queryset into memory just to count it; reuse the COUNT(*) result
    # already fetched above.
    no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
    page = get_page_number(request, kwargs, no_pages)
    if not page:
        # Falsy page number means the requested page was invalid.
        return HttpResponseRedirect(reverse("government_jobs"))
    jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, no_pages
    )
    # Show a pop-up when the visitor arrived from a social referrer.
    field = get_social_referer(request)
    show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
    meta_title, meta_description, h1_tag = get_meta("government_jobs", {"page": page})
    data = {
        "job_list": jobs_list,
        "aft_page": aft_page,
        "after_page": after_page,
        "prev_page": prev_page,
        "previous_page": previous_page,
        "current_page": page,
        "last_page": no_pages,
        "no_of_jobs": no_of_jobs,
        "job_type": "government",
        "current_url": reverse("government_jobs"),
        "show_pop_up": show_pop,
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    template = "jobs/jobs_list.html"
    return render(request, template, data)
def each_company_jobs(request, company_name, **kwargs):
    """Paginated listing of one company's job posts.

    ``company_name`` is the company slug.  When no active company
    matches, a user-type-specific 404 page is rendered.  Otherwise the
    company's posts are paginated 10 per page, with meta tags rendered
    from the "company_jobs" MetaData templates.
    """
    current_url = reverse("company_jobs", kwargs={"company_name": company_name})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    company = Company.objects.filter(slug=company_name, is_active=True)
    request.session["formdata"] = ""
    if not company:
        # Unknown/inactive company: render a 404 tailored to the
        # viewer's role (recruiter, staff, or anonymous/job seeker).
        data = {
            "message": "Sorry, no jobs available for " + company_name + " jobs",
            "reason": "Unfortunately, we are unable to locate the job you are looking for",
            "meta_title": "404 - Page Not Found - " + company_name + " - Peeljobs",
            "meta_description": "404 No Jobs available for "
            + company_name
            + " - Peeljobs",
            "data_empty": True,
        }
        if request.user.is_authenticated:
            if str(request.user.user_type) == "RR":
                return render(request, "recruiter/recruiter_404.html", data, status=404)
            elif request.user.is_staff:
                return render(request, "dashboard/404.html", data, status=404)
        template = "404.html"
        return render(request, template, data, status=404)
    else:
        company = company[0]
        items_per_page = 10
        job_list = (
            company.get_jobposts()
            .select_related("company", "user")
            .prefetch_related("location", "skills", "industry")
            .order_by("-published_on")
        )
        no_of_jobs = job_list.count()
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            # Falsy page number means the requested page was invalid.
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        skills = Skill.objects.filter(status="Active")
        industries = Industry.objects.filter(status="Active")[:6]
        jobs_list = job_list[(page - 1) * items_per_page : page * items_per_page]
        # Show a pop-up when the visitor arrived from a social referrer.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        # Meta tags are themselves Django templates stored in MetaData,
        # rendered here with the page number and company in context.
        meta_title = meta_description = h1_tag = ""
        meta = MetaData.objects.filter(name="company_jobs")
        if meta:
            meta_title = Template(meta[0].meta_title).render(
                Context({"current_page": page, "company": company})
            )
            meta_description = Template(meta[0].meta_description).render(
                Context({"current_page": page, "company": company})
            )
            h1_tag = Template(meta[0].h1_tag).render(
                Context({"current_page": page, "company": company})
            )
        data = {
            "job_list": jobs_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "skills": skills,
            "company": company,
            "current_url": current_url,
            "show_pop_up": show_pop,
            "industries": industries,
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
        }
        template = "jobs/company_jobs.html"
        return render(request, template, data)
def companies(request, **kwargs):
    """Paginated directory of active companies, busiest first.

    Page-1 URLs and ``?page=N`` queries permanently redirect to their
    canonical forms.  An optional POSTed ``alphabet_value`` restricts
    the list to names starting with that letter.
    """
    base_url = reverse("companies")
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(base_url, permanent=True)
    if "page" in request.GET:
        return redirect(base_url + request.GET.get("page") + "/", permanent=True)
    company_qs = (
        Company.objects.annotate(num_posts=Count("jobpost"))
        .filter(is_active=True)
        .order_by("-num_posts")
    )
    alphabet_value = request.POST.get("alphabet_value")
    if alphabet_value:
        company_qs = company_qs.filter(name__istartswith=alphabet_value)
    total = company_qs.count()
    per_page = 48
    last_page = int(math.ceil(float(total) / per_page))
    page = get_page_number(request, kwargs, last_page)
    if not page:
        return HttpResponseRedirect(base_url)
    prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
        page, last_page
    )
    start = (page - 1) * per_page
    page_of_companies = company_qs[start : start + per_page]
    meta_title, meta_description, h1_tag = get_meta("companies_list", {"page": page})
    context = {
        "companies": page_of_companies,
        "aft_page": aft_page,
        "after_page": after_page,
        "prev_page": prev_page,
        "previous_page": previous_page,
        "current_page": page,
        "last_page": last_page,
        "no_of_jobs": total,
        "alphabet_value": alphabet_value if alphabet_value else None,
        "current_url": base_url,
        "meta_title": meta_title,
        "meta_description": meta_description,
        "h1_tag": h1_tag,
    }
    return render(request, "jobs/companies_list.html", context)
def get_skills(request):
    """Return all active skills as serialized JSON, cached for a day."""
    serialized = cache.get("subscribing_skills")
    if not serialized:
        active = Skill.objects.filter(status="Active").order_by("name")
        serialized = serializers.serialize("json", active)
        # Cache the serialized string for 24 hours.  Note the value is
        # already JSON, so the body below is double-encoded — kept as-is
        # because existing clients parse it that way.
        cache.set("subscribing_skills", serialized, 60 * 60 * 24)
    return HttpResponse(json.dumps({"response": serialized}))
def skill_fresher_jobs(request, skill_name, **kwargs):
    """Paginated fresher-job listing for one or more skills.

    ``skill_name`` is a slug that may actually name a location; if it
    resolves to valid locations the request is redirected to
    ``location_fresher_jobs``.  When the search yields no jobs a 404
    page is rendered — with status 200 when the skill itself was valid
    (empty result) and 404 when the slug matched nothing.
    """
    current_url = reverse("skill_fresher_jobs", kwargs={"skill_name": skill_name})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    final_skill = get_valid_skills_list(skill_name)
    final_locations = get_valid_locations_list(skill_name)
    if final_locations:
        # The slug is really a city name — hand off to the location view.
        return redirect(
            reverse("location_fresher_jobs", kwargs={"city_name": skill_name}),
            permanent=True,
        )
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    elif final_skill:
        # Default search: fresher jobs matching the validated skills.
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_skill", final_skill)
        search_dict.update({"job_type": "Fresher"})
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    else:
        jobs_list = searched_skills = []
    if request.POST.get("q"):
        # Log the search query asynchronously for analytics.
        ip_address = request.META["REMOTE_ADDR"]
        save_search_results.delay(
            ip_address,
            request.POST,
            jobs_list.count() if jobs_list else 0,
            request.user.id,
        )
    if jobs_list:
        no_of_jobs = jobs_list.count()
        items_per_page = 20
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            # Falsy page number means the requested page was invalid.
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
        # Show a pop-up when the visitor arrived from a social referrer.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        meta_title, meta_description, h1_tag = get_meta_data(
            "skill_fresher_jobs",
            {
                "skills": searched_skills,
                "fresher": True,
                "final_skill": final_skill,
                "page": page,
            },
        )
        data = {
            "job_list": jobs_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "is_job_list": False,
            "fresher": True,
            "current_url": current_url,
            "show_pop_up": show_pop,
            "searched_skills": searched_skills,
            "searched_locations": searched_locations,
            "searched_industry": searched_industry,
            "searched_edu": searched_edu,
            "searched_states": searched_states,
            "searched_experience": request.POST.get("experience"),
            "searched_job_type": "Fresher",
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
        }
        template = "jobs/jobs_list.html"
        return render(request, template, data)
    else:
        # No jobs found: valid skill but empty result → 200 with 404
        # template; invalid slug → real 404.
        meta_title = meta_description = ""
        if searched_skills:
            reason = "Only valid Skill names are accepted in search field"
            skills = final_skill
            status = 200
            meta_title, meta_description = get_404_meta(
                "skill_404", {"skill": skills, "fresher": True}
            )
        else:
            status = 404
            skills = list(filter(None, request.POST.get("q", "").split(", "))) or [
                skill_name
            ]
            reason = "Only valid Skill/city names are accepted"
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Unfortunately, we are unable to locate the jobs you are looking for",
                "searched_job_type": "Fresher",
                "job_search": True,
                "reason": reason,
                "searched_skills": skills,
                "meta_title": meta_title,
                "meta_description": meta_description,
                "data_empty": status != 200,
            },
            status=status,
        )
def location_fresher_jobs(request, city_name, **kwargs):
    """Paginated fresher-job listing for a city or state.

    ``city_name`` is a slug that may match a State (searched via
    ``refine_state``) or one or more cities (``refine_location``).
    When no jobs are found a 404 template is rendered — status 200 for
    a valid but empty location, 404 for an unknown slug.
    """
    current_url = reverse("location_fresher_jobs", kwargs={"city_name": city_name})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    state = State.objects.filter(slug__iexact=city_name)
    final_locations = get_valid_locations_list(city_name)
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
        final_locations = final_locations + list(
            searched_states.values_list("name", flat=True)
        )
    elif state:
        # The slug names a whole state: search by state.
        final_locations = [state[0].name]
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_state", final_locations)
        search_dict.update({"job_type": "Fresher"})
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    elif final_locations:
        # The slug names one or more cities: search by city.
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_location", final_locations)
        search_dict.update({"job_type": "Fresher"})
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    else:
        jobs_list = searched_locations = []
    if request.POST.get("location") or request.POST.get("q"):
        # Log the search query asynchronously for analytics.
        ip_address = request.META["REMOTE_ADDR"]
        save_search_results.delay(
            ip_address,
            request.POST,
            jobs_list.count() if jobs_list else 0,
            request.user.id,
        )
    if jobs_list:
        no_of_jobs = jobs_list.count()
        items_per_page = 20
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            # Falsy page number means the requested page was invalid.
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
        # Show a pop-up when the visitor arrived from a social referrer.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        meta_title, meta_description, h1_tag = get_meta_data(
            "location_fresher_jobs",
            {
                "locations": searched_locations,
                "final_location": set(final_locations),
                "page": page,
                "state": bool(state),
                "fresher": True,
            },
        )
        data = {
            "job_list": jobs_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "is_job_list": False,
            "fresher": True,
            "current_url": current_url,
            "show_pop_up": show_pop,
            "searched_skills": searched_skills,
            "searched_locations": searched_locations,
            "searched_industry": searched_industry,
            "searched_edu": searched_edu,
            "searched_states": searched_states,
            "searched_experience": request.POST.get("experience"),
            "searched_job_type": "Fresher",
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
            "state": state.first(),
        }
        template = "jobs/jobs_list.html"
        return render(request, template, data)
    else:
        # No jobs: 200 for a valid-but-empty location, 404 otherwise.
        if final_locations:
            status = 200
            reason = "Only valid cities names are accepted"
            location = final_locations
            meta_title, meta_description = get_404_meta(
                "location_404", {"city": location, "fresher": True}
            )
        else:
            status = 404
            meta_title = meta_description = ""
            location = list(
                filter(None, request.POST.get("location", "").split(", "))
            ) or [city_name]
            reason = "Only valid Skill/city names are accepted"
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Unfortunately, we are unable to locate the jobs you are looking for",
                "searched_job_type": "Fresher",
                "job_search": True,
                "reason": reason,
                "meta_title": meta_title,
                "meta_description": meta_description,
                "searched_locations": location,
                "data_empty": status != 200,
            },
            status=status,
        )
def skill_location_walkin_jobs(request, skill_name, **kwargs):
    """Paginated walk-in-job listing for a skill OR a location slug.

    The same URL pattern serves both skill and location searches: a
    ``-in-`` segment in the path marks a location URL.  ``skill_name``
    is validated against skills, cities, and states; the matching
    search is executed via ``refined_search``.  Empty results render a
    404 template (status 200 for valid-but-empty terms, 404 otherwise).
    """
    if "-in-" in request.path:
        current_url = reverse("location_walkin_jobs", kwargs={"skill_name": skill_name})
    else:
        current_url = reverse("skill_walkin_jobs", kwargs={"skill_name": skill_name})
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    final_skill = get_valid_skills_list(skill_name)
    final_locations = get_valid_locations_list(skill_name)
    state = State.objects.filter(slug__iexact=skill_name)
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
        final_locations = final_locations + list(
            searched_states.values_list("name", flat=True)
        )
    elif state:
        # The slug names a state: search walk-ins by state.
        # NOTE(review): this assignment is immediately overwritten by
        # the refined_search() unpacking below — dead store.
        searched_locations = state
        final_locations = [state[0].name]
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_state", final_locations)
        search_dict.update({"job_type": "Walk-in"})
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    elif final_locations or final_skill:
        # Skill and/or city search, optionally bounded by experience.
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_skill", final_skill)
        search_dict.setlist("refine_location", final_locations)
        search_dict.update({"job_type": "walk-in"})
        if request.POST.get("experience"):
            search_dict.update(
                {"refine_experience_min": request.POST.get("experience")}
            )
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    else:
        jobs_list = []
    if request.POST.get("location") or request.POST.get("q"):
        # Log the search query asynchronously for analytics.
        ip_address = request.META["REMOTE_ADDR"]
        save_search_results.delay(
            ip_address,
            request.POST,
            jobs_list.count() if jobs_list else 0,
            request.user.id,
        )
    if jobs_list:
        no_of_jobs = jobs_list.count()
        items_per_page = 20
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            # Falsy page number means the requested page was invalid.
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
        # Show a pop-up when the visitor arrived from a social referrer.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        # Location searches and skill searches use different meta keys.
        if final_locations:
            meta_title, meta_description, h1_tag = get_meta_data(
                "location_walkin_jobs",
                {
                    "locations": searched_locations,
                    "walkin": True,
                    "final_location": set(final_locations),
                    "page": page,
                    "state": bool(state),
                },
            )
        else:
            meta_title, meta_description, h1_tag = get_meta_data(
                "skill_walkin_jobs",
                {
                    "skills": searched_skills,
                    "walkin": True,
                    "final_skill": final_skill,
                    "page": page,
                },
            )
        data = {
            "job_list": jobs_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "is_job_list": False,
            "walkin": True,
            "current_url": current_url,
            "show_pop_up": show_pop,
            "searched_skills": searched_skills,
            "searched_locations": searched_locations,
            "searched_industry": searched_industry,
            "searched_edu": searched_edu,
            "searched_states": searched_states,
            "experience": request.POST.get("experience"),
            "searched_job_type": "walk-in",
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
            "state": state.first(),
        }
        template = "jobs/jobs_list.html"
        return render(request, template, data)
    else:
        # Empty result: pick 404 metadata by URL flavour (location vs
        # skill) and by whether the search term itself was valid.
        if "-in-" in request.path:
            if final_locations:
                location, skills = final_locations, []
                status = 200
                meta_title, meta_description = get_404_meta(
                    "location_404", {"city": location, "walkin": True}
                )
            else:
                location, skills = (
                    list(filter(None, request.POST.get("location", "").split(", ")))
                    or [skill_name],
                    [],
                )
                status = 404
                meta_title = meta_description = ""
        else:
            if final_skill:
                skills, location = final_skill, []
                status = 200
                meta_title, meta_description = get_404_meta(
                    "skill_404", {"skill": skills, "walkin": True}
                )
            else:
                status = 404
                skills, location = (
                    list(filter(None, request.POST.get("q", "").split(", ")))
                    or [skill_name],
                    [],
                )
                meta_title = meta_description = ""
        reason = "Only valid Skill/City names are accepted in search field"
        template = "404.html"
        return render(
            request,
            template,
            {
                "message": "Unfortunately, we are unable to locate the jobs you are looking for",
                "searched_job_type": "walk-in",
                "job_search": True,
                "reason": reason,
                "searched_skills": skills,
                "meta_title": meta_title,
                "meta_description": meta_description,
                "searched_locations": location,
                "data_empty": status != 200,
            },
            status=status,
        )
def skill_location_wise_fresher_jobs(request, skill_name, city_name, **kwargs):
    """Paginated fresher-job listing for a skill AND city combination.

    Both ``skill_name`` and ``city_name`` slugs must validate for the
    default search to run.  Empty results render the 404 template with
    status 200 when both terms were valid (just no jobs) and status 404
    otherwise.
    """
    current_url = reverse(
        "skill_location_wise_fresher_jobs",
        kwargs={"skill_name": skill_name, "city_name": city_name},
    )
    if kwargs.get("page_num") == "1" or request.GET.get("page") == "1":
        return redirect(current_url, permanent=True)
    if "page" in request.GET:
        url = current_url + request.GET.get("page") + "/"
        return redirect(url, permanent=True)
    final_skill = get_valid_skills_list(skill_name)
    final_location = get_valid_locations_list(city_name)
    if request.POST.get("refine_search") == "True":
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(request.POST)
    elif final_skill and final_location:
        # Default search: fresher jobs matching both skill and city.
        search_dict = QueryDict("", mutable=True)
        search_dict.setlist("refine_skill", final_skill)
        search_dict.setlist("refine_location", final_location)
        search_dict.update({"job_type": "Fresher"})
        (
            jobs_list,
            searched_skills,
            searched_locations,
            searched_industry,
            searched_edu,
            searched_states,
        ) = refined_search(search_dict)
    else:
        jobs_list = []
    if request.POST.get("location") or request.POST.get("q"):
        # Log the search query asynchronously for analytics.
        ip_address = request.META["REMOTE_ADDR"]
        save_search_results.delay(
            ip_address,
            request.POST,
            jobs_list.count() if jobs_list else 0,
            request.user.id,
        )
    if jobs_list:
        no_of_jobs = jobs_list.count()
        items_per_page = 20
        no_pages = int(math.ceil(float(no_of_jobs) / items_per_page))
        page = get_page_number(request, kwargs, no_pages)
        if not page:
            # Falsy page number means the requested page was invalid.
            return HttpResponseRedirect(current_url)
        prev_page, previous_page, aft_page, after_page = get_prev_after_pages_count(
            page, no_pages
        )
        jobs_list = jobs_list[(page - 1) * items_per_page : page * items_per_page]
        # Show a pop-up when the visitor arrived from a social referrer.
        field = get_social_referer(request)
        show_pop = True if field == "fb" or field == "tw" or field == "ln" else False
        meta_title, meta_description, h1_tag = get_meta_data(
            "skill_location_fresher_jobs",
            {
                "skills": searched_skills,
                "locations": searched_locations,
                "final_location": final_location,
                "final_skill": final_skill,
                "page": page,
            },
        )
        data = {
            "job_list": jobs_list,
            "aft_page": aft_page,
            "after_page": after_page,
            "prev_page": prev_page,
            "previous_page": previous_page,
            "current_page": page,
            "last_page": no_pages,
            "no_of_jobs": no_of_jobs,
            "is_job_list": False,
            "show_pop_up": show_pop,
            "current_url": current_url,
            "searched_skills": searched_skills,
            "searched_locations": searched_locations,
            "searched_industry": searched_industry,
            "searched_edu": searched_edu,
            "searched_states": searched_states,
            "searched_experience": request.POST.get("experience"),
            "searched_job_type": "Fresher",
            "fresher": True,
            "meta_title": meta_title,
            "meta_description": meta_description,
            "h1_tag": h1_tag,
        }
        template = "jobs/jobs_list.html"
        return render(request, template, data)
    else:
        # Empty result: 200 only when both slug parts were valid.
        status = 200 if final_skill and final_location else 404
        reason = "Only valid Skill names are accepted in search field"
        skills = (
            final_skill
            or list(filter(None, request.POST.get("q", "").split(", ")))
            or [skill_name]
        )
        location = (
            final_location
            or list(filter(None, request.POST.get("location", "").split(", ")))
            or [city_name]
        )
        template = "404.html"
        if status == 200:
            meta_title, meta_description = get_404_meta(
                "skill_location_404",
                {"skill": skills, "city": location, "fresher": True},
            )
        else:
            meta_title = meta_description = ""
        return render(
            request,
            template,
            {
                "message": "Unfortunately, we are unable to locate the jobs you are looking for",
                "searched_job_type": "Fresher",
                "job_search": True,
                "reason": reason,
                "searched_skills": skills,
                "meta_title": meta_title,
                "meta_description": meta_description,
                "searched_locations": location,
                "data_empty": status != 200,
            },
            status=status,
        )
def add_other_location_to_user(user, request):
    """Set *user*'s current city from the POSTed ``other_location``.

    Reuses an existing City (case-insensitive name match) or creates a
    new, disabled one attached to the default state, then saves the
    user.
    """
    # BUG FIX: the lookup stripped whitespace but the create path stored
    # the raw POST value, so "  Pune " could create a near-duplicate
    # city.  Normalise once and use it everywhere.  ``or ""`` also
    # avoids an AttributeError when the field is absent.
    city_name = (request.POST.get("other_location") or "").strip()
    location = City.objects.filter(name__iexact=city_name)
    if location:
        user.current_city = location[0]
    else:
        location = City.objects.create(
            name=city_name,
            status="Disabled",
            slug=slugify(city_name),
            # Hard-coded default state id — presumably a catch-all
            # "other" state; TODO confirm against fixtures.
            state=State.objects.get(id=16),
        )
        user.current_city = location
    user.save()
def save_codes_and_send_mail(user, request, passwd):
    """Assign unique activation/unsubscribe codes to *user*, attach the
    chosen skills, and e-mail the account-activation link.
    """
    # Draw random codes until each is unused by any existing user.
    while True:
        activation = get_random_string(length=15)
        if not User.objects.filter(activation_code__iexact=activation):
            break
    while True:
        unsubscribe = get_random_string(length=15)
        if not User.objects.filter(unsubscribe_code__iexact=unsubscribe):
            break
    user.activation_code = activation
    user.unsubscribe_code = unsubscribe
    user.save()
    # Skill ids may arrive under either form field name.
    chosen = request.POST.getlist("technical_skills") or request.POST.getlist("skill")
    for skill_id in chosen:
        matches = Skill.objects.filter(id=skill_id)
        if matches:
            user.skills.add(TechnicalSkill.objects.create(skill=matches[0]))
    template_obj = loader.get_template("email/jobseeker_account.html")
    subject = "PeelJobs User Account Activation"
    activate_url = "{0}://{1}/user/activation/{2}/".format(
        request.scheme, request.META["HTTP_HOST"], user.activation_code
    )
    body = template_obj.render(
        {
            "activate_url": activate_url,
            "user_email": user.email,
            "user_mobile": user.mobile,
            "user": user,
            "user_password": passwd,
            "user_profile": user.profile_completion_percentage,
        }
    )
    # Deliver asynchronously via the celery task.
    send_email.delay(user.email, subject, body)
def register_using_email(request):
    """Register a new job seeker via email (AJAX endpoint).

    Behaviors on POST:
    - with ``get_resume``: only parse the uploaded resume and return the
      extracted email/mobile/text as JSON (no account is created);
    - otherwise: validate ``UserEmailRegisterForm``, create the user,
      optionally upload the resume to S3, log the user in, and return a
      JSON payload with a redirect URL.
    Non-POST requests are redirected to /index.
    """
    if request.method == "POST":
        if request.FILES.get("get_resume"):
            # Resume-parse-only mode: extract contact details for the form.
            handle_uploaded_file(
                request.FILES["get_resume"], request.FILES["get_resume"].name
            )
            email, mobile, text = get_resume_data(request.FILES["get_resume"])
            data = {
                "error": False,
                "resume_email": email,
                "resume_mobile": mobile,
                "text": text,
            }
            return HttpResponse(json.dumps(data))
        validate_user = UserEmailRegisterForm(request.POST, request.FILES)
        if validate_user.is_valid():
            # Reject duplicates by either email or username.
            if not (
                User.objects.filter(email__iexact=request.POST.get("email"))
                or User.objects.filter(username__iexact=request.POST.get("email"))
            ):
                email = request.POST.get("email")
                password = request.POST.get("password")
                registered_from = request.POST.get("register_from", "Email")
                user = User.objects.create(
                    username=email,
                    email=email,
                    user_type="JS",
                    registered_from=registered_from,
                )
                # Re-bind the form to the new user to fill remaining profile fields.
                user = UserEmailRegisterForm(request.POST, instance=user)
                user = user.save(commit=False)
                if request.POST.get("other_loc"):
                    add_other_location_to_user(user, request)
                user.email_notifications = (
                    request.POST.get("email_notifications") == "on"
                )
                user.set_password(password)
                user.referer = request.session.get("referer", "")
                user.save()
                save_codes_and_send_mail(user, request, password)
                if "resume" in request.FILES:
                    # Upload the resume to S3 under a per-user randomized path.
                    conn = tinys3.Connection(
                        settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY
                    )
                    random_string = "".join(
                        random.choice("0123456789ABCDEF") for i in range(3)
                    )
                    user_id = str(user.id) + str(random_string)
                    path = (
                        "resume/"
                        + user_id
                        + "/"
                        + request.FILES["resume"]
                        .name.replace(" ", "-")
                        .encode("ascii", "ignore")
                        .decode("ascii")
                    )
                    conn.upload(
                        path,
                        request.FILES["resume"],
                        settings.AWS_STORAGE_BUCKET_NAME,
                        public=True,
                        expires="max",
                    )
                    user.resume = path
                    user.profile_updated = datetime.now(timezone.utc)
                    user.save()
                # NOTE(review): authenticate() is called without a password —
                # presumably a custom passwordless backend handles this; confirm.
                registered_user = authenticate(username=user.username)
                if registered_user:
                    login(request, registered_user)
                UserEmail.objects.create(user=user, email=email, is_primary=True)
                redirect_url = reverse("user_reg_success")
                if request.POST.get("detail_page"):
                    redirect_url = request.POST.get("detail_page")
                data = {
                    "error": False,
                    "response": "Registered Successfully",
                    "redirect_url": redirect_url,
                }
                return HttpResponse(json.dumps(data))
            else:
                data = {
                    "error": True,
                    "response": "User With This Email Already exists ",
                }
                return HttpResponse(json.dumps(data))
        else:
            data = {"error": True, "response": validate_user.errors}
            return HttpResponse(json.dumps(data))
    return HttpResponseRedirect("/index")
def user_activation(request, user_id):
    """Activate a user account from the emailed activation link.

    ``user_id`` is actually the activation code, not a primary key.  On a
    match the user is marked active/verified, logged in, and the one-time
    code is cleared; otherwise a 404 "link expired" page is rendered.
    """
    user = User.objects.filter(activation_code__iexact=str(user_id)).first()
    if user:
        # NOTE(review): result is never used — authenticate() may only be
        # called here for a backend side effect; confirm or remove.
        registered_user = authenticate(username=user.username)
        if not request.user.is_authenticated:
            # Manually attach an auth backend so login() accepts this user.
            if not hasattr(user, "backend"):
                for backend in settings.AUTHENTICATION_BACKENDS:
                    if user == load_backend(backend).get_user(user.id):
                        user.backend = backend
                        break
            if hasattr(user, "backend"):
                login(request, user)
        # First-time activations get the verify banner on the profile page.
        url = "/profile/" if user.is_active else "/profile/?verify=true"
        user.is_active = True
        user.email_verified = True
        user.last_login = datetime.now()
        user.activation_code = ""  # one-time use: invalidate the link
        user.save()
        return HttpResponseRedirect(url)
    else:
        message = "Looks like Activation Url Expired"
        reason = "The URL may be misspelled or the user you're looking for is no longer available."
        template = "404.html"
        return render(
            request, template, {"message": message, "reason": reason}, status=404
        )
def login_user_email(request):
    """Authenticate a job seeker via email/password (AJAX endpoint).

    Returns a JSON payload with an ``error`` flag and a ``redirect_url``
    chosen from the user type and any pending job application stored in
    the session.  Non-POST requests are redirected to the home page.
    """
    if request.method == "POST":
        validate_user = AuthenticationForm(request.POST)
        if validate_user.is_valid():
            email = request.POST.get("email")
            password = request.POST.get("password")
            usr = authenticate(username=email, password=password)
            if usr:
                usr.last_login = datetime.now()
                usr.save()
                login(request, usr)
                data = {"error": False, "response": "Logged In Successfully"}
                data["redirect_url"] = "/profile/"
                if request.user.user_type == "JS" and request.session.get("job_id"):
                    # A job application was started before login; resume it.
                    post = JobPost.objects.filter(
                        id=request.session["job_id"], status="Live"
                    ).first()
                    # NOTE(review): operator precedence makes this read as
                    # (post and active and >=50%) or resume — confirm intent.
                    if (
                        post
                        and usr.is_active
                        and usr.profile_completion_percentage >= 50
                        or usr.resume
                    ):
                        job_apply(request, request.session["job_id"])
                        data["redirect_url"] = (
                            post.get_absolute_url() + "?job_apply=applied"
                            if post
                            else "/"
                        )
                    else:
                        url = post.slug + "?job_apply=apply" if post else "/profile/"
                        data["redirect_url"] = url
                elif request.user.is_recruiter or request.user.is_agency_recruiter:
                    data["redirect_url"] = "/recruiter/"
                else:
                    data["redirect_url"] = "/dashboard/"
                if request.POST.get("next"):
                    data["redirect_url"] = request.POST.get("next")
                if request.POST.get("detail_page"):
                    # Bug fix: key was misspelled "rediret_url", so the
                    # detail_page override was never seen by the client.
                    data["redirect_url"] = request.POST.get("detail_page")
            else:
                data = {
                    "error": True,
                    "response_message": "Username Password didn't match",
                }
            return HttpResponse(json.dumps(data))
        else:
            data = {"error": True, "response": validate_user.errors}
            return HttpResponse(json.dumps(data))
    return HttpResponseRedirect("/")
def set_password(request, user_id, passwd):
    """Password-reset landing endpoint reached from the reset email.

    GET: re-authenticates with the temporary password embedded in the URL
    and renders the set-password form.  POST: validates and stores the new
    password, answering JSON for the AJAX caller.
    """
    user = User.objects.filter(id=user_id)
    if request.method == "POST":
        validate_changepassword = UserPassChangeForm(request.POST)
        if validate_changepassword.is_valid():
            if request.POST["new_password"] != request.POST["retype_password"]:
                return HttpResponse(
                    json.dumps(
                        {
                            "error": True,
                            "response_message": "Password and Confirm Password did not match",
                        }
                    )
                )
            # NOTE(review): raises IndexError for an unknown user_id —
            # confirm the URL can only be reached with a valid id.
            user = user[0]
            user.set_password(request.POST["new_password"])
            user.save()
            # usr = authenticate(
            #     username=user.email, password=request.POST["new_password"]
            # )
            # if usr:
            #     usr.last_login = datetime.now()
            #     usr.save()
            #     login(request, usr)
            if user.user_type == "JS":
                url = "/"
            else:
                url = reverse("recruiter:new_user")
            return HttpResponse(
                json.dumps(
                    {
                        "error": False,
                        "message": "Password changed successfully",
                        "url": url,
                    }
                )
            )
        else:
            return HttpResponse(
                json.dumps({"error": True, "response": validate_changepassword.errors})
            )
    if user:
        # NOTE(review): passes the User object (not a username string) to
        # authenticate() — works only if the backend coerces it; verify.
        usr = authenticate(username=user[0], password=passwd)
        if usr:
            return render(request, "set_password.html")
    template = "404.html"
    return render(
        request,
        template,
        {"message": "Not Found", "reason": "URL may Expired"},
        status=404,
    )
def forgot_password(request):
    """Reset a job seeker's password and email them a set-password link.

    Generates a random temporary password, stores it, and mails a link of
    the form ``/user/set_password/<id>/<temp-password>/``.  Recruiter and
    agency accounts are rejected.  Always answers JSON for the AJAX caller.
    """
    form_valid = ForgotPassForm(request.POST)
    if form_valid.is_valid():
        user = User.objects.filter(email=request.POST.get("email")).first()
        if user and (user.is_recruiter or user.is_agency_admin):
            data = {
                "error": True,
                "response_message": "User Already registered as a Recruiter",
            }
            return HttpResponse(json.dumps(data))
        if user:
            new_pass = get_random_string(length=10).lower()
            user.set_password(new_pass)
            user.save()
            temp = loader.get_template("email/subscription_success.html")
            subject = "Password Reset - PeelJobs"
            mto = request.POST.get("email")
            # The link embeds the temporary password so the set_password
            # view can re-authenticate the visitor.
            url = (
                request.scheme
                + "://"
                + request.META["HTTP_HOST"]
                + "/user/set_password/"
                + str(user.id)
                + "/"
                + str(new_pass)
                + "/"
            )
            c = {"randpwd": new_pass, "user": user, "redirect_url": url}
            rendered = temp.render(c)
            # (Removed unused local `user_active`, which was computed but never read.)
            send_email.delay(mto, subject, rendered)
            data = {"error": False, "response": "Success", "redirect_url": "/"}
        else:
            data = {
                "error": True,
                "response_message": "User doesn't exist with this Email",
            }
        return HttpResponse(json.dumps(data))
    data = {"error": True, "response": form_valid.errors}
    return HttpResponse(json.dumps(data))
@jobseeker_login_required
def user_reg_success(request):
    """Post-registration page: completes the job seeker's profile.

    GET renders either the social-signup completion form (social users
    without a mobile number) or the standard success page.  POST updates
    the profile (skills, optional resume upload to S3) and answers JSON
    for the AJAX caller.
    """
    if not request.user.is_authenticated:
        reason = "The URL may be misspelled or the page you're looking for is no longer available."
        template = "404.html"
        return render(
            request,
            template,
            {"message": "Sorry, Page Not Found", "reason": reason},
            status=404,
        )
    if request.method == "POST":
        validate_user = UserEmailRegisterForm(
            request.POST, request.FILES, instance=request.user
        )
        if validate_user.is_valid():
            user = validate_user.save(commit=False)
            # Regenerate until the unsubscribe code is globally unique.
            while True:
                unsubscribe_code = get_random_string(length=15)
                if not User.objects.filter(unsubscribe_code__iexact=unsubscribe_code):
                    break
            user.unsubscribe_code = unsubscribe_code
            user.save()
            for s in request.POST.getlist("technical_skills"):
                skill = Skill.objects.filter(id=s)
                if skill:
                    skill = skill[0]
                    tech_skill = TechnicalSkill.objects.create(skill=skill)
                    user.skills.add(tech_skill)
            if "resume" in request.FILES:
                # Upload the resume to S3 under a per-user randomized path.
                conn = tinys3.Connection(
                    settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY
                )
                random_string = "".join(
                    random.choice("0123456789ABCDEF") for i in range(3)
                )
                user_id = str(user.id) + str(random_string)
                path = (
                    "resume/"
                    + user_id
                    + "/"
                    + request.FILES["resume"]
                    .name.replace(" ", "-")
                    .encode("ascii", "ignore")
                    .decode("ascii")
                )
                conn.upload(
                    path,
                    request.FILES["resume"],
                    settings.AWS_STORAGE_BUCKET_NAME,
                    public=True,
                    expires="max",
                )
                user.resume = path
                user.profile_updated = datetime.now(timezone.utc)
                user.save()
            data = {"error": False, "response": "Profile Updated Successfully"}
            return HttpResponse(json.dumps(data))
        data = {"error": True, "response": validate_user.errors}
        return HttpResponse(json.dumps(data))
    if request.user.registered_from == "Social" and not request.user.mobile:
        template_name = "candidate/social_register.html"
        return render(request, template_name)
    template = "candidate/user_reg_success.html"
    return render(request, template)
def user_subscribe(request):
    """Subscribe an email address to job alerts for chosen skills (AJAX).

    Subscribing via the generic widget (``subscribe_from``) covers every
    active skill; otherwise only the POSTed skill ids are used.  New
    subscriptions trigger a verification email.  Staff and recruiter
    accounts are rejected.  Non-POST requests redirect to the home page.
    """
    skills = Skill.objects.filter(status="Active")
    if request.method == "POST":
        validate_subscribe = SubscribeForm(request.POST)
        email = request.POST.get("email")
        user = User.objects.filter(email__iexact=email).first()
        if user and not user.user_type == "JS":
            data = {
                "error": True,
                "response_message": "Admin is not allowed to Subscribe"
                if user.is_staff
                else "Recruiter/Agency is not allowed to Subscribe",
            }
            return HttpResponse(json.dumps(data))
        if validate_subscribe.is_valid():
            # Existing subscriptions: by user when logged in, else by bare email.
            all_subscribers = (
                Subscriber.objects.filter(user=request.user)
                if request.user.is_authenticated
                else Subscriber.objects.filter(email=email, user=None)
            )
            if request.POST.get("subscribe_from"):
                if not all_subscribers:
                    # Generic widget: subscribe to every active skill.
                    for skill in skills:
                        sub_code = subscribers_creation_with_skills(
                            email,
                            skill,
                            request.user if request.user.is_authenticated else "",
                        )
                    data = {"error": False, "response": "Successfully Subscribed"}
                else:
                    data = {
                        "error": True,
                        "response_message": "User with this email id already subscribed",
                    }
            elif request.POST.getlist("skill"):
                all_subscribers = all_subscribers.filter(
                    skill__in=request.POST.getlist("skill")
                )
                # Only create subscriptions when at least one skill is new.
                if int(all_subscribers.count()) != int(
                    len(request.POST.getlist("skill"))
                ):
                    for skill in request.POST.getlist("skill"):
                        skill = Skill.objects.get(id=skill)
                        sub_code = subscribers_creation_with_skills(
                            email,
                            skill,
                            request.user if request.user.is_authenticated else "",
                        )
                    data = {"error": False, "response": "experience added"}
                else:
                    data = {
                        "error": True,
                        "response_message": "User with this email id and skill(s) already subscribed",
                    }
            else:
                data = {
                    "error": True,
                    "response_message": "Please Enter atleast one skill",
                }
            if not data.get("error"):
                # Success path: mail a verification link using the code of
                # the last subscription created above.
                t = loader.get_template("email/subscription_success.html")
                skills = Skill.objects.filter(id__in=request.POST.getlist("skill"))
                url = (
                    request.scheme
                    + "://"
                    + request.META["HTTP_HOST"]
                    + "/subscriber/verification/"
                    + str(sub_code)
                    + "/"
                )
                c = {"user_email": email, "skills": skills, "redirect_url": url}
                subject = "PeelJobs New Subscription"
                rendered = t.render(c)
                mto = [email]
                send_email.delay(mto, subject, rendered)
            return HttpResponse(json.dumps(data))
        else:
            data = {"error": True, "response": validate_subscribe.errors}
            return HttpResponse(json.dumps(data))
    return HttpResponseRedirect("/")
def process_email(request):
    """Webhook: extract an email address from an inbound message body and
    create a job-seeker account for it if none exists.

    Expects a JSON request body carrying a "Message" field; the first
    email-looking token found in it is used.
    """
    body_unicode = request.body.decode("utf-8")
    body = json.loads(body_unicode)
    # First email-looking token in the message body.
    search = re.search(r"[\w\.-]+@[\w\.-]+", body.get("Message"))
    if search:
        email = search.group(0)
        users = User.objects.filter(email__iexact=email)
        if not users:
            user = User.objects.create(
                username=email, email=email, user_type="JS", registered_from="Careers"
            )
            randpwd = rand_string(size=10).lower()
            user.set_password(randpwd)
            user.save()
            # Sends the activation email (includes the generated password).
            save_codes_and_send_mail(user, request, randpwd)
    return HttpResponseRedirect("/")
| MicroPyramid/opensource-job-portal | pjob/views.py | views.py | py | 117,784 | python | en | code | 336 | github-code | 36 |
19996296105 | from django.shortcuts import render, redirect
from django.http import Http404, HttpResponseRedirect
from django.urls import reverse
from .models import Article, Category, ArticleCategoryRelation
from django.utils import timezone
from .forms import UserRegistrationForm
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.views import View
from django.views.generic import ListView
import json
from django.http import JsonResponse
from django.template.loader import render_to_string
# Number of items shown per paginated page across the site.
items_on_page = 3
def index(request):
    """Render the landing page with the first page of articles and all categories."""
    # Only the first `items_on_page` articles appear on the main page.
    article_list = Article.objects.all()[:items_on_page]
    categories = Category.objects.all()
    return render(request, 'articles/main.html', {'all_articles': article_list,
                                                  'categories': categories,
                                                  'is_admin': request.user.is_staff, })
class ArticleListView(ListView):
    """Paginated list of all articles plus category sidebar data."""

    def get(self, request, **kwargs):
        """Render the article list; invalid or missing page numbers fall back to page 1.

        Cleanups vs. the original: removed dead code (`user`/`is_admin`
        computed via a DB query but never used — the template reads
        request.user.is_staff) and narrowed the bare ``except:`` to the
        two exceptions Paginator.page actually raises.
        """
        article_list = Article.objects.all()
        current_page = Paginator(article_list, items_on_page)
        page = request.GET.get('page')
        try:
            page_articles = current_page.page(page)
        except (PageNotAnInteger, EmptyPage):
            page_articles = current_page.page(1)
        # id/title pairs serialised for the client-side search widget.
        data = json.dumps(list(Article.objects.values_list('id', 'title')))
        categories = Category.objects.all()
        return render(request, 'articles/list.html',
                      {'all_articles': page_articles, 'is_admin': request.user.is_staff, 'qs_json': data,
                       'categories': categories, })
def create_article(request):
    """Create an article from POST data (staff only) and attach its categories."""
    if not request.user.is_staff:
        raise Http404('Доступ запрещен!')
    if request.method == 'POST':
        try:
            category_choices = [x for x in request.POST.getlist('category')]
            category_list = [Category.objects.get(id=category_id) for category_id in category_choices]
        except:
            raise Http404('Категория не найдена!')
        request.user.article_set.create(title=request.POST['title'], text=request.POST['text'], date=timezone.now())
        # NOTE(review): assumes the Article model's default ordering puts
        # the newest article first — confirm Meta.ordering, otherwise the
        # categories may be attached to the wrong article.
        current_article = Article.objects.all()[0]
        for category in category_list:
            category.includes_article.add(current_article)
        return redirect('/')
    category_list = Category.objects.all()
    return render(request, 'articles/create.html', {'category_list': category_list})
def update_article(request, article_id):
    """Edit an article's title/text (staff only).

    Category re-assignment is currently disabled (commented out below).
    """
    if not request.user.is_staff:
        raise Http404('Доступ запрещен!')
    # NOTE(review): .get() raises DoesNotExist before the guard below can
    # run — `if not current_article` is effectively unreachable.
    current_article = Article.objects.get(id=article_id)
    if not current_article:
        raise Http404('Статья не найдена!')
    if request.method == 'POST':
        # try:
        #     category_choices = [x for x in request.POST.getlist('category')]
        #     category_list = [Category.objects.get(id=category_id) for category_id in category_choices]
        # except:
        #     raise Http404('Категория не найдена!')
        current_article.title=request.POST['title']
        current_article.text=request.POST['text']
        current_article.save()
        # ArticleCategoryRelation.objects.filter(article=current_article).delete()
        #
        # for category in category_list:
        #     category.includes_article.add(current_article)
        return redirect('/')
    category_list = Category.objects.all()
    category_of_article = ArticleCategoryRelation.objects.filter(article=current_article)
    return render(request, 'articles/update.html', {'category_list': category_list,
                                                    'article': current_article,
                                                    'article_category': category_of_article})
def leave_comment(request, article_id):
    """Attach a POSTed comment from the current user to an article, then
    redirect back to the article page."""
    try:
        article = Article.objects.get(id=article_id)
    except:
        raise Http404('Статья не найдена!')
    article.comment_set.create(author=request.user, text=request.POST['text'], date=timezone.now())
    return HttpResponseRedirect(reverse('newnotes:view_article', args=(article.id,)))
def profile(request):
    """Render the profile page; anonymous visitors get a 404."""
    if request.user.is_anonymous:
        raise Http404('Доступ запрещен!')
    all_categories = Category.objects.all()
    return render(request, 'account/profile.html', {'categories': all_categories, })
def register(request):
    """Sign up a new user via UserRegistrationForm.

    Invalid POSTs fall through and re-render the bound form; validation
    errors are only printed to the console.
    """
    if request.method == 'POST':
        form = UserRegistrationForm(request.POST)
        if form.is_valid():
            new_user = form.save()
            return render(request, 'registration/register_done.html', {'new_user': new_user})
        else:
            print(form.errors.as_data())
    else:
        form = UserRegistrationForm()
    return render(request, 'registration/register.html', {'form': form})
def delete_article(request, article_id):
    """Confirm (GET) or perform (POST) deletion of an article; staff only."""
    if not request.user.is_staff:
        raise Http404('Доступ запрещен!')
    try:
        target = Article.objects.get(id=article_id)
    except:
        raise Http404('Статья не найдена!')
    if request.method != "POST":
        # GET: show the confirmation page.
        return render(request, 'articles/delete.html', {'article': target})
    target.delete()
    return redirect('/')
def create_category(request):
    """Create a category from the POSTed name (staff only); GET shows the form."""
    if not request.user.is_staff:
        raise Http404('Доступ запрещен!')
    if request.method != 'POST':
        # GET: render the creation form with the existing categories.
        all_categories = Category.objects.all()
        return render(request, 'categories/create.html', {'category_list': all_categories, })
    Category.objects.create(name=request.POST['name'])
    return redirect('/')
def delete_category(request, category_id):
    """Confirm (GET) or perform (POST) deletion of a category; staff only."""
    if not request.user.is_staff:
        raise Http404('Доступ запрещен!')
    try:
        category = Category.objects.get(id=category_id)
    except:
        raise Http404('Категория не найдена!')
    if request.method == "POST":
        category.delete()
        return redirect('/')
    category_list = Category.objects.all()
    return render(request, 'categories/delete.html', {'category': category, 'category_list': category_list, })
def update_category(request, category_id):
    """Rename a category (staff only); GET shows the edit form."""
    if not request.user.is_staff:
        raise Http404('Доступ запрещен!')
    try:
        category = Category.objects.get(id=category_id)
    except:
        raise Http404('Категория не найдена!')
    if request.method == 'POST':
        Category.objects.filter(id=category_id).update(name=request.POST['name'])
        return redirect('/')
    category_list = Category.objects.all()
    return render(request, 'categories/update.html', {'category': category, 'category_list': category_list, })
class ListCategoryArticles(ListView):
    """Paginated list of the articles belonging to one category."""
    def get(self, request, category_id, **kwargs):
        # Newest relations first; each relation links one article to the category.
        rel_category_article = ArticleCategoryRelation.objects.filter(category=category_id).order_by('-id')
        category = Category.objects.all().get(id=category_id)
        # NOTE(review): one query per relation (N+1) — fine for small data.
        article_list = [Article.objects.get(id=x.article.id) for x in rel_category_article]
        current_page = Paginator(article_list, items_on_page)
        page = request.GET.get('page')
        try:
            context = current_page.page(page)
        except:
            context = current_page.page(1)
        # id/title pairs serialised for the client-side search widget.
        data = json.dumps(list(Article.objects.values_list('id', 'title')))
        categories = Category.objects.all()
        return render(request, 'categories/list.html',
                      {'all_articles': context, 'is_admin': request.user.is_staff, 'qs_json': data,
                       'categories': categories,
                       'category': category, })
def get_paginated_page(request, objects, number=items_on_page):
    """Return the Page of *objects* selected by the request's 'page' parameter.

    The parameter is read from GET or POST depending on the method.  A
    non-integer page falls back to page 1; an out-of-range page falls back
    to the last page.
    """
    paginator = Paginator(objects, number)
    if request.method == 'GET':
        requested = request.GET.get('page')
    else:
        requested = request.POST.get('page')
    try:
        return paginator.page(requested)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)
def is_ajax(request):
    """Return True when the request carries the XMLHttpRequest marker header."""
    requested_with = request.META.get('HTTP_X_REQUESTED_WITH')
    return requested_with == 'XMLHttpRequest'
class ViewArticle(View):
    """Article detail page: GET renders the article with paginated comments,
    POST (AJAX only) returns a refreshed comments HTML fragment."""
    def get(self, request, article_id):
        try:
            article = Article.objects.get(id=article_id)
        except:
            raise Http404('Статья не найдена!')
        list_comments = article.comment_set.order_by('-id')
        # Track unique readers for the "watched" counter.
        if not request.user.is_anonymous:
            article.readers.add(request.user)
        watched = article.readers.count()
        categories = Category.objects.all()
        return render(request, 'articles/view.html',
                      {'article': article, 'list_comments': get_paginated_page(request, list_comments),
                       'watched': watched, 'categories': categories, })
    def post(self, request, article_id):
        # Comment pagination via AJAX: return the rendered comments partial.
        if is_ajax(request):
            try:
                article = Article.objects.get(id=article_id)
            except:
                raise Http404('Статья не найдена!')
            return JsonResponse({
                "result": True,
                "comms": render_to_string(
                    request=request,
                    template_name='articles/comms.html',
                    context={'list_comments': get_paginated_page(request, article.comment_set.order_by('-id'))}
                )
            })
        else:
            raise Http404()
| osinkel/articles-django | newnotes/views.py | views.py | py | 9,576 | python | en | code | 0 | github-code | 36 |
10650749218 | # This class defines the control data that we want to keep for debugging purposes
class LogDataSet():
    """One debugging record: the sensor readings plus the control values
    computed from them during a single control step."""
    def __init__(self):
        # sensors' values
        self.sensors = LogSensorsData()
        # control values
        self.control = LogControlData()
    def setSensorsValues(self, axes, gyroscopeRate):
        # set sensors' values
        # NOTE(review): this replaces the LogSensorsData instance created in
        # __init__ with the raw ``axes`` object (assumed dict-like, since a
        # key is added below) — confirm that is intended.
        self.sensors = axes
        self.sensors["gyroscopeRate"] = gyroscopeRate
        #self.sensors.setSensorsValues(axes, gyroscopeRate)
    def setControlValues(self, accelerometerAngle, angle, error, integral_error, differential_error, u, dt):
        # set the control values; simply forwarded to the LogControlData member
        self.control.setControlValues(accelerometerAngle, angle, error, integral_error, differential_error, u, dt)
class LogSensorsData:
    """Raw sensor readings captured for a single control step."""

    def __init__(self):
        # Both readings start unset; they are filled in later by the logger.
        self.axes = None
        self.gyroscopeRate = None
class LogControlData:
    """Control values (PID terms and output) recorded for one control step.

    Fix: the original's final line was corrupted by extraction artifacts
    appended after ``self.dt = dt``, which broke the file's syntax; this
    version restores the clean statement.
    """

    def __init__(self):
        # All values start unset; setControlValues() populates them.
        self.accelerometerAngle = None
        self.angle = None
        self.error = None
        self.integral_error = None
        self.differential_error = None
        self.u = None
        self.dt = None

    def setControlValues(self, accelerometerAngle, angle, error, integral_error, differential_error, u, dt):
        """Store one step's control values (angles, PID error terms, output u, timestep dt)."""
        self.accelerometerAngle = accelerometerAngle
        self.angle = angle
        self.error = error
        self.integral_error = integral_error
        self.differential_error = differential_error
        self.u = u
        self.dt = dt
25124748823 | import numpy as np
class GradientDescentLinearRegression:
    """Simple linear regression (y = m*x + b) fitted by batch gradient descent.

    Parameters
    ----------
    learning_rate : float
        Step size for each gradient-descent update.
    iterations : int
        Number of gradient-descent steps performed by :meth:`fit`.
    """

    def __init__(self, learning_rate=0.01, iterations=1000):
        self.learning_rate, self.iterations = learning_rate, iterations

    def fit(self, X, y):
        """Fit slope ``m`` and intercept ``b`` to 1-D arrays ``X`` and ``y``.

        Minimises mean squared error.  Fixes two bugs in the original:
        the intercept gradient computed ``y - m*X + b`` instead of
        ``y - (m*X + b)``, and the intercept was updated by *adding* its
        gradient (gradient ascent) instead of subtracting it.
        """
        b = 0.0
        m = 5.0  # arbitrary starting slope kept from the original
        n = X.shape[0]
        for _ in range(self.iterations):
            residual = y - (m * X + b)
            # d(MSE)/db and d(MSE)/dm for MSE = sum(residual**2) / n
            b_gradient = -2 * np.sum(residual) / n
            m_gradient = -2 * np.sum(X * residual) / n
            b = b - self.learning_rate * b_gradient
            m = m - self.learning_rate * m_gradient
        self.m, self.b = m, b

    def predict(self, X):
        """Return predictions m*X + b for the input array X."""
        return self.m * X + self.b
# Demo: synthetic data where x and y both follow the 0..4 staircase pattern
# plus Gaussian noise, so the true relationship is roughly y = x.
np.random.seed(42)
X = np.array(sorted(list(range(5))*20)) + np.random.normal(size=100, scale=0.5)
y = np.array(sorted(list(range(5))*20)) + np.random.normal(size=100, scale=0.25)
clf = GradientDescentLinearRegression()
clf.fit(X, y)
import matplotlib.pyplot as plt
plt.style.use('fivethirtyeight')
# Scatter the raw points and overlay the fitted line.
plt.scatter(X, y, color='black')
plt.plot(X, clf.predict(X))
plt.gca().set_title("Gradient Descent Linear Regressor")
# NOTE(review): plt.show() is never called — the figure only appears in
# environments (e.g. notebooks) that render it implicitly.
print("The intercept of the best fit line, b= ",clf.b)
print("The slope of the best fit line, m= ",clf.m)
| TanizzCoder/ANN | Gradient_Regression.py | Gradient_Regression.py | py | 1,163 | python | en | code | 1 | github-code | 36 |
21676970380 | #!/usr/bin/python3
# birds{id:{'pos':[x,y], 'ori':[x,y]}}
# Dictionary-key and vector-index constants used throughout the script.
pos = 'pos'
ori = 'dir'  # NOTE(review): the key is 'dir' although the constant is named ori
X = 0
Y = 1
swarmSize = 1
# The swarm: bird id -> {position, orientation}, both 2-D vectors.
swarm = {1:{pos:[0,0], ori:[0,0]}}
def updatePos(target):
    """Advance one bird: add its orientation vector to its position."""
    new_x = target[pos][X] + target[ori][X]
    new_y = target[pos][Y] + target[ori][Y]
    target[pos] = [new_x, new_y]
# NOTE(review): this loop appears unfinished — it iterates the dict's KEYS
# (integer bird ids) and then indexes them like bird records, and the names
# repulsionZone, alignmentZone, attractionZone, param, repulsion, alignment,
# attraction, vector and distance are not defined anywhere in this file.
# `force` is also a plain list, so `+=` with a scalar product would fail.
# As written this raises at runtime; kept verbatim pending a real fix.
for target in swarm:
    force = [0,0]
    for neighbor in repulsionZone(target):
        force += target[param[repulsion]] * vector(target, neighbor) * (1/distance(target, neighbor))
    for neighbor in alignmentZone(target):
        force += target[param[alignment]] * neighbor[ori] * (1/distance(target, neighbor))
    for neighbor in attractionZone(target):
        force += target[param[attraction]] * vector(target, neighbor) * (1/distance(target, neighbor))
    # The accumulated force becomes the bird's new orientation vector.
    dx = force[X]
    dy = force[Y]
    target[ori] = [dx,dy]
| jamie314159/swarm | swarm.py | swarm.py | py | 745 | python | en | code | 0 | github-code | 36 |
12136530301 | """
This file is meant to optimize the import speed. Import modules from YOLOv7 projects
and Ultralytics take significant amount of time
"""
import glob
import math
import logging
import numpy as np
import os
import re
import time
import urllib
from pathlib import Path
from PIL import Image, ImageDraw, ImageFont
from threading import Thread
import cv2
import torch
import torch.nn as nn
import torchvision
# Log to history.log with timestamped, module-tagged entries at INFO level.
logging.basicConfig(filename="history.log",
                    format="%(asctime)s - %(levelname)s - %(module)s: %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S %p",
                    level=logging.INFO)
"""
From utils.general
"""
def box_iou(box1, box2):
    # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py
    """
    Return intersection-over-union (Jaccard index) of boxes.
    Both sets of boxes are expected to be in (x1, y1, x2, y2) format.
    Arguments:
        box1 (Tensor[N, 4])
        box2 (Tensor[M, 4])
    Returns:
        iou (Tensor[N, M]): the NxM matrix containing the pairwise
            IoU values for every element in boxes1 and boxes2
    """
    # Per-box areas: (x2 - x1) * (y2 - y1).
    area1 = (box1[:, 2] - box1[:, 0]) * (box1[:, 3] - box1[:, 1])
    area2 = (box2[:, 2] - box2[:, 0]) * (box2[:, 3] - box2[:, 1])
    # Pairwise overlap rectangle, clamped so disjoint boxes contribute 0.
    top_left = torch.max(box1[:, None, :2], box2[:, :2])
    bottom_right = torch.min(box1[:, None, 2:], box2[:, 2:])
    inter = (bottom_right - top_left).clamp(0).prod(2)
    # iou = intersection / union, with union = area1 + area2 - intersection.
    return inter / (area1[:, None] + area2 - inter)
def clean_str(s):
    """Sanitise a string by replacing special characters with underscores."""
    return re.sub(r"[|@#!¡·$€%&()=?¿^*;:,¨´><+]", "_", s)
def make_divisible(x, divisor):
    """Round *x* up to the nearest multiple of *divisor*."""
    multiples = math.ceil(x / divisor)
    return multiples * divisor
def check_img_size(img_size, s=32):
    """Return img_size rounded up to a multiple of stride *s*, warning if it changed."""
    adjusted = make_divisible(img_size, int(s))  # ceil gs-multiple
    if adjusted != img_size:
        print('WARNING: --img-size %g must be multiple of max stride %g, updating to %g' % (img_size, s, adjusted))
    return adjusted
def clip_coords(boxes, img_shape):
    """Clip xyxy boxes in-place to lie inside an image of (height, width)."""
    height, width = img_shape[0], img_shape[1]
    boxes[:, 0].clamp_(0, width)   # x1
    boxes[:, 1].clamp_(0, height)  # y1
    boxes[:, 2].clamp_(0, width)   # x2
    boxes[:, 3].clamp_(0, height)  # y2
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    """Rescale xyxy coords in-place from img1_shape back to img0_shape.

    ratio_pad, when given, is ((gain, ...), (pad_w, pad_h)); otherwise
    the letterbox gain and padding are recomputed from the two shapes.
    """
    if ratio_pad is None:
        # Recompute gain (old/new scale) and symmetric wh padding.
        gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])
        pad = ((img1_shape[1] - img0_shape[1] * gain) / 2,
               (img1_shape[0] - img0_shape[0] * gain) / 2)
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]
    coords[:, [0, 2]] -= pad[0]  # undo x padding
    coords[:, [1, 3]] -= pad[1]  # undo y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords
def xyxy2xywh(x):
    """Convert nx4 boxes [x1, y1, x2, y2] -> [cx, cy, w, h] (center + size)."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    x1, y1, x2, y2 = x[:, 0], x[:, 1], x[:, 2], x[:, 3]
    out[:, 0] = (x1 + x2) / 2  # center x
    out[:, 1] = (y1 + y2) / 2  # center y
    out[:, 2] = x2 - x1        # width
    out[:, 3] = y2 - y1        # height
    return out
def xywh2xyxy(x):
    """Convert nx4 boxes [cx, cy, w, h] -> [x1, y1, x2, y2] (corners)."""
    out = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)
    half_w = x[:, 2] / 2
    half_h = x[:, 3] / 2
    out[:, 0] = x[:, 0] - half_w  # top-left x
    out[:, 1] = x[:, 1] - half_h  # top-left y
    out[:, 2] = x[:, 0] + half_w  # bottom-right x
    out[:, 3] = x[:, 1] + half_h  # bottom-right y
    return out
def apply_classifier(x, model, img, im0):
    """Run a second-stage classifier over YOLO detections and keep only the
    boxes whose classifier prediction agrees with the detector's class.

    x: list of per-image detection tensors (xyxy, conf, cls)
    model: classification model fed 224x224 RGB crops
    img: letterboxed network input tensor (used to rescale boxes)
    im0: original image or list of images (assumed BGR uint8 — TODO confirm)
    """
    # applies a second stage classifier to yolo outputs
    im0 = [im0] if isinstance(im0, np.ndarray) else im0
    for i, d in enumerate(x):  # per image
        if d is not None and len(d):
            d = d.clone()
            # Reshape and pad cutouts
            b = xyxy2xywh(d[:, :4])  # boxes
            b[:, 2:] = b[:, 2:].max(1)[0].unsqueeze(1)  # rectangle to square
            b[:, 2:] = b[:, 2:] * 1.3 + 30  # pad
            d[:, :4] = xywh2xyxy(b).long()
            # Rescale boxes from img_size to im0 size
            scale_coords(img.shape[2:], d[:, :4], im0[i].shape)
            # Classes
            pred_cls1 = d[:, 5].long()
            ims = []
            for j, a in enumerate(d):  # per item
                cutout = im0[i][int(a[1]):int(a[3]), int(a[0]):int(a[2])]
                im = cv2.resize(cutout, (224, 224))  # BGR
                # cv2.imwrite('test%i.jpg' % j, cutout)
                im = im[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
                im = np.ascontiguousarray(im, dtype=np.float32)  # uint8 to float32
                im /= 255.0  # 0 - 255 to 0.0 - 1.0
                ims.append(im)
            pred_cls2 = model(torch.Tensor(ims).to(d.device)).argmax(1)  # classifier prediction
            x[i] = x[i][pred_cls1 == pred_cls2]  # retain matching class detections
    return x
def non_max_suppression(prediction, conf_thres=0.25, iou_thres=0.45, classes=None, agnostic=False, multi_label=False,
                        labels=()):
    """Runs Non-Maximum Suppression (NMS) on inference results.

    prediction: raw model output (batch, boxes, 5+nc) in xywh+obj+cls form.
    classes: optional list of class ids to keep; agnostic: suppress across
    classes; multi_label: allow several labels per box; labels: optional
    apriori labels appended per image (autolabelling).
    Returns:
         list of detections, on (n,6) tensor per image [xyxy, conf, cls]
    """
    nc = prediction.shape[2] - 5  # number of classes
    xc = prediction[..., 4] > conf_thres  # candidates
    # Settings
    min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
    max_det = 300  # maximum number of detections per image
    max_nms = 30000  # maximum number of boxes into torchvision.ops.nms()
    time_limit = 10.0  # seconds to quit after
    redundant = True  # require redundant detections
    multi_label &= nc > 1  # multiple labels per box (adds 0.5ms/img)
    merge = False  # use merge-NMS
    t = time.time()
    output = [torch.zeros((0, 6), device=prediction.device)] * prediction.shape[0]
    for xi, x in enumerate(prediction):  # image index, image inference
        # Apply constraints
        # x[((x[..., 2:4] < min_wh) | (x[..., 2:4] > max_wh)).any(1), 4] = 0  # width-height
        x = x[xc[xi]]  # confidence
        # Cat apriori labels if autolabelling
        if labels and len(labels[xi]):
            l = labels[xi]
            v = torch.zeros((len(l), nc + 5), device=x.device)
            v[:, :4] = l[:, 1:5]  # box
            v[:, 4] = 1.0  # conf
            v[range(len(l)), l[:, 0].long() + 5] = 1.0  # cls
            x = torch.cat((x, v), 0)
        # If none remain process next image
        if not x.shape[0]:
            continue
        # Compute conf
        if nc == 1:
            x[:, 5:] = x[:, 4:5]  # for models with one class, cls_loss is 0 and cls_conf is always 0.5,
            # so there is no need to multiplicate.
        else:
            x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
        # Box (center x, center y, width, height) to (x1, y1, x2, y2)
        box = xywh2xyxy(x[:, :4])
        # Detections matrix nx6 (xyxy, conf, cls)
        if multi_label:
            i, j = (x[:, 5:] > conf_thres).nonzero(as_tuple=False).T
            x = torch.cat((box[i], x[i, j + 5, None], j[:, None].float()), 1)
        else:  # best class only
            conf, j = x[:, 5:].max(1, keepdim=True)
            x = torch.cat((box, conf, j.float()), 1)[conf.view(-1) > conf_thres]
        # Filter by class
        if classes is not None:
            x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]
        # Apply finite constraint
        # if not torch.isfinite(x).all():
        #     x = x[torch.isfinite(x).all(1)]
        # Check shape
        n = x.shape[0]  # number of boxes
        if not n:  # no boxes
            continue
        elif n > max_nms:  # excess boxes
            x = x[x[:, 4].argsort(descending=True)[:max_nms]]  # sort by confidence
        # Batched NMS: offsetting boxes by class * max_wh keeps classes
        # from suppressing each other (unless agnostic).
        c = x[:, 5:6] * (0 if agnostic else max_wh)  # classes
        boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
        i = torchvision.ops.nms(boxes, scores, iou_thres)  # NMS
        if i.shape[0] > max_det:  # limit detections
            i = i[:max_det]
        if merge and (1 < n < 3E3):  # Merge NMS (boxes merged using weighted mean)
            # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)
            iou = box_iou(boxes[i], boxes) > iou_thres  # iou matrix
            weights = iou * scores[None]  # box weights
            x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True)  # merged boxes
            if redundant:
                i = i[iou.sum(1) > 1]  # require redundancy
        output[xi] = x[i]
        if (time.time() - t) > time_limit:
            print(f'WARNING: NMS time limit {time_limit}s exceeded')
            break  # time limit exceeded
    return output
"""
From models.common.py
"""
def autopad(k, p=None):  # kernel, padding
    """Return explicit padding p if given, else 'same' padding for kernel k."""
    if p is not None:
        return p
    if isinstance(k, int):
        return k // 2
    return [dim // 2 for dim in k]
class Conv(nn.Module):
    """Standard convolution block: Conv2d (no bias) -> BatchNorm2d -> activation."""

    def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):
        # c1: in channels, c2: out channels, k: kernel, s: stride,
        # p: explicit padding (None = 'same' via autopad), g: groups
        super(Conv, self).__init__()
        self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)
        self.bn = nn.BatchNorm2d(c2)
        if act is True:
            self.act = nn.SiLU()
        elif isinstance(act, nn.Module):
            self.act = act
        else:
            self.act = nn.Identity()

    def forward(self, x):
        """Conv -> BN -> activation."""
        return self.act(self.bn(self.conv(x)))

    def fuseforward(self, x):
        """Forward for a fused model where BN has been folded into the conv."""
        return self.act(self.conv(x))
"""
From models.experimental.py
"""
class Ensemble(nn.ModuleList):
    """Container that runs several models on one input and concatenates
    their inference outputs along the detection dimension (NMS ensemble)."""

    def __init__(self):
        super(Ensemble, self).__init__()

    def forward(self, x, augment=False):
        """Return (concatenated detections, None) to mimic a single model's output."""
        outputs = [member(x, augment)[0] for member in self]
        # y = torch.stack(outputs).max(0)[0]  # max ensemble
        # y = torch.stack(outputs).mean(0)  # mean ensemble
        combined = torch.cat(outputs, 1)  # nms ensemble
        return combined, None  # inference, train output
def attempt_load(weights, map_location=None):
    """Load model weights from one or more checkpoint files.

    *weights* may be a single path or a list of paths; a single checkpoint
    (or one-element list) yields the bare model, several yield an Ensemble.
    """
    weight_list = weights if isinstance(weights, list) else [weights]
    model = Ensemble()
    for w in weight_list:
        # attempt_download(w)
        ckpt = torch.load(w, map_location=map_location)  # load
        key = 'ema' if ckpt.get('ema') else 'model'
        model.append(ckpt[key].float().fuse().eval())  # FP32 model

    # Compatibility patches for modules saved under older torch versions.
    for m in model.modules():
        kind = type(m)
        if kind in (nn.Hardswish, nn.LeakyReLU, nn.ReLU, nn.ReLU6, nn.SiLU):
            m.inplace = True  # pytorch 1.7.0 compatibility
        elif kind is nn.Upsample:
            m.recompute_scale_factor = None  # torch 1.11.0 compatibility
        elif kind is Conv:
            m._non_persistent_buffers_set = set()  # pytorch 1.6.0 compatibility

    if len(model) == 1:
        return model[-1]  # return model
    print('Ensemble created with %s\n' % weights)
    # Expose the last member's metadata on the ensemble itself.
    for k in ('names', 'stride'):
        setattr(model, k, getattr(model[-1], k))
    return model  # return ensemble
"""
From utils.datasets.py
"""
# Acceptable file suffixes (lower-case, without the leading dot) used to
# classify inputs as images or videos in LoadImages below.
img_formats = ['bmp', 'jpg', 'jpeg', 'png', 'tif', 'tiff', 'dng', 'webp', 'mpo']  # acceptable image suffixes
vid_formats = ['mov', 'avi', 'mp4', 'mpg', 'mpeg', 'm4v', 'wmv', 'mkv', 'webm']  # acceptable video suffixes
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
    """Resize *img* to fit *new_shape* while keeping aspect ratio, padding the rest.

    Returns the padded image, the (w, h) scale ratios applied, and the
    (dw, dh) padding added on each side.
    """
    h0, w0 = img.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old); never upscale unless scaleup is requested
    # (downscale-only gives better test mAP).
    r = min(new_shape[0] / h0, new_shape[1] / w0)
    if not scaleup:
        r = min(r, 1.0)

    ratio = r, r  # width, height ratios
    new_unpad = int(round(w0 * r)), int(round(h0 * r))
    dw = new_shape[1] - new_unpad[0]  # width padding
    dh = new_shape[0] - new_unpad[1]  # height padding
    if auto:  # minimum rectangle: pad only up to the next stride multiple
        dw, dh = np.mod(dw, stride), np.mod(dh, stride)
    elif scaleFill:  # stretch to fill exactly, no padding
        dw, dh = 0.0, 0.0
        new_unpad = (new_shape[1], new_shape[0])
        ratio = new_shape[1] / w0, new_shape[0] / h0

    # Split the padding evenly between the two sides.
    dw /= 2
    dh /= 2

    if (w0, h0) != new_unpad:  # resize only when the size actually changes
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return img, ratio, (dw, dh)
class LoadImages:  # for inference
    """Iterator over image files and video frames for inference.

    Each iteration yields (path, letterboxed CHW RGB image, original BGR
    image, cv2 capture or None).
    """

    def __init__(self, path, img_size=640, stride=32):
        # Resolve the input: a glob pattern, a directory, or a single file.
        p = str(Path(path).absolute())  # os-agnostic absolute path
        if '*' in p:
            files = sorted(glob.glob(p, recursive=True))  # glob
        elif os.path.isdir(p):
            files = sorted(glob.glob(os.path.join(p, '*.*')))  # dir
        elif os.path.isfile(p):
            files = [p]  # files
        else:
            raise Exception(f'ERROR: {p} does not exist')

        # Split the candidates into images and videos by file extension.
        images = [x for x in files if x.split('.')[-1].lower() in img_formats]
        videos = [x for x in files if x.split('.')[-1].lower() in vid_formats]
        ni, nv = len(images), len(videos)

        self.img_size = img_size
        self.stride = stride
        self.files = images + videos  # images first, then videos
        self.nf = ni + nv  # number of files
        self.video_flag = [False] * ni + [True] * nv  # parallel to self.files
        self.mode = 'image'
        if any(videos):
            self.new_video(videos[0])  # open the first video eagerly
        else:
            self.cap = None
        assert self.nf > 0, f'No images or videos found in {p}. ' \
                            f'Supported formats are:\nimages: {img_formats}\nvideos: {vid_formats}'

    def __iter__(self):
        self.count = 0  # index of the next file to serve
        return self

    def __next__(self):
        if self.count == self.nf:
            raise StopIteration
        path = self.files[self.count]

        if self.video_flag[self.count]:
            # Read video
            self.mode = 'video'
            ret_val, img0 = self.cap.read()
            if not ret_val:
                # Current video is exhausted: advance to the next file.
                self.count += 1
                self.cap.release()
                if self.count == self.nf:  # last video
                    raise StopIteration
                else:
                    path = self.files[self.count]
                    self.new_video(path)
                    ret_val, img0 = self.cap.read()
            self.frame += 1
            # print(f'video {self.count + 1}/{self.nf} ({self.frame}/{self.nframes}) {path}: \n', end='')
        else:
            # Read image
            self.count += 1
            img0 = cv2.imread(path)  # BGR
            assert img0 is not None, 'Image Not Found ' + path
            # print(f'image {self.count}/{self.nf} {path}: ', end='')

        # Padded resize
        img = letterbox(img0, self.img_size, stride=self.stride)[0]

        # Convert
        img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, to 3x416x416
        img = np.ascontiguousarray(img)

        return path, img, img0, self.cap

    def new_video(self, path):
        # Open *path* and reset the per-video frame counter.
        self.frame = 0
        self.cap = cv2.VideoCapture(path)
        self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))

    def __len__(self):
        return self.nf  # number of files
class LoadStreams:  # multiple IP or RTSP cameras
    """Iterator over frames from one or more live video streams.

    Each stream is read in its own daemon thread; __next__ returns the most
    recent frame of every stream, letterboxed and stacked into one batch.
    """

    def __init__(self, sources='streams.txt', img_size=640, stride=32):
        self.mode = 'stream'
        self.img_size = img_size
        self.stride = stride

        # *sources* is either a file with one stream URL per line or a single URL.
        if os.path.isfile(sources):
            with open(sources, 'r') as f:
                sources = [x.strip() for x in f.read().strip().splitlines() if len(x.strip())]
        else:
            sources = [sources]

        n = len(sources)
        self.imgs = [None] * n  # latest frame per stream, written by reader threads
        self.sources = [clean_str(x) for x in sources]  # clean source names for later
        for i, s in enumerate(sources):
            # Start the thread to read frames from the video stream
            print(f'{i + 1}/{n}: {s}... ', end='')
            # SECURITY NOTE(review): eval() on the source string; isnumeric()
            # restricts it to digit-only strings here, but int(s) would be safer.
            url = eval(s) if s.isnumeric() else s
            # Remove support for Youtube video
            cap = cv2.VideoCapture(url)
            assert cap.isOpened(), f'Failed to open {s}'
            w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            # NOTE(review): self.fps is overwritten on each loop iteration, so
            # only the last stream's FPS drives update()'s sleep — verify.
            # The % 100 presumably guards against bogus huge reported FPS.
            self.fps = cap.get(cv2.CAP_PROP_FPS) % 100
            _, self.imgs[i] = cap.read()  # guarantee first frame
            thread = Thread(target=self.update, args=([i, cap]), daemon=True)
            print(f' success ({w}x{h} at {self.fps:.2f} FPS).')
            thread.start()
        print('')  # newline

        # check for common shapes
        s = np.stack([letterbox(x, self.img_size, stride=self.stride)[0].shape for x in self.imgs], 0)  # shapes
        self.rect = np.unique(s, axis=0).shape[0] == 1  # rect inference if all shapes equal
        if not self.rect:
            print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')

    def update(self, index, cap):
        # Read next stream frame in a daemon thread
        n = 0
        while cap.isOpened():
            n += 1
            # _, self.imgs[index] = cap.read()
            cap.grab()
            if n == 4:  # read every 4th frame
                success, im = cap.retrieve()
                # On failure, zero out the previous frame (black image).
                self.imgs[index] = im if success else self.imgs[index] * 0
                n = 0
            if self.fps != 0:
                time.sleep(1 / self.fps)  # wait time
            else:
                time.sleep(0.2)  # in rtsp situation self.fps may be zero. to avoid div by zero, take constant sleep.

    def __iter__(self):
        self.count = -1
        return self

    def __next__(self):
        self.count += 1
        # Snapshot so reader threads cannot swap frames mid-batch.
        img0 = self.imgs.copy()
        # NOTE(review): cv2.waitKey needs an open HighGUI window to see keys.
        if cv2.waitKey(1) == ord('q'):  # q to quit
            cv2.destroyAllWindows()
            raise StopIteration

        # Letterbox
        img = [letterbox(x, self.img_size, auto=self.rect, stride=self.stride)[0] for x in img0]
        # Stack
        img = np.stack(img, 0)
        # Convert
        img = img[:, :, :, ::-1].transpose(0, 3, 1, 2)  # BGR to RGB, to bsx3x416x416
        img = np.ascontiguousarray(img)
        return self.sources, img, img0, None

    def __len__(self):
        return 0  # 1E12 frames = 32 streams at 30 FPS for 30 years
"""
From ultralytics.yolo.utils.check.py
"""
FILE = Path(__file__).resolve()  # absolute path of this source file
ROOT = FILE.parents[2]  # YOLO project root (three ancestor directories up)
def check_suffix(file='yolov8n.pt', suffix=('.pt',), msg=''):
    """Assert that every given file name has one of the accepted suffixes.

    *file* may be a single name or a list/tuple of names; files without any
    suffix are accepted.  *msg* is prepended to the assertion message.
    """
    if not file or not suffix:
        return
    if isinstance(suffix, str):
        suffix = [suffix]
    names = file if isinstance(file, (list, tuple)) else [file]
    for f in names:
        s = Path(f).suffix.lower()  # file suffix
        if s:
            assert s in suffix, f"{msg}{f} acceptable suffix is {suffix}"
def check_file(file, suffix=''):
    """Return the path of *file*, downloading or searching for it if needed.

    Existing paths (and the empty string) are returned as-is; http(s) URLs
    are downloaded; anything else is searched for under ROOT's known
    directories.
    """
    # Optional suffix validation (inlined from check_suffix).
    if file and suffix:
        suffixes = [suffix] if isinstance(suffix, str) else suffix
        for f in (file if isinstance(file, (list, tuple)) else [file]):
            s = Path(f).suffix.lower()
            if len(s):
                assert s in suffixes, f"{f} acceptable suffix is {suffixes}"

    file = str(file)  # convert to str()
    if Path(file).is_file() or not file:  # already exists (or empty string)
        return file

    if file.startswith(('http:/', 'https:/')):  # download
        url = file  # warning: Pathlib turns :// -> :/
        file = Path(urllib.parse.unquote(file).split('?')[0]).name  # '%2F' to '/', split https://url.com/file.txt?auth
        if Path(file).is_file():
            logging.info(f'Found {url} locally at {file}')  # file already exists
        else:
            logging.info(f'Downloading {url} to {file}...')
            torch.hub.download_url_to_file(url, file)
            assert Path(file).exists() and Path(file).stat().st_size > 0, f'File download failed: {url}'  # check
        return file

    # Search known project directories for the file name.
    files = []
    for d in ('models', 'yolo/data'):
        files.extend(glob.glob(str(ROOT / d / '**' / file), recursive=True))
    if not files:
        raise FileNotFoundError(f"'{file}' does not exist")
    if len(files) > 1:
        raise FileNotFoundError(f"Multiple files match '{file}', specify exact path: {files}")
    return files[0]  # return file
"""
From ultralytics.yolo.utils.check.py
"""
def is_ascii(s) -> bool:
    """
    Check if a string is composed of only ASCII characters.

    Non-string inputs (list, tuple, None, numbers, ...) are converted with
    str() first, so the check applies to their textual representation.

    Args:
        s: Value to be checked (converted to str).

    Returns:
        bool: True if str(s) is composed only of ASCII characters, False otherwise.
    """
    # str.isascii() (Python 3.7+) is a single C-level pass and is equivalent
    # to the previous all(ord(c) < 128 for c in s) generator, including the
    # empty-string case (both return True).
    return str(s).isascii()
"""
From ultralytics.yolo.utils.plotting.py
"""
def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):
    """
    Takes a mask, and resizes it to the original image size
    Args:
        im1_shape (tuple): model input shape, [h, w]
        masks (torch.Tensor): [h, w, num]
        im0_shape (tuple): the original image shape
        ratio_pad (tuple): the ratio of the padding to the original image.
    Returns:
        masks (torch.Tensor): The masks that are being returned.
    """
    # Rescale coordinates (xyxy) from im1_shape to im0_shape
    if ratio_pad is None:  # calculate from im0_shape
        gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1])  # gain = old / new
        pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2  # wh padding
    else:
        pad = ratio_pad[1]

    # Crop away the letterbox padding, then resize back to the original size.
    top, left = int(pad[1]), int(pad[0])  # y, x
    bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])

    if len(masks.shape) < 2:
        # NOTE(review): the message says "2 or 3" but the check only rejects
        # rank < 2; a rank-4+ input would slip through — confirm intended.
        raise ValueError(f'"len of masks shape" should be 2 or 3, but got {len(masks.shape)}')
    masks = masks[top:bottom, left:right]
    # masks = masks.permute(2, 0, 1).contiguous()
    # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]
    # masks = masks.permute(1, 2, 0).contiguous()
    masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))

    if len(masks.shape) == 2:
        # cv2.resize drops a singleton channel axis; restore it.
        masks = masks[:, :, None]
    return masks
class Annotator:
    """Draws boxes, labels, masks and text onto an image via PIL or cv2."""
    # YOLOv8 Annotator for train/val mosaics and jpgs and detect/hub inference annotations

    def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'):
        # Wrap image *im*; the PIL backend is used when requested or when the
        # example label contains non-ASCII characters (cv2.putText cannot
        # render those).
        assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.'
        non_ascii = not is_ascii(example)  # non-latin labels, i.e. asian, arabic, cyrillic
        self.pil = pil or non_ascii
        if self.pil:  # use PIL
            self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
            self.draw = ImageDraw.Draw(self.im)
            self.font = ImageFont.load_default()  # For simplicity and Performance
        else:  # use cv2
            self.im = im
        # Line width scales with image size; used by both backends.
        self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2)  # line width

    def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)):
        # Add one xyxy box to image with label
        if self.pil or not is_ascii(label):
            self.draw.rectangle(box, width=self.lw, outline=color)  # box
            if label:
                w, h = self.font.getsize(label)  # text width, height (WARNING: deprecated) in 9.2.0
                # _, _, w, h = self.font.getbbox(label)  # text width, height (New)
                outside = box[1] - h >= 0  # label fits outside box
                # Filled background rectangle behind the label text.
                self.draw.rectangle(
                    (box[0], box[1] - h if outside else box[1], box[0] + w + 1,
                     box[1] + 1 if outside else box[1] + h + 1),
                    fill=color,
                )
                # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls')  # for PIL>8.0
                self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font)
        else:  # cv2
            p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3]))
            cv2.rectangle(self.im, p1, p2, color, thickness=self.lw, lineType=cv2.LINE_AA)
            if label:
                tf = max(self.lw - 1, 1)  # font thickness
                w, h = cv2.getTextSize(label, 0, fontScale=self.lw / 3, thickness=tf)[0]  # text width, height
                outside = p1[1] - h >= 3  # label fits above the box
                p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3
                cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA)  # filled
                cv2.putText(self.im,
                            label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2),
                            0,
                            self.lw / 3,
                            txt_color,
                            thickness=tf,
                            lineType=cv2.LINE_AA)

    def masks(self, masks, colors, im_gpu, alpha=0.5, retina_masks=False):
        """Plot masks at once.
        Args:
            masks (tensor): predicted masks on cuda, shape: [n, h, w]
            colors (List[List[Int]]): colors for predicted masks, [[r, g, b] * n]
            im_gpu (tensor): img is in cuda, shape: [3, h, w], range: [0, 1]
            alpha (float): mask transparency: 0.0 fully transparent, 1.0 opaque
        """
        if self.pil:
            # convert to numpy first
            self.im = np.asarray(self.im).copy()
        if len(masks) == 0:
            # NOTE(review): with no masks, the base image is copied in but the
            # code below still executes on the empty tensor — confirm intended.
            self.im[:] = im_gpu.permute(1, 2, 0).contiguous().cpu().numpy() * 255
        colors = torch.tensor(colors, device=im_gpu.device, dtype=torch.float32) / 255.0
        colors = colors[:, None, None]  # shape(n,1,1,3)
        masks = masks.unsqueeze(3)  # shape(n,h,w,1)
        masks_color = masks * (colors * alpha)  # shape(n,h,w,3)
        inv_alph_masks = (1 - masks * alpha).cumprod(0)  # shape(n,h,w,1)
        mcs = (masks_color * inv_alph_masks).sum(0) * 2  # mask color summand shape(n,h,w,3)
        im_gpu = im_gpu.flip(dims=[0])  # flip channel
        im_gpu = im_gpu.permute(1, 2, 0).contiguous()  # shape(h,w,3)
        im_gpu = im_gpu * inv_alph_masks[-1] + mcs
        im_mask = (im_gpu * 255)
        im_mask_np = im_mask.byte().cpu().numpy()
        self.im[:] = im_mask_np if retina_masks else scale_image(im_gpu.shape, im_mask_np, self.im.shape)
        if self.pil:
            # convert im back to PIL and update draw
            self.fromarray(self.im)

    def rectangle(self, xy, fill=None, outline=None, width=1):
        # Add rectangle to image (PIL-only)
        self.draw.rectangle(xy, fill, outline, width)

    def text(self, xy, text, txt_color=(255, 255, 255), anchor='top'):
        # Add text to image (PIL-only)
        if anchor == 'bottom':  # start y from font bottom
            w, h = self.font.getsize(text)  # text width, height
            xy[1] += 1 - h
        self.draw.text(xy, text, fill=txt_color, font=self.font)

    def fromarray(self, im):
        # Update self.im from a numpy array
        self.im = im if isinstance(im, Image.Image) else Image.fromarray(im)
        self.draw = ImageDraw.Draw(self.im)

    def result(self):
        # Return annotated image as array
        return np.asarray(self.im)
class Colors:
    """Ultralytics color palette (https://ultralytics.com/)."""

    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        palette_hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
                       '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in palette_hex]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        """Return palette color *i* (wrapping around) as RGB, or BGR when bgr=True."""
        r, g, b = self.palette[int(i) % self.n]
        return (b, g, r) if bgr else (r, g, b)

    @staticmethod
    def hex2rgb(h):
        """Convert a '#RRGGBB' string to an (r, g, b) tuple (PIL order)."""
        return tuple(int(h[pos:pos + 2], 16) for pos in (1, 3, 5))


colors = Colors()  # create instance for 'from utils.plots import colors'
| CMPUT-492-W2023-Capstone/cstc-backend-v7 | app/src/module.py | module.py | py | 28,870 | python | en | code | 0 | github-code | 36 |
32952196842 | from django.shortcuts import render, redirect
from kajaki_app.models import Route, Kayak, Order, OrderKayak
from django.urls import reverse, reverse_lazy
from datetime import date
from django.views import View
from kajaki_app.forms import AddKayakForm, AddRouteForm, ContactForm
from django.views.generic import ListView, CreateView, UpdateView, DetailView, DeleteView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin, PermissionRequiredMixin
class AddRouteView(View):
    """Display and process the form for creating a new kayaking route."""
    # permission_required = ['kajaki_app.add_route']

    def get(self, request):
        """Render an empty route form."""
        context = {'form': AddRouteForm(), 'submit_value_text': 'Dodaj'}
        return render(request, 'kajaki_app/add_route.html', context)

    def post(self, request):
        """Save a valid submission, otherwise redisplay the form with errors."""
        form = AddRouteForm(request.POST)
        if not form.is_valid():
            return render(request, 'kajaki_app/add_route.html',
                          {'form': form, 'submit_value_text': 'Dodaj'})
        form.save()
        return redirect(reverse('add_route'))
class RouteListView(ListView):
    """List all Route objects."""
    model = Route
    template_name = 'kajaki_app/route_list.html'
class AddKayakView(View):
    """Display and process the form for creating a new kayak."""
    # permission_required = ['kajaki_app.add_kayak']

    def get(self, request):
        """Render an empty kayak form."""
        context = {'form': AddKayakForm(), 'submit_value_text': 'Dodaj'}
        return render(request, 'kajaki_app/add_kayak.html', context)

    def post(self, request):
        """Save a valid submission, otherwise redisplay the form with errors."""
        form = AddKayakForm(request.POST)
        if not form.is_valid():
            return render(request, 'kajaki_app/add_kayak.html',
                          {'form': form, 'submit_value_text': 'Dodaj'})
        form.save()
        return redirect(reverse('add_kayak'))
class KayakListView(ListView):
    """List all Kayak objects."""
    model = Kayak
    template_name = 'kajaki_app/kayak_list.html'
class KayakUpdateView(LoginRequiredMixin, UpdateView):
    """Edit an existing Kayak; requires login."""
    # permission_required = ['filmy.change_film']
    model = Kayak
    template_name = 'kajaki_app/add_kayak.html'
    fields = '__all__'

    def get_success_url(self):
        """Return the URL to redirect to after a successful save.

        BUG FIX: the previous implementation called super().get_success_url()
        and discarded the result; the base implementation raises
        ImproperlyConfigured when neither success_url nor
        Kayak.get_absolute_url() is defined, which could break every save.
        """
        # NOTE(review): 'add_kayak' is reversed with the object id — confirm
        # the URL pattern actually accepts an argument.
        return reverse("add_kayak", args=(self.object.id,))
class KayakDeleteView(LoginRequiredMixin, DeleteView):
    """Confirm and delete a Kayak; requires login."""
    model = Kayak
    template_name = 'kajaki_app/kayak_delete.html'
    success_url = reverse_lazy('kayak_list')
class KayakDetailView(DetailView):
    """Show the details of a single Kayak."""
    model = Kayak
    template_name = 'kajaki_app/details_kayak.html'
class CheckoutView(View):
    """Render the checkout page."""

    def get(self, request):
        return render(request, 'kajaki_app/checkout.html')

    def post(self, request):
        # NOTE(review): the submitted fields are read but never used or
        # persisted — presumably a placeholder; confirm intended behavior.
        name = request.POST.get('name', '')
        email = request.POST.get('email', '')
        date = request.POST.get('date', '')
        phone = request.POST.get('phone', '')
        return render(request, 'kajaki_app/checkout.html')
class OrderView(LoginRequiredMixin, View):
    """Create a kayak rental order for the logged-in user."""

    def get(self, request):
        """Render the order form with all available routes and kayaks."""
        routes = Route.objects.all()
        kayaks = Kayak.objects.all()
        return render(request, 'kajaki_app/order.html', {'kayaks': kayaks, 'routes': routes})

    def post(self, request):
        """Validate the posted selection and create Order + OrderKayak rows.

        NOTE(review): int(amount) raises for missing/non-numeric input and
        Route/Kayak .get() raises DoesNotExist for unknown names — confirm
        these cannot be submitted, or add explicit handling.
        """
        user = request.user
        route = request.POST.get('route')
        date = request.POST.get('date')  # shadows datetime.date imported above
        kayak = request.POST.get('kayak')
        amount = request.POST.get('amount')
        if route and date and int(amount) >= 1 and kayak:
            route = Route.objects.get(name=route)
            order = Order.objects.create(route=route, buyer=user, date=date)
            kayak = Kayak.objects.get(name=kayak)
            order_kayak = OrderKayak.objects.create(kayak=kayak, order=order, amount=amount)
            return redirect(reverse('my_account'))
        return render(request, 'kajaki_app/order.html', {'message': 'Wypełnij poprawnie wszystkie pola'})
class ContactView(View):
    """Display and process the site contact form."""

    def get(self, request):
        """Render an empty contact form."""
        context = {'form': ContactForm(), 'submit_value_text': 'Wyślij'}
        return render(request, 'kajaki_app/contact.html', context)

    def post(self, request):
        """Save a valid submission, otherwise redisplay the form with errors."""
        form = ContactForm(request.POST)
        if not form.is_valid():
            return render(request, 'kajaki_app/contact.html',
                          {'form': form, 'submit_value_text': 'Wyślij'})
        form.save()
        return redirect(reverse('index'))
class AboutUsView(View):
    """Render the static 'about us' page."""

    def get(self, request):
        return render(request, 'kajaki_app/about_us.html')
| KamilNurzynski/Kajaki | kajaki_app/views.py | views.py | py | 4,243 | python | en | code | 0 | github-code | 36 |
## Program name: sort_fruits.py
## UoPeople CS-1101 December 2015
## Unit 7
## Roger Stillick Jr.
## The purpose of this program is to read a file containing a list of
## fruits, then sort the list and write the sorted list to a new file.

# Set the working directory of the path
wd = '/home/roger/bin/'

# courtesy prompt
print('Program started')

# Read, sort and write using context managers so the files are closed
# even if an error occurs part-way through.
with open(wd + 'unsorted_fruits.txt', 'r') as infile:
    fruit = list(infile)

# sort the list
fruit.sort()

with open(wd + 'sorted_fruits.txt', 'w') as outfile:
    for line in fruit:
        # BUG FIX: the original test was `if line > "/n"` (a lexicographic
        # comparison against the two characters '/', 'n', not a newline
        # check).  It happened to skip blank lines for alphabetic fruit
        # names, but would also drop any line starting with a character
        # that sorts before '/'.  Skip only genuinely blank lines instead.
        if line.strip():
            outfile.write(line)

# courtesy prompt
print('''Operation successful, look for the "sorted_fruits.txt" file in
your working directory.''')
| RogerStilly/misc_py | sort_fruits.py | sort_fruits.py | py | 1,147 | python | en | code | 0 | github-code | 36 |
34761383240 | import sys, os
import subprocess
import datetime as dt
from random import randint
import argparse
import web3
from web3 import Web3
from web3.middleware import geth_poa_middleware
from eth_utils import decode_hex
# Project modules
import utils
from TextColor.color import bcolors
URL = "http://127.0.0.1:8545"  # local JSON-RPC endpoint of the Ethereum node
ACCOUNT_DB_NAME = 'car.json'  # file holding this car's private key
MGMT_CONTRACT_DB_NAME = utils.MGMT_CONTRACT_DB_NAME
MGMT_CONTRACT_SRC_PATH = utils.MGMT_CONTRACT_SRC_PATH

CONFIG = utils.open_data_base(ACCOUNT_DB_NAME)
DATABASE = utils.open_data_base(MGMT_CONTRACT_DB_NAME)

# NOTE: this check runs at import time, so merely importing the module
# aborts the process when setup has not been performed.
if DATABASE is None:
    sys.exit(f"{bcolors.FAIL}Setup hasn't been done{bcolors.ENDC}")
def generate_private_key(_w3: Web3) -> str:
    """
    Generate a private key for a car account.

    SECURITY FIX: the previous implementation derived the key from the
    current UTC timestamp plus randint(0, 2**16) — at most ~16 bits of
    entropy on top of a guessable clock value, which makes such keys brute
    forceable.  The key material now comes from the OS CSPRNG via the
    `secrets` module (256 random bits), hashed the same way as before.

    :param Web3 _w3: Web3 instance (used for hashing/hex encoding)
    :return: Private key as a hex string without the '0x' prefix
    :rtype: str
    """
    import secrets  # local import keeps the module's public surface unchanged

    privateKey = _w3.toHex(_w3.sha3(secrets.token_bytes(32)))
    if privateKey[:2] == '0x':
        privateKey = privateKey[2:]
    return (privateKey)
def new_car_account(_w3: Web3) -> None:
    """
    Create a new address for the car account.

    Generates a fresh private key, stores it in the account database file
    and prints the derived Ethereum address to stdout.

    :param Web3 _w3: Web3 instance
    """
    privateKey = generate_private_key(_w3)
    data = {"key": privateKey}
    utils.write_data_base(data, ACCOUNT_DB_NAME)  # persist the key to car.json
    print(f"{bcolors.HEADER}{_w3.eth.account.privateKeyToAccount(data['key']).address}{bcolors.ENDC}")
def get_car_account_from_db(_w3: Web3) -> str:
    """
    Get car account from database

    :param Web3 _w3: Web3 instance
    :return: Ethereum address derived from the stored private key
    :rtype: str
    """
    return (_w3.eth.account.privateKeyToAccount(utils.get_data_from_db(ACCOUNT_DB_NAME, 'key')).address)
def register_car(_w3: Web3):
    """
    Register new car

    Builds, signs and sends a registerCar() transaction to the management
    contract using the private key from the account database.

    :param Web3 _w3: Web3 instance
    :return: Human-readable status message (success/failure, possibly colorized)
    :rtype: str
    """
    data = utils.open_data_base(MGMT_CONTRACT_DB_NAME)
    if data is None:
        return f'{bcolors.FAIL}Cannot access management contract database{bcolors.ENDC}'
    data = CONFIG
    if data is None:
        return f'{bcolors.FAIL}Cannot access account database{bcolors.ENDC}'
    private_key = data['key']
    mgmt_contract = utils.init_management_contract(_w3)
    car_address = _w3.eth.account.privateKeyToAccount(private_key).address

    # Rough upper bound for the gas the registration call may use.
    registration_required_gas = 50000
    gas_price = utils.get_actual_gas_price(_w3)
    # Bail out early if the account cannot afford the transaction.
    if registration_required_gas * gas_price > _w3.eth.getBalance(car_address):
        return 'No enough funds to send transaction'

    nonce = _w3.eth.getTransactionCount(car_address)
    tx = {'gasPrice': gas_price, 'nonce': nonce}
    regTx = mgmt_contract.functions.registerCar().buildTransaction(tx)
    signTx = _w3.eth.account.signTransaction(regTx, private_key)
    txHash = _w3.eth.sendRawTransaction(signTx.rawTransaction)
    # Wait up to 120 s (polling every 0.1 s) for the transaction receipt.
    # NOTE(review): this goes through the module-level `web3.eth` helper
    # rather than the `_w3` instance API — confirm it matches the installed
    # web3 version.
    receipt = web3.eth.wait_for_transaction_receipt(_w3, txHash, 120, 0.1)
    if receipt.status == 1:
        return f'{bcolors.OKGREEN}Registered successfully{bcolors.ENDC}'
    else:
        return f'{bcolors.FAIL}Car registration failed{bcolors.ENDC}'
def create_parser() -> argparse.ArgumentParser:
    """
    Build the command-line argument parser for the car management tool.

    :return: Parser
    :rtype: argparse.ArgumentParser
    """
    parser = argparse.ArgumentParser(
        description='Car management tool',
        epilog="""
It is expected that Web3 provider specified by WEB3_PROVIDER_URI
environment variable. E.g.
WEB3_PROVIDER_URI=file:///path/to/node/rpc-json/file.ipc
WEB3_PROVIDER_URI=http://192.168.1.2:8545
"""
    )
    # Boolean action flags.
    for flag, help_text in (
        ('--new', 'Generate a new account for the particular AGV'),
        ('--account', 'Get identificator (Ethereum address) of AGV from the private key stored in car.json'),
        ('--reg', 'Register the vehicle in the chain'),
    ):
        parser.add_argument(flag, action='store_true', required=False, help=help_text)
    # Value-taking options.
    parser.add_argument('--verify', type=str, required=False, help='Verify battery')
    parser.add_argument('--initiate_replacement', nargs=2, required=False,
                        help='Initiate deal <car_battery> <sc_battery>')
    return parser
def ask_for_replacement(car_battery_id: str, sc_battery_id: str, car_address: str) -> None:
    """
    Ask service center for replacement approval

    Invokes scenter.py in a subprocess; the service center is expected to
    write its decision to replacement.json, which the caller reads.
    Exits the process if scenter.py does not exist.

    :param str car_battery_id: Car's battery
    :param str sc_battery_id: Service center's battery
    :param str car_address: Car's blockchain address
    :return: Nothing
    :rtype: None
    """
    if os.path.exists(f"scenter.py"):
        subprocess.run(
            [
                "python",
                "scenter.py",
                "--approve_replacement",
                f"{car_battery_id}",
                f"{sc_battery_id}",
                f"{car_address}",
            ]
        )
    else:
        sys.exit(f"{bcolors.FAIL}The asked service center does not exists{bcolors.ENDC}")
def get_sc_address() -> str:
    """
    Get address of the service center

    Shells out to scenter.py and returns its stdout with the trailing
    newline stripped.

    return: Service center's address
    rtype: str
    """
    command = "python scenter.py --get_address".split(' ')
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    return result.stdout[:-1]  # drop the trailing newline
def transfer_battery_to_sc(w3: Web3, car_battery_id: str, sc_address: str):
    """
    Transfer battery ownership to the service center on-chain.

    Exits the process when the transaction receipt signals failure
    (the car does not own the battery).

    :param Web3 w3: Web3 instance
    :param str car_battery_id: Car's battery id (hex string)
    :param str sc_address: Service center's blockchain address
    return: Nothing
    rtype: None
    """
    data = utils.open_data_base(MGMT_CONTRACT_DB_NAME)
    if data is None:
        return 'Cannot access management contract database'
    data = utils.open_data_base(ACCOUNT_DB_NAME)
    if data is None:
        return 'Cannot access account database'
    private_key = data['key']
    battery_mgmt_contract_addr = utils.get_battery_managment_contract_addr(w3)
    battery_mgmt_contract = utils.init_battery_management_contract(w3, battery_mgmt_contract_addr)
    car_address = w3.eth.account.privateKeyToAccount(private_key).address
    gas_price = utils.get_actual_gas_price(w3)
    nonce = w3.eth.getTransactionCount(car_address)
    # NOTE(review): gas = 2204 * 68 + 21000 — presumably payload bytes times
    # per-byte gas plus the base transaction cost; confirm the estimate.
    tx = {'gasPrice': gas_price, 'nonce': nonce, 'gas': 2204 * 68 + 21000}
    reg_tx = battery_mgmt_contract.functions.transfer(sc_address, decode_hex(car_battery_id)).buildTransaction(tx)
    sign_tx = w3.eth.account.signTransaction(reg_tx, private_key)
    tx_hash = w3.eth.sendRawTransaction(sign_tx.rawTransaction)
    # Wait up to 120 s (polling every 0.1 s) for the transaction receipt.
    receipt = web3.eth.wait_for_transaction_receipt(w3, tx_hash, 120, 0.1)
    if receipt.status != 1:
        sys.exit(f"{bcolors.FAIL}The car does not own this battery!{bcolors.ENDC}")
def get_new_battery(car_account: str, car_battery_id: str, sc_battery_id) -> float:
    """
    Call battery replacement in service center

    Runs scenter.py in a subprocess and parses its stdout as the cost.

    :param str car_account: Car account
    :param str car_battery_id: Car's battery id
    :param sc_battery_id: Service center's battery id
    :return: Work's cost
    :rtype: float
    """
    command = f"python scenter.py --transfer_battery_to_car {car_account} {car_battery_id} {sc_battery_id}".split(' ')
    result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
    # stdout is expected to be a single number followed by a newline.
    return float(result.stdout[:-1])
def initiate_replacement(w3: Web3, car_battery_id: str, sc_battery_id: str) -> float:
    """
    Initiate battery replacement

    Full happy-path flow: verify the replacement battery, ask the service
    center for approval, transfer the old battery to it, and wait for the
    new battery to be installed.  Exits the process on any failure.
    The "\\033[F"/"\\033[K" writes rewrite the previous terminal line to turn
    progress messages into checkmarked status lines.

    :param Web3 w3: Web3 instance
    :param str car_battery_id: Car's battery
    :param str sc_battery_id: Service center's battery
    :return: Cost of the work in ether, as reported by the service center
    :rtype: float
    """
    # Firmware file names are derived from the first 8 hex chars of each id.
    # NOTE(review): the two path variables look swapped relative to their
    # names (sc path built from car_battery_id and vice versa) — verify.
    sc_battery_id_path = f"firmware/{car_battery_id[:8]}.py"
    car_battery_id_path = f"firmware/{sc_battery_id[:8]}.py"
    print("Verifying battery...")
    data = utils.verify_battery(w3, sc_battery_id_path)
    if not data[0]:
        sys.exit(f"{bcolors.FAIL}The battery is fake{bcolors.ENDC}")
    sys.stdout.write("\033[F")  # back to previous line
    sys.stdout.write("\033[K")  # clear line
    print(f"Verifying battery...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713')
    print("Asking service center for replacement...")
    ask_for_replacement(car_battery_id, sc_battery_id, get_car_account_from_db(w3))
    # The service center communicates its decision via replacement.json.
    message = utils.open_data_base('replacement.json')
    if message is None:
        sys.exit(f"{bcolors.FAIL}Somethong went wrong...{bcolors.ENDC}")
    if not message['approved']:
        sys.exit(message['error'])
    sys.stdout.write("\033[F")  # back to previous line
    sys.stdout.write("\033[K")  # clear line
    print(f"Asking service center for replacement...{bcolors.OKGREEN}Approved{bcolors.ENDC}", u'\u2713')
    print("Getting address of the service center...")
    sc_address = get_sc_address()
    sys.stdout.write("\033[F")  # back to previous line
    sys.stdout.write("\033[K")  # clear line
    print(f"Getting address of the service center...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713')
    print("Transferring battery to the service center...")
    transfer_battery_to_sc(w3, car_battery_id, sc_address)
    sys.stdout.write("\033[F")  # back to previous line
    sys.stdout.write("\033[K")  # clear line
    print(f"Transferring battery to the service center...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713')
    print("Waiting for new battery installation...")
    result = get_new_battery(get_car_account_from_db(w3), car_battery_id, sc_battery_id)
    sys.stdout.write("\033[F")  # back to previous line
    sys.stdout.write("\033[K")  # clear line
    print(f"Battery was installed...{bcolors.OKGREEN}Success{bcolors.ENDC}", u'\u2713')
    return result
def main():
    """Entry point: parse CLI arguments and dispatch to the chosen action."""
    w3 = Web3(Web3.HTTPProvider(URL))

    # configure provider to work with PoA chains
    w3.middleware_onion.inject(geth_poa_middleware, layer=0)

    parser = create_parser()
    args = parser.parse_args()

    # Exactly one action is executed per invocation, in priority order.
    if args.new:
        new_car_account(w3)
    elif args.account:
        print(get_car_account_from_db(w3))
    elif args.reg:
        print(register_car(w3))
    elif args.verify:
        data = utils.verify_battery(w3, args.verify)
        print(f"Verified: {data[0]}")
        print(f"Total charges: {data[1]}")
        print(f"Vendor id: {data[2]}")
        print(f"Vendor name: {data[3]}")
    elif args.initiate_replacement:
        cost = initiate_replacement(w3, args.initiate_replacement[0], args.initiate_replacement[1])
        print(f"Cost of work: {cost} eth")


if __name__ == "__main__":
    main()
| acid9reen/bas | car.py | car.py | py | 10,656 | python | en | code | 0 | github-code | 36 |
# Codeforces 1176B: for each test case, count elements by residue mod 3 and
# print the maximum number of groups whose sums are divisible by 3.
t = int(input())
for _ in range(t):
    n = int(input())  # element count; the line must be consumed even though
    # the value itself is not needed for the computation
    values = map(int, input().split(" "))
    counts = [0, 0, 0]
    for v in values:
        counts[v % 3] += 1
    # Pair one residue-1 element with one residue-2 element per group.
    pairs = min(counts[1], counts[2])
    counts[1] -= pairs
    counts[2] -= pairs
    # Every residue-0 element forms a group alone; leftovers combine in triples.
    print(pairs + counts[0] + counts[2] // 3 + counts[1] // 3)
| af-orozcog/competitiveProgramming | CodeForces/div3/1176B.py | 1176B.py | py | 313 | python | en | code | 0 | github-code | 36 |
17582018412 | import sys
import typing as t
import importlib
from pathlib import Path
import pkg_resources
from starwhale.utils import console
from starwhale.utils.venv import (
guess_current_py_env,
get_user_python_sys_paths,
check_python_interpreter_consistency,
)
def import_object(
    workdir: t.Union[Path, str], handler_path: str, py_env: str = ""
) -> t.Any:
    """Import and return the object named by ``handler_path`` ("module:attr").

    The workdir (and, when the runtime interpreter differs from the current
    one, the runtime environment's sys.path entries) are prepended to
    sys.path; on failure the changes are rolled back and the exception
    re-raised.
    """
    workdir_path = str(Path(workdir).absolute())
    external_paths = [workdir_path]

    py_env = py_env or guess_current_py_env()
    _ok, _cur_py, _ex_py = check_python_interpreter_consistency(py_env)
    if not _ok:
        # Interpreter mismatch: borrow the runtime env's sys.path entries so
        # the handler's dependencies resolve under the current interpreter.
        console.print(
            f":speaking_head: [red]swcli python prefix:{_cur_py}, runtime env python prefix:{_ex_py}[/], swcli will inject sys.path"
        )
        external_paths.extend(get_user_python_sys_paths(py_env))

    prev_paths = sys.path[:]  # snapshot for rollback on failure
    sys_changed = False
    # Prepend in reverse so the final sys.path order matches external_paths.
    for _path in external_paths[::-1]:
        if _path not in sys.path:
            sys.path.insert(0, _path)
            pkg_resources.working_set.add_entry(_path)
            sys_changed = True

    try:
        module_name, handler_name = handler_path.split(":", 1)
        console.print(
            f":speaking_head: [green]import module:{module_name}, handler:{handler_name}[/]"
        )
        _module = importlib.import_module(module_name, package=workdir_path)
        # NOTE(review): a falsy handler value (e.g. 0 or "") is also treated
        # as missing here — confirm that is acceptable.
        _obj = getattr(_module, handler_name, None)
        if not _obj:
            raise ModuleNotFoundError(f"{handler_path}")
    except Exception:
        console.print_exception()
        if sys_changed:
            sys.path[:] = prev_paths  # roll back our sys.path changes
        raise

    return _obj
def load_module(module: str, path: Path) -> t.Any:
    """Import and return *module*, making *path* importable first."""
    workdir = str(path.absolute())
    # Prepend the workdir so it wins over identically named installed modules.
    if workdir not in sys.path:
        sys.path.insert(0, workdir)
        pkg_resources.working_set.add_entry(workdir)
    return importlib.import_module(module, package=workdir)
| star-whale/starwhale | client/starwhale/utils/load.py | load.py | py | 1,988 | python | en | code | 171 | github-code | 36 |
38666222212 | from __future__ import absolute_import
import logging
import string
from zipfile import ZipFile, ZIP_STORED, ZIP_DEFLATED
import re
# py2 vs py3 transition
from ..six import text_type as unicode
from ..six import string_types as basestring
from ..six import ensure_binary
from io import BytesIO
## XML isn't as forgiving as HTML, so rather than generate as strings,
## use DOM to generate the XML files.
from xml.dom.minidom import getDOMImplementation
import bs4
from .base_writer import BaseStoryWriter
from ..htmlcleanup import stripHTML,removeEntities
from ..story import commaGroups
logger = logging.getLogger(__name__)
class EpubWriter(BaseStoryWriter):
@staticmethod
def getFormatName():
return 'epub'
@staticmethod
def getFormatExt():
return '.epub'
def __init__(self, config, story):
BaseStoryWriter.__init__(self, config, story)
self.EPUB_CSS = string.Template('''${output_css}''')
self.EPUB_TITLE_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>${title} by ${author}</title>
<link href="stylesheet.css" type="text/css" rel="stylesheet"/>
</head>
<body class="fff_titlepage">
<h3><a href="${storyUrl}">${title}</a> by ${authorHTML}</h3>
<div>
''')
self.EPUB_TITLE_ENTRY = string.Template('''
<b>${label}:</b> ${value}<br />
''')
self.EPUB_NO_TITLE_ENTRY = string.Template('''
${value}<br />
''')
self.EPUB_TITLE_PAGE_END = string.Template('''
</div>
</body>
</html>
''')
self.EPUB_TABLE_TITLE_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>${title} by ${author}</title>
<link href="stylesheet.css" type="text/css" rel="stylesheet"/>
</head>
<body class="fff_titlepage">
<h3><a href="${storyUrl}">${title}</a> by ${authorHTML}</h3>
<table class="full">
''')
self.EPUB_TABLE_TITLE_ENTRY = string.Template('''
<tr><td><b>${label}:</b></td><td>${value}</td></tr>
''')
self.EPUB_TABLE_TITLE_WIDE_ENTRY = string.Template('''
<tr><td colspan="2"><b>${label}:</b> ${value}</td></tr>
''')
self.EPUB_TABLE_NO_TITLE_ENTRY = string.Template('''
<tr><td colspan="2">${label}${value}</td></tr>
''')
self.EPUB_TABLE_TITLE_PAGE_END = string.Template('''
</table>
</body>
</html>
''')
self.EPUB_TOC_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>${title} by ${author}</title>
<link href="stylesheet.css" type="text/css" rel="stylesheet"/>
</head>
<body class="fff_tocpage">
<div>
<h3>Table of Contents</h3>
''')
self.EPUB_TOC_ENTRY = string.Template('''
<a href="file${index04}.xhtml">${chapter}</a><br />
''')
self.EPUB_TOC_PAGE_END = string.Template('''
</div>
</body>
</html>
''')
self.EPUB_CHAPTER_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>${chapter}</title>
<link href="stylesheet.css" type="text/css" rel="stylesheet"/>
<meta name="chapterurl" content="${url}" />
<meta name="chapterorigtitle" content="${origchapter}" />
<meta name="chaptertoctitle" content="${tocchapter}" />
<meta name="chaptertitle" content="${chapter}" />
</head>
<body class="fff_chapter">
<h3 class="fff_chapter_title">${chapter}</h3>
''')
self.EPUB_CHAPTER_END = string.Template('''
</body>
</html>
''')
self.EPUB_LOG_PAGE_START = string.Template('''<?xml version="1.0" encoding="UTF-8"?>
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<title>Update Log</title>
<link href="stylesheet.css" type="text/css" rel="stylesheet"/>
</head>
<body class="fff_logpage">
<h3>Update Log</h3>
''')
self.EPUB_LOG_UPDATE_START = string.Template('''
<p class='log_entry'>
''')
self.EPUB_LOG_ENTRY = string.Template('''
<b>${label}:</b> <span id="${id}">${value}</span>
''')
self.EPUB_LOG_UPDATE_END = string.Template('''
</p>
<hr/>
''')
self.EPUB_LOG_PAGE_END = string.Template('''
</body>
</html>
''')
self.EPUB_LOG_PAGE_END = string.Template('''
</body>
</html>
''')
self.EPUB_COVER = string.Template('''
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"><head><title>Cover</title><style type="text/css" title="override_css">
@page {padding: 0pt; margin:0pt}
body { text-align: center; padding:0pt; margin: 0pt; }
div { margin: 0pt; padding: 0pt; }
</style></head><body class="fff_coverpage"><div>
<img src="${coverimg}" alt="cover"/>
</div></body></html>
''')
    def writeLogPage(self, out):
        """
        Write the update-log page to ``out``.

        If the story already has a log page (``self.story.logfile``), the
        newest entry is spliced in just before the closing </body> tag of
        the existing page.  Otherwise a fresh page is written from the
        START/ENTRY/END templates.  START and END may be overridden with
        the ``logpage_start`` / ``logpage_end`` config settings; they use
        the same names as Story.metadata, while ENTRY (see _makeLogEntry)
        uses id, label and value.
        """
        # Pick config-supplied templates when present, else the defaults.
        if self.hasConfig("logpage_start"):
            START = string.Template(self.getConfig("logpage_start"))
        else:
            START = self.EPUB_LOG_PAGE_START
        if self.hasConfig("logpage_end"):
            END = string.Template(self.getConfig("logpage_end"))
        else:
            END = self.EPUB_LOG_PAGE_END
        # if there's a self.story.logfile, there's an existing log
        # to add to.
        if self.story.logfile:
            logger.debug("existing logfile found, appending")
            # logger.debug("existing data:%s"%self._getLastLogData(self.story.logfile))
            # Insert the new entry in front of </body>, preserving the
            # rest of the existing log page unchanged.
            replace_string = "</body>" # "</h3>"
            self._write(out,self.story.logfile.replace(replace_string,self._makeLogEntry(self._getLastLogData(self.story.logfile))+replace_string))
        else:
            # otherwise, write a new one.
            self._write(out,START.substitute(self.story.getAllMetadata()))
            self._write(out,self._makeLogEntry())
            self._write(out,END.substitute(self.story.getAllMetadata()))
# self parsing instead of Soup because it should be simple and not
# worth the overhead.
def _getLastLogData(self,logfile):
"""
Make a dict() of the most recent(last) log entry for each piece of metadata.
Switch rindex to index to search from top instead of bottom.
"""
values = {}
for entry in self.getConfigList("logpage_entries") + self.getConfigList("extra_logpage_entries"):
try:
# <span id="dateUpdated">1975-04-15</span>
span = '<span id="%s">'%entry
idx = logfile.rindex(span)+len(span)
values[entry] = logfile[idx:logfile.index('</span>\n',idx)]
except Exception as e:
#print("e:%s"%e)
pass
return values
def _makeLogEntry(self, oldvalues={}):
if self.hasConfig("logpage_update_start"):
START = string.Template(self.getConfig("logpage_update_start"))
else:
START = self.EPUB_LOG_UPDATE_START
if self.hasConfig("logpage_entry"):
ENTRY = string.Template(self.getConfig("logpage_entry"))
else:
ENTRY = self.EPUB_LOG_ENTRY
if self.hasConfig("logpage_update_end"):
END = string.Template(self.getConfig("logpage_update_end"))
else:
END = self.EPUB_LOG_UPDATE_END
retval = START.substitute(self.story.getAllMetadata())
## words_added is only used in logpage because it's the only
## place we know the previous version's word count.
if 'words_added' in (self.getConfigList("logpage_entries") + self.getConfigList("extra_logpage_entries")):
new_words = self.story.getMetadata('numWords')
old_words = oldvalues.get('numWords',None)
if new_words and old_words:
self.story.setMetadata('words_added',commaGroups(unicode(int(new_words.replace(',',''))-int(old_words.replace(',','')))))
for entry in self.getConfigList("logpage_entries") + self.getConfigList("extra_logpage_entries"):
if self.isValidMetaEntry(entry):
val = self.story.getMetadata(entry)
if val and ( entry not in oldvalues or val != oldvalues[entry] ):
label=self.get_label(entry)
# if self.hasConfig(entry+"_label"):
# label=self.getConfig(entry+"_label")
# elif entry in self.titleLabels:
# logger.debug("Using fallback label for %s_label"%entry)
# label=self.titleLabels[entry]
# else:
# label="%s"%entry.title()
# logger.debug("No known label for %s, fallback to '%s'"%(entry,label))
retval = retval + ENTRY.substitute({'id':entry,
'label':label,
'value':val})
else:
# could be useful for introducing extra text, but
# mostly it makes it easy to tell when you get the
# keyword wrong.
retval = retval + entry
retval = retval + END.substitute(self.story.getAllMetadata())
if self.getConfig('replace_hr'):
# replacing a self-closing tag with a container tag in the
# soup is more difficult than it first appears. So cheat.
retval = re.sub("<hr[^>]*>","<div class='center'>* * *</div>",retval)
return retval
    def writeStoryImpl(self, out):
        """
        Assemble the complete epub in memory and write it to ``out``.

        Builds, in order: the uncompressed mimetype entry,
        META-INF/container.xml, content.opf (metadata + manifest + spine,
        epub 2 or 3 depending on the epub_version config), toc.ncx,
        nav.xhtml (epub3 only), stylesheet.css, optional cover / title /
        toc / log pages, one xhtml file per chapter, and an optional
        calibre bookmarks file.  Everything goes through an in-memory
        BytesIO zip which is written to ``out`` at the end.
        """
        # Decide whether to reuse the cover from a previous epub of this
        # story (use_old_cover config) or the newly fetched one.
        if self.story.oldcover and \
           ( (self.getConfig('use_old_cover') and
              self.story.getMetadata('cover_image') != 'force' ) or not self.story.cover ):
            # logger.debug("use_old_cover:%s"%self.getConfig('use_old_cover'))
            self.use_oldcover = True
            self.story.setMetadata('cover_image','old')
        else:
            self.use_oldcover = False
        ## Python 2.5 ZipFile is rather more primative than later
        ## versions.  It can operate on a file, or on a BytesIO, but
        ## not on an open stream.  OTOH, I suspect we would have had
        ## problems with closing and opening again to change the
        ## compression type anyway.
        zipio = BytesIO()
        ## mimetype must be first file and uncompressed.  Python 2.5
        ## ZipFile can't change compression type file-by-file, so we
        ## have to close and re-open
        outputepub = ZipFile(zipio, 'w', compression=ZIP_STORED)
        outputepub.debug=3
        outputepub.writestr('mimetype','application/epub+zip')
        outputepub.close()
        ## Re-open file for content.
        outputepub = ZipFile(zipio, 'a', compression=ZIP_DEFLATED)
        outputepub.debug=3
        ## Create META-INF/container.xml file.  The only thing it does is
        ## point to content.opf
        containerdom = getDOMImplementation().createDocument(None, "container", None)
        containertop = containerdom.documentElement
        containertop.setAttribute("version","1.0")
        containertop.setAttribute("xmlns","urn:oasis:names:tc:opendocument:xmlns:container")
        rootfiles = containerdom.createElement("rootfiles")
        containertop.appendChild(rootfiles)
        rootfiles.appendChild(newTag(containerdom,"rootfile",{"full-path":"content.opf",
                                                              "media-type":"application/oebps-package+xml"}))
        outputepub.writestr("META-INF/container.xml",containerdom.toxml(encoding='utf-8'))
        containerdom.unlink()
        del containerdom
        ## Epub has two metadata files with real data.  We're putting
        ## them in content.opf (pointed to by META-INF/container.xml)
        ## and toc.ncx (pointed to by content.opf)
        ## content.opf contains metadata, a 'manifest' list of all
        ## other included files, and another 'spine' list of the items in the
        ## file
        uniqueid= 'fanficfare-uid:%s-u%s-s%s' % (
            self.getMetadata('site'),
            self.story.getList('authorId')[0],
            self.getMetadata('storyId'))
        contentdom = getDOMImplementation().createDocument(None, "package", None)
        package = contentdom.documentElement
        ## might want 3.1 or something in future.
        epub3 = self.getConfig("epub_version",default="2.0").startswith("3")
        if epub3:
            package.setAttribute("version","3.0")
        else:
            package.setAttribute("version","2.0")
        logger.info("Saving EPUB Version "+package.getAttribute("version"))
        package.setAttribute("xmlns","http://www.idpf.org/2007/opf")
        package.setAttribute("unique-identifier","fanficfare-uid")
        metadata=newTag(contentdom,"metadata",
                        attrs={"xmlns:dc":"http://purl.org/dc/elements/1.1/",
                               "xmlns:opf":"http://www.idpf.org/2007/opf"})
        package.appendChild(metadata)
        metadata.appendChild(newTag(contentdom,"dc:identifier",
                                    text=uniqueid,
                                    attrs={"id":"fanficfare-uid"}))
        if self.getMetadata('title'):
            metadata.appendChild(newTag(contentdom,"dc:title",text=self.getMetadata('title'),
                                        attrs={"id":"id"}))
        # epub2 marks authors with opf:role; epub3 instead uses numbered
        # ids refined by <meta property="role"> entries added below.
        def creator_attrs(idnum):
            if epub3:
                return {"id":"id-%d"%idnum}
            else:
                return {"opf:role":"aut"}
        idnum = 1
        if self.getMetadata('author'):
            if self.story.isList('author'):
                for auth in self.story.getList('author'):
                    metadata.appendChild(newTag(contentdom,"dc:creator",
                                                attrs=creator_attrs(idnum),
                                                text=auth))
                    idnum += 1
            else:
                metadata.appendChild(newTag(contentdom,"dc:creator",
                                            attrs=creator_attrs(idnum),
                                            text=self.getMetadata('author')))
                idnum += 1
        metadata.appendChild(newTag(contentdom,"dc:contributor",text="FanFicFare [https://github.com/JimmXinu/FanFicFare]",
                                    attrs={"id":"id-%d"%idnum}))
        idnum += 1
        # metadata.appendChild(newTag(contentdom,"dc:rights",text=""))
        if self.story.getMetadata('langcode'):
            langcode=self.story.getMetadata('langcode')
        else:
            langcode='en'
        metadata.appendChild(newTag(contentdom,"dc:language",text=langcode))
        #  published, created, updated, calibre
        #  Leave calling self.story.getMetadataRaw directly in case date format changes.
        if epub3:
            ## epub3 requires an updated modified date on every change of
            ## any kind, not just *content* change.
            from datetime import datetime
            metadata.appendChild(newTag(contentdom,"meta",
                                        attrs={"property":"dcterms:modified"},
                                        text=datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%SZ")))
        else:
            if self.story.getMetadataRaw('datePublished'):
                metadata.appendChild(newTag(contentdom,"dc:date",
                                            attrs={"opf:event":"publication"},
                                            text=self.story.getMetadataRaw('datePublished').strftime("%Y-%m-%d")))
            if self.story.getMetadataRaw('dateCreated'):
                metadata.appendChild(newTag(contentdom,"dc:date",
                                            attrs={"opf:event":"creation"},
                                            text=self.story.getMetadataRaw('dateCreated').strftime("%Y-%m-%d")))
            if self.story.getMetadataRaw('dateUpdated'):
                metadata.appendChild(newTag(contentdom,"dc:date",
                                            attrs={"opf:event":"modification"},
                                            text=self.story.getMetadataRaw('dateUpdated').strftime("%Y-%m-%d")))
                metadata.appendChild(newTag(contentdom,"meta",
                                            attrs={"name":"calibre:timestamp",
                                                   "content":self.story.getMetadataRaw('dateUpdated').strftime("%Y-%m-%dT%H:%M:%S")}))
        # Optionally emit calibre series metadata, splitting FFF's
        # "series [index]" form into name + index.
        series = self.story.getMetadata('series')
        if series and self.getConfig('calibre_series_meta'):
            series_index = "0.0"
            if '[' in series:
                # logger.debug(series)
                ## assumed "series [series_index]"
                series_index = series[series.rindex(' [')+2:-1]
                series = series[:series.rindex(' [')]
                ## calibre always outputs a series_index and it's
                ## always a float with 1 or 2 decimals.  FFF usually
                ## has either an integer or no index. (injected
                ## calibre series is the only float at this time)
                series_index = "%.2f" % float(series_index)
            metadata.appendChild(newTag(contentdom,"meta",
                                        attrs={"name":"calibre:series",
                                               "content":series}))
            metadata.appendChild(newTag(contentdom,"meta",
                                        attrs={"name":"calibre:series_index",
                                               "content":series_index}))
        if self.getMetadata('description'):
            metadata.appendChild(newTag(contentdom,"dc:description",text=
                                        self.getMetadata('description')))
        for subject in self.story.getSubjectTags():
            metadata.appendChild(newTag(contentdom,"dc:subject",text=subject))
        if self.getMetadata('site'):
            metadata.appendChild(newTag(contentdom,"dc:publisher",
                                        text=self.getMetadata('site')))
        if self.getMetadata('storyUrl'):
            if epub3:
                metadata.appendChild(newTag(contentdom,"dc:identifier",
                                            text="URL:"+self.getMetadata('storyUrl')))
            else:
                metadata.appendChild(newTag(contentdom,"dc:identifier",
                                            attrs={"opf:scheme":"URL"},
                                            text=self.getMetadata('storyUrl')))
            metadata.appendChild(newTag(contentdom,"dc:source",
                                        text=self.getMetadata('storyUrl')))
        if epub3:
            # <meta refines="#id" property="title-type">main</meta>
            metadata.appendChild(newTag(contentdom,"meta",
                                        attrs={"property":"title-type",
                                               "refines":"#id",
                                               },
                                        text="main"))
            # epub3 removes attrs that identify dc:creator and
            # dc:contributor types and instead put them here.
            # 'aut' for 1-(idnum-1)
            for j in range(1,idnum-1):
                #<meta property="role" refines="#id-1" scheme="marc:relators">aut</meta>
                metadata.appendChild(newTag(contentdom,"meta",
                                            attrs={"property":"role",
                                                   "refines":"#id-%d"%j,
                                                   "scheme":"marc:relators",
                                                   },
                                            text="aut"))
            # the last id is the FanFicFare dc:contributor ('bkp' = book producer)
            metadata.appendChild(newTag(contentdom,"meta",
                                        attrs={"property":"role",
                                               "refines":"#id-%d"%(idnum-1),
                                               "scheme":"marc:relators",
                                               },
                                        text="bkp"))
        ## end of metadata, create manifest.
        items = [] # list of (id, href, type, title) tuples(all strings)
        itemrefs = [] # list of strings -- idrefs from .opfs' spines
        items.append(("ncx","toc.ncx","application/x-dtbncx+xml",None)) ## we'll generate the toc.ncx file,
                                                                        ## but it needs to be in the items manifest.
        guide = None
        coverIO = None
        coverimgid = "image0000"
        if self.use_oldcover:
            logger.debug("using old cover")
            (oldcoverhtmlhref,
             oldcoverhtmltype,
             oldcoverhtmldata,
             oldcoverimghref,
             oldcoverimgtype,
             oldcoverimgdata) = self.story.oldcover
            outputepub.writestr(oldcoverhtmlhref,oldcoverhtmldata)
            outputepub.writestr(oldcoverimghref,oldcoverimgdata)
            coverimgid = "image0"
            items.append((coverimgid,
                          oldcoverimghref,
                          oldcoverimgtype,
                          None))
            items.append(("cover",oldcoverhtmlhref,oldcoverhtmltype,None))
            itemrefs.append("cover")
            metadata.appendChild(newTag(contentdom,"meta",{"content":"image0",
                                                           "name":"cover"}))
            guide = newTag(contentdom,"guide")
            guide.appendChild(newTag(contentdom,"reference",attrs={"type":"cover",
                                                                   "title":"Cover",
                                                                   "href":oldcoverhtmlhref}))
        if self.getConfig('include_images'):
            imgcount=0
            for imgmap in self.story.getImgUrls():
                imgfile = "OEBPS/"+imgmap['newsrc']
                # don't overwrite old cover.
                if not self.use_oldcover or imgfile != oldcoverimghref:
                    outputepub.writestr(imgfile,imgmap['data'])
                    items.append(("image%04d"%imgcount,
                                  imgfile,
                                  imgmap['mime'],
                                  None))
                    imgcount+=1
                    if 'cover' in imgfile:
                        # make sure coverimgid is set to the cover, not
                        # just the first image.
                        coverimgid = items[-1][0]
        items.append(("style","OEBPS/stylesheet.css","text/css",None))
        if self.story.cover and not self.use_oldcover:
            # Note that the id of the cover xhmtl *must* be 'cover'
            # for it to work on Nook.
            items.append(("cover","OEBPS/cover.xhtml","application/xhtml+xml",None))
            itemrefs.append("cover")
            #
            # <meta name="cover" content="cover.jpg"/>
            metadata.appendChild(newTag(contentdom,"meta",{"content":coverimgid,
                                                           "name":"cover"}))
            # cover stuff for later:
            # at end of <package>:
            # <guide>
            # <reference type="cover" title="Cover" href="Text/cover.xhtml"/>
            # </guide>
            guide = newTag(contentdom,"guide")
            guide.appendChild(newTag(contentdom,"reference",attrs={"type":"cover",
                                                                   "title":"Cover",
                                                                   "href":"OEBPS/cover.xhtml"}))
            if self.hasConfig("cover_content"):
                COVER = string.Template(self.getConfig("cover_content"))
            else:
                COVER = self.EPUB_COVER
            coverIO = BytesIO()
            self._write(coverIO,COVER.substitute(dict(list(self.story.getAllMetadata().items())+list({'coverimg':self.story.cover}.items()))))
        if self.getConfig("include_titlepage"):
            items.append(("title_page","OEBPS/title_page.xhtml","application/xhtml+xml","Title Page"))
            itemrefs.append("title_page")
        if self.story.getChapterCount() > 1 and self.getConfig("include_tocpage") and not self.metaonly :
            items.append(("toc_page","OEBPS/toc_page.xhtml","application/xhtml+xml","Table of Contents"))
            itemrefs.append("toc_page")
        ## save where to insert logpage.
        logpage_indices = (len(items),len(itemrefs))
        dologpage = ( self.getConfig("include_logpage") == "smart" and \
                          (self.story.logfile or self.story.getMetadataRaw("status") == "In-Progress") )  \
                          or self.getConfig("include_logpage") == "true"
        ## collect chapter urls and file names for internalize_text_links option.
        chapurlmap = {}
        for index, chap in enumerate(self.story.getChapters(fortoc=True)):
            if chap['html']:
                i=index+1
                items.append(("file%s"%chap['index04'],
                              "OEBPS/file%s.xhtml"%chap['index04'],
                              "application/xhtml+xml",
                              chap['title']))
                itemrefs.append("file%s"%chap['index04'])
                chapurlmap[chap['url']]="file%s.xhtml"%chap['index04'] # url -> relative epub file name.
        if dologpage:
            if self.getConfig("logpage_at_end") == "true":
                ## insert logpage after chapters.
                logpage_indices = (len(items),len(itemrefs))
            items.insert(logpage_indices[0],("log_page","OEBPS/log_page.xhtml","application/xhtml+xml","Update Log"))
            itemrefs.insert(logpage_indices[1],"log_page")
        # Emit the <manifest> entry for every collected item.
        manifest = contentdom.createElement("manifest")
        package.appendChild(manifest)
        for item in items:
            (id,href,type,title)=item
            manifest.appendChild(newTag(contentdom,"item",
                                        attrs={'id':id,
                                               'href':href,
                                               'media-type':type}))
        if epub3:
            # epub3 nav
            # <item href="nav.xhtml" id="nav" media-type="application/xhtml+xml" properties="nav"/>
            manifest.appendChild(newTag(contentdom,"item",
                                        attrs={'href':'nav.xhtml',
                                               'id':'nav',
                                               'media-type':'application/xhtml+xml',
                                               'properties':'nav'
                                               }))
        spine = newTag(contentdom,"spine",attrs={"toc":"ncx"})
        package.appendChild(spine)
        for itemref in itemrefs:
            spine.appendChild(newTag(contentdom,"itemref",
                                     attrs={"idref":itemref,
                                            "linear":"yes"}))
        # guide only exists if there's a cover.
        if guide:
            package.appendChild(guide)
        # write content.opf to zip.
        contentxml = contentdom.toxml(encoding='utf-8')
        # tweak for brain damaged Nook STR.  Nook insists on name before content.
        contentxml = contentxml.replace(ensure_binary('<meta content="%s" name="cover"/>'%coverimgid),
                                        ensure_binary('<meta name="cover" content="%s"/>'%coverimgid))
        outputepub.writestr("content.opf",contentxml)
        contentdom.unlink()
        del contentdom
        ## create toc.ncx file
        tocncxdom = getDOMImplementation().createDocument(None, "ncx", None)
        ncx = tocncxdom.documentElement
        ncx.setAttribute("version","2005-1")
        ncx.setAttribute("xmlns","http://www.daisy.org/z3986/2005/ncx/")
        head = tocncxdom.createElement("head")
        ncx.appendChild(head)
        head.appendChild(newTag(tocncxdom,"meta",
                                attrs={"name":"dtb:uid", "content":uniqueid}))
        head.appendChild(newTag(tocncxdom,"meta",
                                attrs={"name":"dtb:depth", "content":"1"}))
        head.appendChild(newTag(tocncxdom,"meta",
                                attrs={"name":"dtb:totalPageCount", "content":"0"}))
        head.appendChild(newTag(tocncxdom,"meta",
                                attrs={"name":"dtb:maxPageNumber", "content":"0"}))
        docTitle = tocncxdom.createElement("docTitle")
        docTitle.appendChild(newTag(tocncxdom,"text",text=self.getMetadata('title')))
        ncx.appendChild(docTitle)
        tocnavMap = tocncxdom.createElement("navMap")
        ncx.appendChild(tocnavMap)
        # <navPoint id="<id>" playOrder="<risingnumberfrom0>">
        #   <navLabel>
        #     <text><chapter title></text>
        #   </navLabel>
        #   <content src="<chapterfile>"/>
        # </navPoint>
        index=0
        for item in items:
            (id,href,type,title)=item
            # only items to be skipped, cover.xhtml, images, toc.ncx, stylesheet.css, should have no title.
            if title :
                navPoint = newTag(tocncxdom,"navPoint",
                                  attrs={'id':id,
                                         'playOrder':unicode(index)})
                tocnavMap.appendChild(navPoint)
                navLabel = newTag(tocncxdom,"navLabel")
                navPoint.appendChild(navLabel)
                ## the xml library will re-escape as needed.
                navLabel.appendChild(newTag(tocncxdom,"text",text=stripHTML(title)))
                navPoint.appendChild(newTag(tocncxdom,"content",attrs={"src":href}))
                index=index+1
        # write toc.ncx to zip file
        outputepub.writestr("toc.ncx",tocncxdom.toxml(encoding='utf-8'))
        tocncxdom.unlink()
        del tocncxdom
        if epub3:
            ##############################################################################################################
            ## create nav.xhtml file
            tocnavdom = getDOMImplementation().createDocument(None, "html", None)
            navxhtml = tocnavdom.documentElement
            navxhtml.setAttribute("xmlns","http://www.w3.org/1999/xhtml")
            navxhtml.setAttribute("xmlns:epub","http://www.idpf.org/2007/ops")
            navxhtml.setAttribute("lang",langcode)
            navxhtml.setAttribute("xml:lang",langcode)
            head = tocnavdom.createElement("head")
            navxhtml.appendChild(head)
            head.appendChild(newTag(tocnavdom,"title",text="Navigation"))
            # <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
            head.appendChild(newTag(tocnavdom,"meta",
                                    attrs={"http-equiv":"Content-Type",
                                           "content":"text/html; charset=utf-8"}))
            body = tocnavdom.createElement("body")
            navxhtml.appendChild(body)
            nav = newTag(tocnavdom,"nav",
                         attrs={"epub:type":"toc"})
            body.appendChild(nav)
            ol = newTag(tocnavdom,"ol")
            nav.appendChild(ol)
            for item in items:
                (id,href,type,title)=item
                # only items to be skipped, cover.xhtml, images, toc.nav,
                # stylesheet.css, should have no title.
                if title:
                    li = newTag(tocnavdom,"li")
                    ol.appendChild(li)
                    atag = newTag(tocnavdom,"a",
                                  attrs={"href":href},
                                  text=stripHTML(title))
                    li.appendChild(atag)
            if self.story.cover and not self.use_oldcover:
                # <nav epub:type="landmarks" hidden="">
                #   <ol>
                #     <li><a href="OEBPS/cover.xhtml" epub:type="cover">Cover</a></li>
                #   </ol>
                # </nav>
                nav = newTag(tocnavdom,"nav",
                             attrs={"epub:type":"landmarks",
                                    "hidden":""})
                body.appendChild(nav)
                ol = newTag(tocnavdom,"ol")
                nav.appendChild(ol)
                li = newTag(tocnavdom,"li")
                ol.appendChild(li)
                atag = newTag(tocnavdom,"a",
                              attrs={"href":"OEBPS/cover.xhtml",
                                     "epub:type":"cover"},
                              text="Cover")
                li.appendChild(atag)
            # write nav.xhtml to zip file
            outputepub.writestr("nav.xhtml",tocnavdom.toxml(encoding='utf-8'))
            tocnavdom.unlink()
            del tocnavdom
            ##############################################################################################################
        # write stylesheet.css file.
        outputepub.writestr("OEBPS/stylesheet.css",self.EPUB_CSS.substitute(self.story.getAllMetadata()))
        # write title page.
        if self.getConfig("titlepage_use_table"):
            TITLE_PAGE_START  = self.EPUB_TABLE_TITLE_PAGE_START
            TITLE_ENTRY       = self.EPUB_TABLE_TITLE_ENTRY
            WIDE_TITLE_ENTRY  = self.EPUB_TABLE_TITLE_WIDE_ENTRY
            NO_TITLE_ENTRY    = self.EPUB_TABLE_NO_TITLE_ENTRY
            TITLE_PAGE_END    = self.EPUB_TABLE_TITLE_PAGE_END
        else:
            TITLE_PAGE_START  = self.EPUB_TITLE_PAGE_START
            TITLE_ENTRY       = self.EPUB_TITLE_ENTRY
            WIDE_TITLE_ENTRY  = self.EPUB_TITLE_ENTRY # same, only wide in tables.
            NO_TITLE_ENTRY    = self.EPUB_NO_TITLE_ENTRY
            TITLE_PAGE_END    = self.EPUB_TITLE_PAGE_END
        if coverIO:
            outputepub.writestr("OEBPS/cover.xhtml",coverIO.getvalue())
            coverIO.close()
        titlepageIO = BytesIO()
        self.writeTitlePage(out=titlepageIO,
                            START=TITLE_PAGE_START,
                            ENTRY=TITLE_ENTRY,
                            WIDE_ENTRY=WIDE_TITLE_ENTRY,
                            END=TITLE_PAGE_END,
                            NO_TITLE_ENTRY=NO_TITLE_ENTRY)
        if titlepageIO.getvalue(): # will be false if no title page.
            outputepub.writestr("OEBPS/title_page.xhtml",titlepageIO.getvalue())
        titlepageIO.close()
        # write toc page.
        tocpageIO = BytesIO()
        self.writeTOCPage(tocpageIO,
                          self.EPUB_TOC_PAGE_START,
                          self.EPUB_TOC_ENTRY,
                          self.EPUB_TOC_PAGE_END)
        if tocpageIO.getvalue(): # will be false if no toc page.
            outputepub.writestr("OEBPS/toc_page.xhtml",tocpageIO.getvalue())
        tocpageIO.close()
        if dologpage:
            # write log page.
            logpageIO = BytesIO()
            self.writeLogPage(logpageIO)
            outputepub.writestr("OEBPS/log_page.xhtml",logpageIO.getvalue())
            logpageIO.close()
        if self.hasConfig('chapter_start'):
            CHAPTER_START = string.Template(self.getConfig("chapter_start"))
        else:
            CHAPTER_START = self.EPUB_CHAPTER_START
        if self.hasConfig('chapter_end'):
            CHAPTER_END = string.Template(self.getConfig("chapter_end"))
        else:
            CHAPTER_END = self.EPUB_CHAPTER_END
        # Write each chapter as its own OEBPS/fileNNNN.xhtml.
        for index, chap in enumerate(self.story.getChapters()): # (url,title,html)
            # logger.debug("chapter:%s %s %s"%(len(chap['html']), chap['title'],chap['url']))
            if chap['html']:
                chap_data = chap['html']
                if self.getConfig('internalize_text_links'):
                    soup = bs4.BeautifulSoup(chap['html'],'html5lib')
                    changed=False
                    for alink in soup.find_all('a'):
                        ## Chapters can be inserted in the middle
                        ## which can break existing internal links.
                        ## So let's save the original href and update.
                        # logger.debug("found %s"%alink)
                        if alink.has_attr('data-orighref') and alink['data-orighref'] in chapurlmap:
                            alink['href']=chapurlmap[alink['data-orighref']]
                            # logger.debug("set1 %s"%alink)
                            changed=True
                        elif alink.has_attr('href') and alink['href'] in chapurlmap:
                            if not alink['href'].startswith('file'):
                                # only save orig href if not already internal.
                                alink['data-orighref']=alink['href']
                            alink['href']=chapurlmap[alink['href']]
                            # logger.debug("set2 %s"%alink)
                            changed=True
                    if changed:
                        chap_data = unicode(soup)
                        # Don't want html, head or body tags in
                        # chapter html--bs4 insists on adding them.
                        chap_data = re.sub(r"</?(html|head|body)[^>]*>\r?\n?","",chap_data)
                # logger.debug('Writing chapter text for: %s' % chap.title)
                chap['url']=removeEntities(chap['url'])
                chap['chapter']=removeEntities(chap['chapter'])
                chap['title']=removeEntities(chap['title'])
                chap['origchapter']=removeEntities(chap['origtitle'])
                chap['tocchapter']=removeEntities(chap['toctitle'])
                # escape double quotes in all vals.
                for k,v in chap.items():
                    if isinstance(v,basestring): chap[k]=v.replace('"','&quot;')
                fullhtml = CHAPTER_START.substitute(chap) + \
                    chap_data.strip() + \
                    CHAPTER_END.substitute(chap)
                # strip to avoid ever growning numbers of newlines.
                # ffnet(& maybe others) gives the whole chapter text
                # as one line.  This causes problems for nook(at
                # least) when the chapter size starts getting big
                # (200k+)
                fullhtml = re.sub(r'(</p>|<br ?/>)\n*',r'\1\n',fullhtml)
                # logger.debug("write OEBPS/file%s.xhtml"%chap['index04'])
                outputepub.writestr("OEBPS/file%s.xhtml"%chap['index04'],fullhtml.encode('utf-8'))
                del fullhtml
        if self.story.calibrebookmark:
            outputepub.writestr("META-INF/calibre_bookmarks.txt",self.story.calibrebookmark)
        # declares all the files created by Windows.  otherwise, when
        # it runs in appengine, windows unzips the files as 000 perms.
        for zf in outputepub.filelist:
            zf.create_system = 0
        outputepub.close()
        # Flush the in-memory zip out to the caller's stream.
        out.write(zipio.getvalue())
        zipio.close()
## Utility method for creating new tags.
def newTag(dom, name, attrs=None, text=None):
    """
    Create and return a new DOM element named *name* on document *dom*.

    *attrs*, if given, is a dict of attribute name -> value set on the
    element; *text*, if given, is appended as a child text node.
    """
    tag = dom.createElement(name)
    if attrs is not None:
        for key, value in attrs.items():
            tag.setAttribute(key, value)
    if text is not None:
        tag.appendChild(dom.createTextNode(text))
    return tag
| JimmXinu/FanFicFare | fanficfare/writers/writer_epub.py | writer_epub.py | py | 39,444 | python | en | code | 664 | github-code | 36 |
35564937510 | # iseseisev harjutus 1
# Kristofer Andres
# 08.03.2022
# Independent exercise 1 (comments translated from Estonian): hello world,
# string concatenation, a cloud-height check, and a bus-seating calculation.
print("tere, maailm!")  # "hello, world!"
# Build the sentence "<year>. aasta liblikas on <butterfly>"
# ("the butterfly of year <year> is <butterfly>").
aasta = 2020  # year
liblikas = "teelehemosaiikliblikas"  # butterfly of the year (Glanville fritillary)
lause_keskosa = ". aasta liblikas on "  # middle part of the sentence
lause = str(aasta)+lause_keskosa+liblikas
print(lause)
# Ask for a cloud height in kilometres; clouds at 6 km or above count as
# "upper clouds".
kõrgus = float(input("sisesta pilve kõrgus kilomeetrites: "))  # height in km
if kõrgus >= 6:
    print("need on ülemised pilved")  # "these are upper clouds"
else:
    print("need ei ole ülemised pilved")  # "these are not upper clouds"
# Bus seating: how many buses are needed, and how many people ride in the
# last (partially filled) bus.
inimesed = 20  # number of people
kohad = 40  # seats per bus
buss= inimesed // kohad +1  # buses needed: full buses plus one for the remainder
viimaneb = inimesed % kohad  # people in the last bus
if inimesed % kohad == 0:
    # Exact fit: no extra bus needed, and the last bus is completely full.
    buss = inimesed // kohad
    viimaneb = kohad
print(f"inimeste arv: {inimesed}")  # "number of people"
print(f"kohtade arv: {kohad}")  # "number of seats"
print (f" busse vaja: {buss}")  # "buses needed"
print (f"viimases bussis inimesi: {viimaneb}")  # "people in the last bus"
| kristoferandres/iseseisvad_ylesanded | iseseisev 1.py | iseseisev 1.py | py | 738 | python | et | code | 0 | github-code | 36 |
27688638873 | """Config file and logging related utility functions."""
import configparser
import json
import os
import sys
from pprint import pprint
import yaml
def read_cfg(location, verbose=True):
    """
    Read config file at location using ConfigParser.

    Parameters
    ----------
    location : str
        Where the config file is located.
    verbose : bool, optional, defaults to True
        Should print the contents of the read config file.

    Returns
    -------
    ConfigParser
        The python ConfigParser object after reading the cfg.

    Raises
    ------
    ValueError
        If no file exists at location.
    """
    if not os.path.exists(location):
        raise ValueError(f"Config file {location} does not exist")
    parser = configparser.ConfigParser()
    parser.read(location)
    if verbose:
        print_cfg(parser, "Program started with configuration")
    return parser
def print_cfg(config, msg=""):
    """
    Print the contents of a ConfigParser object.

    Parameters
    ----------
    config : ConfigParser
        The ConfigParser to print the contents of.
    msg : str, optional, defaults to ""
        Message to print before printing the config file.

    Returns
    -------
    None
    """
    if msg != "":
        print(msg)
    # One {section: ((key, value), ...)} dict per section.
    sections = []
    for section in config.sections():
        sections.append({section: tuple(config.items(section))})
    pprint(sections, width=120)
def parse_args(parser, verbose=True):
    """
    Parse command line arguments into a Namespace.

    Parameters
    ----------
    parser : argparse.ArgumentParser
        The parser to use (reads sys.argv via parse_known_args).
    verbose : bool, optional, defaults to True
        Should print the values of the command line args.

    Returns
    -------
    Namespace
        Parsed arguments.

    Raises
    ------
    ValueError
        If any arguments are passed which are not used in program.
    """
    args, unparsed = parser.parse_known_args()
    if unparsed:
        raise ValueError(
            "Unrecognised command line arguments passed {}".format(unparsed)
        )
    # Only echo when something beyond the program name was passed.
    if verbose and len(sys.argv) > 1:
        print("Command line arguments", args)
    return args
def read_python(path, dirname_replacement=""):
    """
    Execute a python script at path.

    The script is expected to have items visible at global scope,
    which are stored as metadata.

    Note
    ----
    The string "__thisdirname__" is magic and will be replaced by the
    absolute path to the directory containing the script.
    The string "__dirname__" is also magic and will be replaced by
    the value of dirname_replacement.

    SECURITY: the file's contents are passed to exec() -- only use this
    on trusted scripts.

    Parameters
    ----------
    path : string
        The location of the python script.
    dirname_replacement : string, optional, defaults to ""
        What to replace __dirname__ with.
        By default, "" will replace __dirname__ with dirname of path.

    Returns
    -------
    dict
        The script's global scope variables stored in a dictionary,
        with all keys lower-cased.

    Raises
    ------
    ValueError
        If no file exists at path.
    """
    def normalise_path(pth):
        # Absolute path with forward slashes, so the substituted value is
        # valid inside python string literals on every OS.
        s = os.path.abspath(pth)
        s = s.replace(os.sep, "/")
        return s
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(path):
        raise ValueError("{} does not exist to read".format(path))
    with open(path, "r") as f:
        contents = f.read()
    # Perform the magic-string substitutions before executing.
    if dirname_replacement != "":
        contents = contents.replace("__dirname__", normalise_path(dirname_replacement))
    else:
        contents = contents.replace(
            "__dirname__", normalise_path(os.path.dirname(path))
        )
    contents = contents.replace(
        "__thisdirname__", normalise_path(os.path.dirname(path))
    )
    metadata = {}
    try:
        # Collect the script's top-level names into metadata.
        exec(contents, {}, metadata)
    except Exception as e:
        import traceback
        # NOTE(review): exiting the whole process from a library helper is
        # harsh -- callers may prefer a re-raise; left as-is for
        # compatibility.
        print("QUITTING: An error occurred reading {}".format(path))
        traceback.print_exc()
        exit(-1)
    metadata = {k.lower(): v for (k, v) in metadata.items()}
    return metadata
def read_yaml(path):
    """Load and return the YAML document stored at path."""
    with open(path, "r") as stream:
        return yaml.safe_load(stream)
def read_json(path):
    """Load and return the JSON document stored at path."""
    with open(path, "r") as stream:
        return json.load(stream)
def split_dict(in_dict, index):
    """
    Grab the value at index from each list in the dictionary.

    Only keys whose values are lists appear in the result; entries
    with non-list values are omitted.

    Parameters
    ----------
    in_dict : dict
        The dictionary to grab from.
    index : int
        The index in the lists to pull from.

    Returns
    -------
    dict
        A new dictionary mapping each list-valued key to its item at index.
    """
    return {
        key: value[index]
        for key, value in in_dict.items()
        if isinstance(value, list)
    }
def convert_dict_to_string(in_dict, name):
    """
    Convert the underlying parameters dictionary to string.

    Can be useful for printing or writing to a file.
    Does not overwrite default __str__ as the output is quite verbose.

    Parameters
    ----------
    in_dict : dict
        Input dictionary.
    name : str
        The variable name written before the dictionary literal.

    Returns
    -------
    str
        The string representation of the dict.
    """

    def _quote(val):
        # Strings are wrapped in single quotes; other values pass through.
        return f"'{val}'" if isinstance(val, str) else val

    parts = [name + " = {\n"]
    for key, value in in_dict.items():
        parts.append(f"\t{_quote(str(key))}:")
        if isinstance(value, dict):
            # One level of nesting is rendered as an indented inner block.
            parts.append("\n\t\t{\n")
            for inner_key, inner_value in value.items():
                parts.append(
                    "\t\t {}: {},\n".format(
                        _quote(str(inner_key)), _quote(inner_value)
                    )
                )
            parts.append("\t\t},\n")
        else:
            parts.append(f" {_quote(value)},\n")
    parts.append("\t}")
    return "".join(parts)
7168853562 | import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import dash
from app import app
from app import server
from apps.gas_monitoring import gas_app
# Top navigation bar shared by every page of the dashboard.
navBar = dbc.NavbarSimple(
    children=[
        dbc.NavItem(dbc.NavLink("Home", href="/")),
        dbc.NavItem(dbc.NavLink("Gas-system", href="/gas-monitoring")),
        dbc.NavItem(dbc.NavLink("Oil-system", href="/oil-monitoring")),
    ],
    brand="G.O.M",
    brand_href="/",
    color="primary",
    dark=True,
)
# Root layout: a URL tracker, the nav bar, and a container that the
# display_page callback fills with the page matching the current URL.
app.layout = html.Div(
    [
        dcc.Location(id='url', refresh=False),
        navBar,
        html.Div(
            id='content',
        )
    ]
)
# Fallback layout shown for unknown URLs.
error_page = html.Div([
    html.H1("404",style={"textAlign":"center"}),
    html.H3("Page Not Found!",style={"textAlign":"center"})
])
# Landing page: welcome text centred over a full-width background image.
index_page = html.Div(
    [
        html.Div([
            html.H2("Welcome to Gas Oil plant monitoring System."),
            html.P("""Lorem ipsum dolor sit amet ac maximusrdiet convallis. Duis rutrum neque consectetur mauris tempor laoreet. Vestibulum quis nulla eu orci efficitur varrisque vel nibh. Integer eu velit eget ex consectetur consectetur sit amet vitae lectus. Mauris egestas purus et mi pulvinar, a posuere justo convallis. Nunc nec laoreet lectus. Mauris purus est, bibendum hendrerit fermentum quis, porttitor at massa.""")
        ],
        style={
            'text-align': 'center',
            'position': 'absolute',
            'top': '50%',
            'left': '50%',
            'transform': 'translate(-50%, -50%)',
            'color': 'white',
        })
    ],
    style={"textAlign":"center",
            'backgroundImage': 'url("assets/images/background.jpg")',
            'backgroundRepeat': 'no-repeat',
            'backgroundPosition': 'center',
            'backgroundSize' : 'cover',
            'height':'50vh',
            'position':'relative',
        },
)
@app.callback(dash.dependencies.Output('content', 'children'),
              [dash.dependencies.Input('url', 'pathname')])
def display_page(pathname):
    """Return the layout for the requested URL pathname."""
    if pathname == '/gas-monitoring':
        return gas_app.layout
    elif pathname == "/oil-monitoring":
        # NOTE(review): this also returns the gas layout -- presumably a
        # placeholder until an oil monitoring app exists; confirm.
        return gas_app.layout
    elif pathname == '/':
        return index_page
    else:
        return error_page
if __name__ == '__main__':
app.run_server(debug=True) | muntakim1/gas-oil-plant-monitoring | index.py | index.py | py | 2,435 | python | en | code | 0 | github-code | 36 |
20026440341 | from . import utils
import time
from .monitor import PostgresMonitor
__all__ = ['QuerySet', 'query', 'update', 'insert']
# Module load timestamp -- NOTE(review): appears unused within this module.
key = str(time.time())
async def query(sql):
    """Run a read statement and return all matching rows."""
    return await PostgresMonitor.fetch(sql)
async def update(sql):
    """Run a write statement.

    NOTE(review): uses fetch (all rows), same as query, rather than
    fetchrow -- confirm this is intentional.
    """
    return await PostgresMonitor.fetch(sql)
async def insert(sql):
    """Run an insert statement and return the single resulting row."""
    return await PostgresMonitor.fetchrow(sql)
class QuerySet(object):
    """Per-table SQL builder and executor.

    Each public coroutine renders one of the templates in ``_sql`` with
    values escaped through the ``utils`` helpers, then executes it via
    the module level ``query``/``update``/``insert`` coroutines.
    """

    # Statement templates, filled in with str.format by the methods below.
    _sql = {
        'get_list': 'SELECT {fields} from {table} {extra} LIMIT {size} OFFSET {offset}',
        'filter': 'SELECT {fields} from {table} WHERE {rule} LIMIT {size} OFFSET {offset}',
        'count': 'SELECT COUNT({field}) FROM {table}',
        'count_on_rule': 'SELECT COUNT({field}) FROM {table} WHERE {rule}',
        'orderby': 'ORDER BY {field}',
        'nearby': 'select {fields} difference from {table} where {rule} and {value} > {column} limit 1',
        'orderby_decr': 'ORDER BY {field} DECR',
        'filter_with_orderby': "SELECT {fields} from {table} WHERE {rule} ORDER BY {sort_key} LIMIT {size} OFFSET {offset};",
        'filter_with_orderby_decr': "SELECT {fields} from {table} WHERE {rule} ORDER BY {sort_key} LIMIT {size} OFFSET {offset};",
        'filter_in': "SELECT {fields} FROM {table} WHERE {key} IN ({targets});",
        'filter_in_range': "SELECT {fields} FROM {table} WHERE {rule} and {key} <= {end} and {key} >= {start};",
        'find_in_range': "SELECT {fields} FROM {table} WHERE {key} <= {end} and {key} >= {start};",
        'find_near': "SELECT {fields} FROM {table} WHERE {key} >= {start};",
        'insert': 'INSERT INTO {table} ({keys}) VALUES ({values}) RETURNING id;',
        'replace': 'REPLACE INTO {table} ({keys}) VALUES ({values})',
        'delete': "DELETE FROM {table} WHERE {rules} RETURNING id",
        'update': "UPDATE {table} SET {key_value_pairs} WHERE {rules} RETURNING id",
        'append_array': "UPDATE {table} SET {key} = array_append({key}, {value}) WHERE id='{id}' RETURNING id",
        'get_via_id': "SELECT {fields} from {table} WHERE id='{id}'",
        'update_via_id': "UPDATE {table} SET {key_value_pairs} WHERE id='{id}' RETURNING id",
        'delete_via_id': "DELETE FROM {table} WHERE id='{id}' RETURNING id",
        'incr': "UPDATE {table} SET {key}={key}+'{num}' WHERE id='{id}' RETURNING id",
        'decr': "UPDATE {table} SET {key}={key}-'{num}' WHERE id='{id}' RETURNING id",
        'search': "SELECT {fields} FROM {table} WHERE {extra} {key} LIKE '%{value}%' LIMIT {size} OFFSET {offset}",
        'insert_or_update': "INSERT INTO {table} ({keys}) VALUES ({values}) ON DUPLICATE KEY UPDATE {key_value_pairs};"
    }

    def __init__(self, table):
        """Bind this query set to a table model class.

        The model is expected to expose ``_fields`` (known columns) and
        use its class name as the table name.
        """
        self.table = table
        self.fields = table._fields
        self.tablename = table.__name__

    def format(self, data):
        """Escape query values.

        dict input: every key must be a known field and each value is
        escaped.  Non-dict input is treated as a string and escaped
        directly.
        """
        if not isinstance(data, dict):
            # NOTE(review): assumes a str was passed (relies on .encode);
            # confirm callers never pass other types.
            return utils.escape(str(data.encode('utf8')))
        if not all(f in self.fields for f in data.keys()):
            raise Exception("Unknew Fields", set(
                data.keys()) - set(self.fields))
        try:
            res = {k: utils.escape(v) for k, v in data.items()}
            return res
        except Exception as exc:
            # Narrowed from a bare except so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the cause is chained for debugging.
            raise Exception("Series Failed") from exc

    async def nearby(self, value, column, *args, **kwargs):
        """Fetch the single row matching kwargs where value > column."""
        data = self.format(kwargs)
        return await query(self._sql['nearby'].format(**{
            'table': self.tablename,
            'fields': utils.concat(map(utils.wrap_key, self.fields)),
            'value': utils.escape(value),
            'column': utils.escape(column),
            'rule': utils.get_and_seg(data)
        }))

    async def get(self, oid):
        """Fetch one row by id, or None when it does not exist."""
        res = await query(self._sql['get_via_id'].format(**{
            'table': self.tablename,
            'fields': utils.concat(map(utils.wrap_key, self.fields)),
            'id': oid
        }))
        # NOTE(review): dict(res) on the whole fetch result -- relies on
        # the driver's record behaviour; confirm.
        return res and dict(res) if res else None

    async def get_by(self, *args, **kwargs):
        """Fetch the first row matching all kwargs equality rules."""
        data = self.format(kwargs)
        res = await query(self._sql['filter'].format(**{
            'table': self.tablename,
            'rule': utils.get_and_seg(data),
            'size': '1',
            'offset': '0',
            'fields': utils.concat(map(utils.wrap_key, self.fields)),
        }))
        return res and dict(res[0])

    async def search(self, key, value, start, limit, filters=''):
        """Paginated LIKE search on key, optionally AND-ed with filters."""
        return await query(self._sql['search'].format(**{
            'table': self.tablename,
            'fields': utils.concat(map(utils.wrap_key, self.fields)),
            'key': self.format(key),
            'value': self.format(value),
            'offset': str(int(start)),
            'size': str(int(limit)),
            'extra': filters and utils.get_pairs(filters) + 'and' or ''
        }))

    async def get_list(self, size=100, offset=0, sort_key='') -> list:
        """Fetch a page of rows, optionally ordered by sort_key(s)."""
        if isinstance(sort_key, list):
            sort_key = utils.concat(map(utils.set_desc, sort_key))
        else:
            sort_key = sort_key and utils.set_desc(sort_key) or ''
        res = await query(self._sql['get_list'].format(**{
            'table': self.tablename,
            'fields': utils.concat(map(utils.wrap_key, self.fields)),
            'size': str(int(size)),
            'offset': str(int(offset)),
            'extra': sort_key and self._sql['orderby'].format(**{
                'field': sort_key
            }) or ''
        }))
        return [dict(r) for r in res]

    async def find_in(self, key, targets, fields=[]) -> dict:
        """Fetch rows whose key column is IN the targets collection."""
        return await query(self._sql['filter_in'].format(**{
            'table': self.tablename,
            'fields': utils.concat(map(utils.wrap_key, fields or self.fields)),
            'key': key,
            'targets': utils.concat(map(utils.wrap_value, targets))
        }))

    async def find_near(self, key, start, end, fields=[], *args, **kwargs) -> dict:
        """Fetch rows with key >= start (end is accepted but unused by the template)."""
        data = self.format(kwargs)
        res = await query(self._sql['find_near'].format(**{
            'table': self.tablename,
            'fields': utils.concat(map(utils.wrap_key, fields or self.fields)),
            'key': key,
            'rule': utils.get_and_seg(data),
            'start': utils.wrap_value(start),
            'end': utils.wrap_value(end)
        }))
        return [dict(r) for r in res]

    async def find_in_range(self, key, start, end, fields=[], *args, **kwargs) -> dict:
        """Fetch rows matching kwargs with start <= key <= end."""
        data = self.format(kwargs)
        res = await query(self._sql['filter_in_range'].format(**{
            'table': self.tablename,
            'fields': utils.concat(map(utils.wrap_key, fields or self.fields)),
            'key': key,
            'rule': utils.get_and_seg(data),
            'start': utils.wrap_value(start),
            'end': utils.wrap_value(end)
        }))
        return [dict(r) for r in res]

    async def count(self, field):
        """COUNT(field) over the whole table ('*' when field is empty)."""
        field = utils.escape(field) or '*'
        return await query(self._sql['count'].format(**{
            'table': self.tablename,
            'field': field
        }))

    async def count_on_rule(self, field, rule):
        """COUNT(field) over rows matching the given equality rules."""
        rule = self.format(rule)
        field = utils.escape(field)
        return await query(self._sql['count_on_rule'].format(**{
            'table': self.tablename,
            'rule': utils.get_and_seg(rule),
            'field': field
        }))

    async def filter(self, limit=100, offset=0, sort_key='', *args, **kwargs):
        """Fetch a page of rows matching all kwargs (sort_key is unused here)."""
        data = self.format(kwargs)
        res = await query(self._sql['filter'].format(**{
            'table': self.tablename,
            'rule': utils.get_and_seg(data),
            'size': str(int(limit)),
            'fields': utils.concat(map(utils.wrap_key, self.fields)),
            'offset': str(int(offset))
        }))
        return [dict(r) for r in res]

    async def sortby(self, sort_key='id', offset=0, limit=100, extra="", decr=False, *args, **kwargs):
        """Fetch an ordered page of rows matching all kwargs."""
        data = self.format(kwargs)
        if isinstance(sort_key, list):
            sort_key = utils.concat(map(utils.set_desc, sort_key))
        else:
            sort_key = utils.set_desc(sort_key)
        tmpl = decr and 'filter_with_orderby_decr' or 'filter_with_orderby'
        return await query(self._sql[tmpl].format(**{
            'table': self.tablename,
            'rule': utils.get_and_seg(data),
            'size': str(int(limit)),
            'sort_key': sort_key,
            'offset': str(int(offset)),
            'fields': utils.concat(map(utils.wrap_key, self.fields)),
        }))

    async def insert(self, *args, **kwargs):
        """Insert one row built from kwargs and return its id row."""
        data = self.format(kwargs)
        return await insert(self._sql['insert'].format(**{
            'table': self.tablename,
            'keys': utils.concat(map(utils.wrap_key, data.keys())),
            'values': utils.concat(map(utils.wrap_value, data.values()))
        }))

    async def replace(self, *args, **kwargs):
        """REPLACE one row built from kwargs."""
        data = self.format(kwargs)
        return await insert(self._sql['replace'].format(**{
            'table': self.tablename,
            'keys': utils.concat(map(utils.wrap_key, data.keys())),
            'values': utils.concat(map(utils.wrap_value, data.values()))
        }))

    async def update(self, oid, *args, **kwargs):
        """Update the row with the given id using the kwargs values."""
        data = self.format(kwargs)
        pairs = utils.get_pairs(data)
        return await update(self._sql['update_via_id'].format(**{
            'id': oid,
            'table': self.tablename,
            'key_value_pairs': pairs
        }))

    async def append_array(self, oid, key, value):
        """Append value to the array column key of the row with id oid."""
        return await update(self._sql['append_array'].format(**{
            'id': oid,
            'table': self.tablename,
            'key': key,
            'value': value
        }))

    async def insert_or_update(self, *args, **kwargs) -> dict:
        """Insert a row, updating it instead when a duplicate key exists.

        Fixed: the template must be looked up with [] (it was previously
        called like a function), and values must be escaped with
        wrap_value, matching insert/replace.
        """
        data = self.format(kwargs)
        return await insert(self._sql['insert_or_update'].format(**{
            'table': self.tablename,
            'keys': utils.concat(map(utils.wrap_key, data.keys())),
            'values': utils.concat(map(utils.wrap_value, data.values())),
            'key_value_pairs': utils.get_pairs(data)
        }))

    async def update_by(self, rules, *args, **kwargs):
        """Update every row matching rules with the kwargs values."""
        data = self.format(kwargs)
        rules = self.format(rules)
        return await update(self._sql['update'].format(**{
            'table': self.tablename,
            'rules': utils.get_and_seg(rules),
            'key_value_pairs': utils.get_pairs(data)
        }))

    async def delete(self, oid):
        """Delete the row with the given id."""
        return await update(self._sql['delete_via_id'].format(**{
            'table': self.tablename,
            'id': oid
        }))

    async def delete_by(self, *args, **kwargs):
        """Delete every row matching all kwargs equality rules."""
        data = self.format(kwargs)
        return await update(self._sql['delete'].format(**{
            'table': self.tablename,
            'rules': utils.get_and_seg(data)
        }))

    async def incr(self, oid, key, num):
        """Increase column key by num for the row with id oid."""
        return await update(self._sql['incr'].format(**{
            'id': oid,
            'table': self.tablename,
            'key': key,
            'num': num
        }))

    async def decr(self, oid, key, num):
        """Decrease column key by num for the row with id oid."""
        return await update(self._sql['decr'].format(**{
            'id': oid,
            'table': self.tablename,
            'key': key,
            'num': num
        }))
| RyanKung/jirachi | jirachi/io/postgres/queryset.py | queryset.py | py | 11,228 | python | en | code | 3 | github-code | 36 |
def chefWar(h, p):
    """Simulate the duel: each round h loses p health, then p halves.

    Returns 1 when h drops to zero or below while p is still positive,
    0 when p reaches zero while h is still positive, and the pair
    (h, p) when both start out non-positive.
    """
    while p > 0 or h > 0:
        if h <= 0 < p:
            return 1
        if p <= 0 < h:
            return 0
        h -= p
        p /= 2
    return h, p
t = int(input())
for _ in range(t):
h, p = map(int, input().split())
print(chefWar(h,p)) | nitesh16s/DS-Algo-Problems | CodeChef/August-Cookoff/chefwars.py | chefwars.py | py | 252 | python | en | code | 0 | github-code | 36 |
31246785423 | import pandas as pd
import scipy.stats as stats
import operator
import numpy as np
from time import sleep as sl
import argparse
from sklearn.metrics import pairwise_distances,pairwise_distances_chunked
from sklearn.cluster import AgglomerativeClustering,DBSCAN
import time
from datetime import timedelta
import sys
from datetime import date
def parseargs():
    """Define and parse the command line interface for this script.

    Returns
    -------
    argparse.Namespace
        The parsed command line options.
    """
    cli = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # (short flag, long flag, help text, default) for every option; the
    # sentinel marks options that take argparse's implicit None default.
    _NO_DEFAULT = object()
    option_specs = [
        ('-e', '--mgt_epi_data',
         'csv file with MGT and epidemiological information', _NO_DEFAULT),
        ('-m', '--mgt_level',
         'the level of mgt for clustering, e.g. MGT9 ', _NO_DEFAULT),
        ('-n', '--number_iso_threshold',
         'the number of isolates threshold for each type in the tested MGT level', _NO_DEFAULT),
        ('-o', '--outpath',
         'the path of outfiles, e.g. /srv/scratch', _NO_DEFAULT),
        ('-f', '--mgt_flags',
         'the csv file of the flag MGT-STs for meta data', _NO_DEFAULT),
        ('-p', '--prefix',
         'the prefix for the outfile names', _NO_DEFAULT),
        ('-c', '--country',
         'country for filtering isolates for pairwise distance analysis, e.g. -c Australia', _NO_DEFAULT),
        ('-t', '--transtype',
         'international or national cluster, or a list of ODC10 STs in a .txt file without heading, e.g. -t international', _NO_DEFAULT),
        ('-a', '--whole_allele_profile',
         'Allele profile of MGT9, e.g. /srv/scratch/mgt9_alleprofile.txt', _NO_DEFAULT),
        ('-d', '--distances',
         'file containing distances corresponding to the alleleprofiles file (from previous run of this script if applicable)', _NO_DEFAULT),
        ('-l', '--dist_limits',
         'comma separated list of cluster cutoffs or range or both i.e 1,2,5 or 1-10 or 1,2,5-10, note ODC0/MGT9 were automatically given', "1,2,5,10"),
        ('-j', '--no_jobs',
         'num jobs to split distance calc into', 1),
    ]
    for short_flag, long_flag, help_text, default in option_specs:
        if default is _NO_DEFAULT:
            cli.add_argument(short_flag, long_flag, help=help_text)
        else:
            cli.add_argument(short_flag, long_flag, help=help_text, default=default)
    return cli.parse_args()
def main():
    """Run the transmission-link workflow end to end.

    Reads the MGT/epi csv, summarises transmission clusters at the chosen
    MGT level (optionally annotated with flag STs), writes per-type and
    per-isolate csv summaries, then -- when a country is given -- selects
    candidate clusters and recomputes single linkage ODC clusters from
    their MGT9 allele profiles.
    """
    t1 = time.time()
    args = parseargs()
    mgt_epi = pd.read_csv(args.mgt_epi_data)
    threshod_dic = {args.mgt_level: int(args.number_iso_threshold)}
    # With no flag file the plain tracking variant is used; otherwise the
    # flag-annotated variant adds the flag-ST summaries per type.
    if args.mgt_flags == None:
        mgt_threshod, type_dic, isolate_dic, type_isolist_dic = transmi_tracking(threshod_dic, mgt_epi)
    if args.mgt_flags != None:
        flags = pd.read_csv(args.mgt_flags)
        mgt_threshod, type_dic, isolate_dic, type_isolist_dic = transmi_tracking_flags(threshod_dic, mgt_epi, flags)
    typedf = pd.DataFrame.from_dict(type_dic, orient='index')
    typedf.index.name = mgt_threshod
    isolate_df = pd.DataFrame.from_dict(isolate_dic, orient='index')
    isolate_df.index.name = 'Accession'
    typedflist = typedf.columns.tolist() + typedf.values.tolist()
    isolate_df.to_csv(args.outpath + '/' + args.prefix + args.mgt_level + '_isolate_transmission_link.csv')
    typedf.to_csv(args.outpath + '/' + args.prefix + args.mgt_level + '_mgttype_transmission_link.csv')
    ###### allele profile getting for pairwise calculation,
    # Select the candidate types for pairwise distance analysis depending
    # on whether international, national or all clusters are requested.
    if args.country != None:
        odc_pairwise_list = []
        if args.transtype == 'international':
            typedf2 = typedf[typedf['no_country'] >= 2] ### >=2 is international
            type_country_dic = typedf2['country_detail'].to_dict()
            for type, subdic in type_country_dic.items():
                for c in subdic.keys():
                    if args.country == c :
                        no_iso_country = subdic[c]
                        if no_iso_country>= 1 and args.mgt_level == "ODC10": ####### to set the threshold of >= 1 for each cluster in Australia.
                            if type not in odc_pairwise_list and type != "None":
                                odc_pairwise_list.append(type)
                        if args.mgt_level != "ODC10":
                            if type not in odc_pairwise_list and type != "None":
                                odc_pairwise_list.append(type)
            print({"Total No. of types for pairwise distance calculation" : len(odc_pairwise_list)})
            print(odc_pairwise_list)
        if args.transtype == 'national':
            typedf2 = typedf[typedf['no_country'] == 1] ### >=2 is international; == 1 is national
            type_country_dic = typedf2['country_detail'].to_dict()
            for type, subdic in type_country_dic.items():
                for c in subdic.keys():
                    if args.country == c :
                        no_iso_country = subdic[c]
                        if no_iso_country>= 2 and args.mgt_level == "ODC10": ####### >=2 isolates for national transmission
                            if type not in odc_pairwise_list and type != "None":
                                odc_pairwise_list.append(type)
                        if args.mgt_level != "ODC10":
                            if type not in odc_pairwise_list and type != "None":
                                odc_pairwise_list.append(type)
            print({"Total No. of types for pairwise distance calculation" : len(odc_pairwise_list)})
            print(odc_pairwise_list)
        if args.transtype == None:
            typedf2 = typedf[typedf['no_country'] >= 1] ### including both international and national
            type_country_dic = typedf2['country_detail'].to_dict()
            for type, subdic in type_country_dic.items():
                for c in subdic.keys():
                    if args.country == c :
                        no_iso_country = subdic[c]
                        if no_iso_country>= 2 and args.mgt_level == "ODC10": ####### >=2 isolates for national transmission
                            if type not in odc_pairwise_list and type != "None":
                                odc_pairwise_list.append(type)
                        if args.mgt_level != "ODC10":
                            if type not in odc_pairwise_list and type != "None":
                                odc_pairwise_list.append(type)
            print({"Total No. of types for pairwise distance calculation" : len(odc_pairwise_list)})
            print(odc_pairwise_list)
        # A .txt file of ODC10 STs overrides the computed candidate list.
        if args.transtype != None and ".txt" in args.transtype:
            odc_pairwise_list=open(args.transtype,'r').read().splitlines()
    # odc_pairwise_list = ['4969']
    # NOTE(review): odc_pairwise_list is only defined when --country is
    # given; the loop below would raise NameError otherwise -- confirm.
    for type in odc_pairwise_list :
        if type in type_isolist_dic:
            print(args.mgt_level + '_' + type)
            # time_pw(args, mgt_epi, args.mgt_level, type, args.outpath)
            ### to save the type correlated acc list
            isolatelistfile = open(args.outpath + '/' + args.mgt_level + '_' + type + '_' + args.country + '_correlated_isolatelist.txt','w')
            isolatelistfile.write(args.mgt_level + '_' + type + '\n')
            for acc in type_isolist_dic[type]:
                isolatelistfile.write(acc + '\n')
            ### to calculate the pairwise distance of alleles
            if args.whole_allele_profile != "":
                allele_prof = open(args.whole_allele_profile, "r").read().splitlines()
                allele_proflist = get_part_alleleprofil(allele_prof, type_isolist_dic[type])
                allele_prof_outfile = open(args.outpath + '/' + args.mgt_level + '_' + type + '_alleleprof.txt', 'w')
                allele_prof_outlist=[]
                for a in allele_proflist:
                    allele_prof_outlist.append(a)
                    allele_prof_outfile.write(a + '\n')
                profs, id_to_strain = process_profiles(allele_prof_outlist)
                pairw_outfrefix=args.outpath + '/' + args.mgt_level + '_' + type + '_'
                pairwise_process(args, profs, id_to_strain, pairw_outfrefix)
    t2 = timecal(t1)
    ## iso_process_profiles() and iso_pairwise_process() are for pairwise distance of isolates in Australia
    # iso_profs, iso_id_to_strain = iso_process_profiles(allele_prof_outlist)
    # print(iso_id_to_strain)
    # iso_pairwise_process(args, iso_profs, iso_id_to_strain, pairw_outfrefix)
def timecal(uptime):
    """Print the elapsed time since uptime and return the current time.

    Parameters
    ----------
    uptime : float
        An earlier time.time() value to measure from.

    Returns
    -------
    float
        The current time, suitable as the next measurement start.
    """
    elapsed_seconds = time.time() - uptime
    print(timedelta(seconds=elapsed_seconds))
    return time.time()
def time_metric(a, b):
    """Return the absolute number of days between two date triples.

    Parameters
    ----------
    a, b : sequence
        Sequences whose first three items are year, month and day
        (anything accepted by int()).

    Returns
    -------
    int
        The absolute difference in days between the two dates.
    """
    # The original kept unused match/missmatch counters; removed.
    a = [int(x) for x in a]
    b = [int(x) for x in b]
    d0 = date(a[0], a[1], a[2])
    d1 = date(b[0], b[1], b[2])
    return abs((d1 - d0).days)
###note columns have to include 'Accession','Collection Year','Collection Month','Collection Day'.
def time_pw(args, metadf,odc,type,outfrefixout):
    """Write a pairwise collection-date distance matrix for one MGT type.

    Rows of metadf whose odc column equals type are kept; missing days
    default to 15 and rows without a month are dropped.  When more than
    one dated isolate remains, the day-interval matrix is saved as
    <outfrefixout><odc>_<type>_time_pwdistances.txt.
    """
    # metadf = pd.read_csv(metapath)
    metadf[odc] = metadf[odc].astype(str)
    metadf = metadf[(metadf[odc]==type) | (metadf[odc]== str(type))]
    timedf = pd.DataFrame(metadf, columns=['Accession','Collection Year','Collection Month','Collection Day'])
    # timedf = pd.read_csv('E:/2018/2019-06-14-Australia_SEN/test/time.csv')
    # # timedf['d'] = pd.to_datetime(timedf['Date'],format = '%Y/%m/%d')
    # Mid-month stand-in for unknown collection days.
    timedf['Collection Day']=timedf['Collection Day'].replace(np.nan,15)
    timedf = timedf[timedf['Collection Month'].notnull()]
    print({'time_input_dfsize': timedf.shape[0]})
    datedf=pd.DataFrame(timedf,columns=['Collection Year','Collection Month','Collection Day'])
    acclist = timedf['Accession'].values.tolist()
    start_time = time.time()
    if datedf.shape[0]>1:
        dayinterv = pairwise_distances(datedf, metric=time_metric, n_jobs=int(args.no_jobs))
        # pairw_outfrefix = 'E:/2018/2019-06-14-Australia_SEN/test/time_pairwise_'+ type + '_'
        if len(dayinterv) >=2 :
            print("pairwise distance time", (" --- %s seconds ---" % (time.time() - start_time)))
            np.savetxt(outfrefixout + odc +'_' +type + '_'+"time_pwdistances.txt", dayinterv.astype(int), fmt='%i', header=",".join(acclist), delimiter=",")
def unneg(a):
    """Strip a leading negative marker from an allele string.

    Values such as "-12_1" become "12"; anything without a "-" is
    returned unchanged.
    """
    if "-" not in a:
        return a
    return a.split("_")[0][1:]
def mgt_dist_metric(a, b):
    """Count mismatching alleles between two profiles of equal length.

    A value equal to 0 in either profile is treated as missing data
    and never counted as a mismatch.

    Parameters
    ----------
    a, b : sequence
        Allele profiles; b must be at least as long as a.

    Returns
    -------
    int
        The number of positions where both alleles are non-zero and differ.
    """
    # The original also tallied a "match" counter that was never used;
    # removed along with the commented-out debugging lines.
    missmatch = 0
    for i in range(len(a)):
        if a[i] != 0 and b[i] != 0 and a[i] != b[i]:
            missmatch += 1
    return missmatch
def process_profiles(inprofiles, s=False):
    """Group strain ids by sequence type (ST) and keep one profile per ST.

    Parameters
    ----------
    inprofiles : list of str
        Tab separated allele profile lines; the first line is a header.
        Each data line is strain, ST, <ignored>, then allele columns.
    s : collection or False, optional
        When given, only lines whose strain (first column) is in s are
        processed.

    Returns
    -------
    (dict, dict)
        profs maps ST -> allele profile (negatives normalised via unneg);
        id_to_strain maps ST -> list of strain ids with that ST.
    """
    # The original duplicated the grouping logic in both branches of the
    # strain filter; collapsed into a single loop with a skip guard.
    profs = {}
    id_to_strain = {}
    for line in inprofiles[1:]:
        col = line.split("\t")
        if s and col[0] not in s:
            continue
        if col[1] not in profs:
            noneg = [unneg(x) for x in col[3:]]
            profs[col[1]] = noneg
            id_to_strain[col[1]] = [str(col[0])]
        else:
            id_to_strain[col[1]].append(str(col[0]))
    return profs, id_to_strain
def pairwise_process(args,profs,id_to_strain, pairw_outfrefix):
    """Cluster MGT9 STs by allele distance and write per-strain ODC calls.

    Computes (or loads, when args.distances is set) the pairwise allele
    distance matrix between the ST profiles, runs single linkage
    agglomerative clustering at each requested cutoff, and writes an
    iso_odc_recal.txt table mapping every strain to its MGT9 ST and ODC
    cluster ids.  With fewer than two STs only the Strain/MGT9 table is
    written.
    """
    idlist = list(profs.keys())
    inprofs = [profs[x] for x in idlist]
    dfo = pd.DataFrame(inprofs)
    # distances only calculated if args.distances not set
    lend = ""
    if args.distances:
        # read in distances previosly calculated
        d = np.loadtxt(args.distances)
        lend = len(d) ### number of MGT9 STs in this cluster
    else:
        start_time = time.time()
        d = pairwise_distances(inprofs, metric=mgt_dist_metric, n_jobs=int(args.no_jobs))
        lend = len(d)
        # if len(d) >=2 :
        print("pairwise distance time", (" --- %s seconds ---" % (time.time() - start_time)))
        np.savetxt(pairw_outfrefix + "mgt9_distances.txt", d.astype(int), fmt='%i', header=",".join(idlist), delimiter=",")
    # distance cutoffs to calculate
    if lend >=2:
        pairw_outfile = open(pairw_outfrefix + 'iso_odc_recal.txt','w')
        diststring = args.dist_limits
        dists = diststring.split(",")
        distances = []
        for i in dists:
            if "-" in i:
                n = i.split("-")
                nlist = list(range(int(n[0]) + 1, int(n[1]) + 2))
                # distance cutoffs seems to be non inclusive i.e. cutoff of 3 means max distance is 2
                # therefore need to add 1 to all values
            else:
                nlist = [int(i) + 1]
            distances += nlist
        clusterlists = {}
        preference = []
        for id in idlist:
            preference.append(len(id_to_strain[id]))
        start_time = time.time()
        # One clustering pass per cutoff; results are keyed by cutoff.
        for dist in distances:
            clusters = AgglomerativeClustering(n_clusters=None, distance_threshold=dist, affinity="precomputed",
                                               linkage="single").fit_predict(d)
            clusterls = list(clusters)
            clusterlists[dist] = clusterls
        print("clustering time", (" --- %s seconds ---" % (time.time() - start_time)))
        realdists = ["ODC" + str(x - 1) for x in distances]
        pairw_outfile.write("Strain\tMGT9\t{}\n".format("\t".join(realdists)))
        # Cluster ids are written 1-based per strain, one column per cutoff.
        for i in range(len(idlist)):
            id = idlist[i]
            for strain in id_to_strain[id]:
                pairw_outfile.write(strain + '\t' + str(id))
                for d in distances:
                    clust = clusterlists[d][i]
                    pairw_outfile.write("\t" + str(clust + 1))
                pairw_outfile.write("\n")
        pairw_outfile.close()
    if lend < 2: ### belong to the same MGT9 ST
        pairw_outfile = open(pairw_outfrefix + 'iso_odc_recal.txt','w')
        pairw_outfile.write('Strain' + '\t' + 'MGT9' + '\n')
        for st, isolist in id_to_strain.items():
            for iso in isolist:
                pairw_outfile.write(str(iso) + '\t' + str(st) + '\n')
    return
##### pairwise distance calculation
### iso_process_profiles() and iso_pairwise_process() are for pairwise distance of isolates in Australia
def iso_process_profiles(inprofiles, s=False):
    """Collect one allele profile per strain (isolate) rather than per ST.

    Parameters
    ----------
    inprofiles : list of str
        Tab separated allele profile lines; the first line is a header.
        Each data line is strain, ST, <ignored>, then allele columns.
    s : collection or False, optional
        When given, only lines whose strain (first column) is in s are
        processed.

    Returns
    -------
    (dict, dict)
        profs maps strain -> allele profile (negatives normalised via
        unneg); id_to_strain maps strain -> list of its ST values.
    """
    # Deduplicated the strain-filter branches, mirroring process_profiles.
    profs = {}
    id_to_strain = {}
    for line in inprofiles[1:]:
        col = line.split("\t")
        if s and col[0] not in s:
            continue
        if col[0] not in profs:
            noneg = [unneg(x) for x in col[3:]]
            profs[col[0]] = noneg
            id_to_strain[col[0]] = [str(col[1])]
        else:
            id_to_strain[col[0]].append(str(col[1]))
    return profs, id_to_strain
def iso_pairwise_process(args,profs,id_to_strain, pairw_outfrefix):
    """Compute and save the pairwise allele distance matrix per isolate.

    When args.distances is set, the precomputed matrix is loaded instead
    of being recalculated; otherwise the matrix is computed with
    mgt_dist_metric and, with two or more isolates, written to
    <pairw_outfrefix>iso_distances.txt with the isolate ids as header.
    """
    idlist = list(profs.keys())
    # print(idlist)
    inprofs = [profs[x] for x in idlist]
    # distances only calculated if args.distances not set
    lend = ""
    if args.distances:
        # read in distances previosly calculated
        d = np.loadtxt(args.distances)
        lend = len(d)
    else:
        start_time = time.time()
        d = pairwise_distances(inprofs, metric=mgt_dist_metric, n_jobs=int(args.no_jobs))
        lend = len(d)
        if len(d) >=2 :
            print("pairwise distance time", (" --- %s seconds ---" % (time.time() - start_time)))
            np.savetxt(pairw_outfrefix + "iso_distances.txt", d.astype(int), fmt='%i', header=",".join(idlist), delimiter=",")
def epi_filt(mgt_epi, fi_dic):
    """Keep only rows whose values match the allowed values in fi_dic.

    Parameters
    ----------
    mgt_epi : pandas.DataFrame
        The table of isolates to filter.
    fi_dic : dict
        Maps a column name to the collection of allowed values; cell
        values are compared as strings.

    Returns
    -------
    pandas.DataFrame
        A new frame (fresh integer index) with only the matching rows;
        its shape is printed as a side effect.
    """
    columns = mgt_epi.columns.values.tolist()
    rows = mgt_epi.values.tolist()
    # Apply each column filter in turn, narrowing the surviving rows.
    for column, allowed in fi_dic.items():
        column_position = columns.index(column)
        rows = [row for row in rows if str(row[column_position]) in allowed]
    filtered = pd.DataFrame(rows)
    filtered.columns = columns
    print(filtered.shape)
    return filtered
def flag_reprot(flag_input, test_file,key_list):
    """Count flag MGT-ST occurrences per metadata category.

    flag_input is indexed by 'MGT_type'; for every category column in
    key_list, the MGT*-prefixed ST values of test_file are matched
    against the category's flagged STs and their occurrence counts are
    returned as {category: {flagged_ST: count}}.

    Note: test_file's MGT columns are prefixed in place with the level
    name (e.g. 'MGT4' + value) as a side effect.
    """
    flag_input = flag_input.set_index('MGT_type')
    dic_file = flag_input.to_dict()
    mgt_levels_list = test_file.columns.tolist()
    mgt_levels_list = [a for a in mgt_levels_list if "MGT" in a]
    for level in mgt_levels_list:
        test_file[level] = level + test_file[level].astype(str)
    test_list = test_file.values.tolist()
    keyflag_dic = {}
    for k1 in dic_file:
        if k1 in key_list:
            # outfile = open(outpath + '/' + k1 + '.txt','w')
            # Drop NaN-valued flag entries before matching.
            dic_file2 = {k:v for k, v in dic_file[k1].items() if "nan" not in str(v)}
            mgtst_list = []
            for line in test_list:
                for value in line:
                    if value in dic_file2.keys():
                        mgtst = value
                        mgtst_list.append(mgtst)
                        strain_name = line [0]
                        predict_types = dic_file2[value]
                        # output = "{}\t{}\t{}".format(strain_name,predict_types,mgtst)
                        # print(output)
                        # outfile.write(output + '\n')
            mgtst_ser = pd.Series(mgtst_list)
            keyflag_dic[k1] = mgtst_ser.value_counts().to_dict()
    return keyflag_dic
def transmi_tracking_flags(threshod_dic, mgt_epi,flags):
    """Summarise transmission clusters per MGT type, annotated with flags.

    For every MGT level in threshod_dic, types with at least the
    threshold number of isolates are summarised (isolate count,
    country/year/source breakdowns, flag-ST counts via flag_reprot) and
    each isolate is mapped to its type summary.  Counts of potential
    international / recent / large clusters are printed.

    Returns (mgt_threshod, type_dic, isolate_dic, type_isolist_dic) for
    the last MGT level processed.
    """
    # NOTE(review): the loop variable 'type' shadows the builtin.
    for mgt in threshod_dic:
        gp = mgt_epi.groupby([ mgt])['Strain'].count().fillna(0)
        pass_filter_type_dic = gp [gp>= threshod_dic[mgt]].to_dict()
        if 0 in pass_filter_type_dic.keys():
            pass_filter_type_dic.pop(0)
        mgt_threshod = mgt + '_>=_' + str(threshod_dic[mgt])
        type_dic = {}
        type_isolist_dic = {}
        isolate_dic = {}
        interspread = 0
        limited_year = 0
        large_sclale = 0
        for type in pass_filter_type_dic.keys():
            type_isolist_dic[type] = []
            subdf = mgt_epi[mgt_epi[mgt]== type]
            key_list = ['Population_Structure', 'MDR', 'AR2_1', 'Top_MGT4_STs']
            keyflag_dic = flag_reprot(flags, subdf, key_list)
            country_dic = subdf.groupby(['Country'])['Strain'].count().to_dict()
            year_dic = subdf.groupby(['Collection Year'])['Strain'].count().to_dict()
            source_dic = subdf.groupby(['Source Type'])['Strain'].count().to_dict()
            type_dic[type] = keyflag_dic
            # type_dic[type]={"no_isolates":{}}
            type_dic[type]['no_isolates'] = pass_filter_type_dic[type]
            type_dic[type]['country_detail'] = country_dic
            # 'None' placeholders are excluded from the distinct counts.
            if 'None' in country_dic.keys():
                type_dic[type]['no_country'] = len(country_dic) - 1
            else:
                type_dic[type]['no_country'] = len(country_dic)
            type_dic[type]['year_detail'] = year_dic
            if 'None' in year_dic.keys():
                type_dic[type]['no_year'] = len(year_dic) - 1
            else:
                type_dic[type]['no_year'] = len(year_dic)
            if len(year_dic) <= 2 and len(year_dic)> 0:
                limited_year =limited_year+1
            if len(country_dic) >= 2 :
                interspread = interspread + 1
            if len(year_dic) > 1 and len(year_dic) > 0 and len(country_dic) >= 2 and pass_filter_type_dic[type] > 50:
                large_sclale = large_sclale + 1
            type_dic[type]['source'] = source_dic
            if 'None' in source_dic.keys():
                type_dic[type]['no_source'] = len(source_dic) - 1
            else:
                type_dic[type]['no_source'] = len(source_dic)
            ########### to product isolate_dic
            acclist = subdf['Accession'].tolist()
            for acc in acclist:
                type_isolist_dic[type].append(acc)
                isolate_dic[acc] = {}
                isolate_dic[acc] = type_dic[type]
                isolate_dic[acc][mgt_threshod] = type
        print("No. of passed types: " + str(len(pass_filter_type_dic)))
        print('No. of potential international spreading clusters: ' + str(interspread))
        print('No. of potential international spreading clusters within years: ' + str(limited_year))
        print('No. of potential international spreading large clusters within years >50: ' + str(large_sclale))
    return mgt_threshod,type_dic, isolate_dic, type_isolist_dic
def transmi_tracking(threshod_dic, mgt_epi):
    """Summarise transmission clusters per MGT type (no flag annotation).

    Same workflow as transmi_tracking_flags but without the flag-ST
    summaries: for every MGT level in threshod_dic, types with at least
    the threshold number of isolates get country/year/source breakdowns
    and each isolate is mapped to its type summary.

    Returns (mgt_threshod, type_dic, isolate_dic, type_isolist_dic) for
    the last MGT level processed.
    """
    # NOTE(review): the loop variable 'type' shadows the builtin.
    for mgt in threshod_dic:
        gp = mgt_epi.groupby([mgt])['Strain'].count().fillna(0)
        pass_filter_type_dic = gp [gp>= threshod_dic[mgt]].to_dict()
        if 0 in pass_filter_type_dic.keys():
            pass_filter_type_dic.pop(0)
        mgt_threshod = mgt + '_>=_' + str(threshod_dic[mgt])
        type_dic = {}
        isolate_dic = {}
        type_isolist_dic ={}
        interspread = 0
        limited_year = 0
        large_sclale = 0
        for type in pass_filter_type_dic.keys():
            type_isolist_dic[type]=[]
            subdf = mgt_epi[mgt_epi[mgt]== type]
            key_list = ['Population_Structure', 'MDR', 'AR2_1', 'Top_MGT4_STs']
            # keyflag_dic = flag_reprot(flags, subdf, key_list)
            country_dic = subdf.groupby(['Country'])['Strain'].count().to_dict()
            year_dic = subdf.groupby(['Collection Year'])['Strain'].count().to_dict()
            source_dic = subdf.groupby(['Source Type'])['Strain'].count().to_dict()
            type_dic[type] = {}
            type_dic[type]['no_isolates'] = pass_filter_type_dic[type]
            type_dic[type]['country_detail'] = country_dic
            # 'None' placeholders are excluded from the distinct counts.
            if 'None' in country_dic.keys():
                type_dic[type]['no_country'] = len(country_dic) - 1
            else:
                type_dic[type]['no_country'] = len(country_dic)
            type_dic[type]['year_detail'] = year_dic
            if 'None' in year_dic.keys():
                type_dic[type]['no_year'] = len(year_dic) - 1
            else:
                type_dic[type]['no_year'] = len(year_dic)
            if len(year_dic) <= 2 and len(year_dic)> 0:
                limited_year =limited_year+1
            if len(country_dic) >= 2 :
                interspread = interspread + 1
            if len(year_dic) > 1 and len(year_dic) > 0 and len(country_dic) >= 2 and pass_filter_type_dic[type] > 50:
                large_sclale = large_sclale + 1
            type_dic[type]['source'] = source_dic
            if 'None' in source_dic.keys():
                type_dic[type]['no_source'] = len(source_dic) - 1
            else:
                type_dic[type]['no_source'] = len(source_dic)
            ########### to product isolate_dic
            acclist = subdf['Accession'].tolist()
            for acc in acclist:
                type_isolist_dic[type].append(acc)
                isolate_dic[acc] = {}
                isolate_dic[acc] = type_dic[type]
                isolate_dic[acc][mgt_threshod] = type
        print("No. of passed types: " + str(len(pass_filter_type_dic)))
        print('No. of potential international spreading clusters: ' + str(interspread))
        print('No. of potential international spreading clusters within years: ' + str(limited_year))
        print('No. of potential international spreading large clusters within years >50: '+ str(large_sclale))
    return mgt_threshod,type_dic, isolate_dic, type_isolist_dic
def get_part_alleleprofil(whol_alleprof, isolist):
    """Extract the allele-profile rows for the requested isolates.

    Args:
        whol_alleprof: list of tab-separated profile lines; the first
            element is the header row.
        isolist: accession ids (strings) whose rows should be kept.

    Returns:
        A new list: the header row followed by every profile line whose
        first column is one of the requested accessions.
    """
    wanted = set(isolist)  # O(1) membership instead of the original O(n*m) nested loop
    outlist = [whol_alleprof[0]]
    # Skip the header when scanning so it can never be matched a second time.
    for line in whol_alleprof[1:]:
        # Only the first column (the accession) matters for the match.
        if line.split('\t', 1)[0] in wanted:
            outlist.append(line)
    return outlist
# Script entry point: run the full clustering pipeline defined in main() above.
if __name__ == "__main__":
    main()
| Adalijuanluo/MGTSEnT | MGTSEnT_MGT9_singlelinkagecluster.py | MGTSEnT_MGT9_singlelinkagecluster.py | py | 24,576 | python | en | code | 0 | github-code | 36 |
# Take a sample of ten phishing emails (or any text files) and find the most common words in them.
# Using Shakespeare's Hamlet as a sample, find the most common words in the sample.
from collections import Counter

# Concatenate the ten sample files ./Files/Hamlet/0 .. ./Files/Hamlet/9.
text = ""
for i in range(10):
    with open("./Files/Hamlet/" + str(i)) as f:
        text += f.read()

# Counter replaces the original manual if/else dict-building loop.
count = Counter(text.split())

# Print words from least to most common (so the most common appear last),
# exactly like the original ascending sort by count.
for word in sorted(count, key=count.get):
    print(word, count[word])
| IAteNoodles-Linux/CS_Term2 | Common.py | Common.py | py | 475 | python | en | code | 0 | github-code | 36 |
20890209940 | import os
import glob
import pickle
import logging
import argparse
from multiprocessing import Pool
import numpy as np
import pandas as pd
from core.utils import timer, do_job
# PATH configuration: every location comes from environment variables so the
# pipeline can be re-pointed without code changes.
DATA_PATH = os.getenv("DATA_PATH")
PREPROCESSED_DATA_PATH = os.getenv("PREPROCESSED_DATA_PATH")
TXT_DATA_NAME = os.getenv("TXT_DATA_NAME")
print(TXT_DATA_NAME)
DW2V_PATH = os.getenv("DW2V_PATH")
PARAM_PATH = os.getenv("PARAM_PATH")
# Logger: file-based job logger (level 10 == DEBUG), written to job.log.
LOGGER = logging.getLogger('JobLogging')
LOGGER.setLevel(10)
fh = logging.FileHandler('job.log')
LOGGER.addHandler(fh)
formatter = logging.Formatter('%(asctime)s:%(lineno)d:%(levelname)s:%(message)s')
fh.setFormatter(formatter)
LOGGER.info("job start")
# CLI arguments; note they are re-exported as environment variables below so
# that worker modules imported later can read them via os.getenv.
parser = argparse.ArgumentParser(description='train Dynamic Word Embeddings')
parser.add_argument('--without_preprocess', type=int, default=0, metavar='N',
                    help='if preprocessor is not neccessary, set 1')
parser.add_argument('--n_job', type=str, default="10", metavar='N',
                    help='number of cpu for multiprocessing')
parser.add_argument('--word_freq_min', type=str, default="5", metavar='N',
                    help='minmiun freqency for target word')
args = parser.parse_args()
os.environ["N_JOB"] = args.n_job
os.environ["WORD_FREQ_MIN"] = args.word_freq_min
N_JOB = int(os.getenv("N_JOB"))
if __name__ == "__main__":
    # Stage 1 (optional): tokenize/clean the raw daily tweet dumps in parallel.
    if args.without_preprocess == 0:
        # Preprocessing (skip with --without_preprocess 1)
        with do_job("preprocess tweet", LOGGER):
            from core.preprocess_tweet import preprocess_one_day_tweet
            TWEETS_PATHS = glob.glob(DATA_PATH+"alldata_20*")
            if not os.path.exists(PREPROCESSED_DATA_PATH+TXT_DATA_NAME):
                os.mkdir(PREPROCESSED_DATA_PATH+TXT_DATA_NAME)
            with Pool(processes=N_JOB) as p:
                p.map(preprocess_one_day_tweet, TWEETS_PATHS)
    # Stage 2: count word co-occurrences per day.
    with do_job("make co occ dict", LOGGER):
        from core.make_DW2V import make_unique_word2idx, make_whole_day_co_occ_dict
        TWEETS_PATHS = glob.glob(PREPROCESSED_DATA_PATH+TXT_DATA_NAME+"/*")
        # Build the global vocabulary index first.
        make_unique_word2idx(TWEETS_PATHS)
        if not os.path.exists(PREPROCESSED_DATA_PATH+"co_occ_dict_word_count/"):
            os.mkdir(PREPROCESSED_DATA_PATH+"co_occ_dict_word_count/")
        TWEETS_PATHS = glob.glob(PREPROCESSED_DATA_PATH+TXT_DATA_NAME+"/*")
        make_whole_day_co_occ_dict(TWEETS_PATHS)
    # Stage 3: convert co-occurrence counts to PPMI values (paths are sorted
    # so tweet files and their dicts pair up by day).
    with do_job("make PPMI", LOGGER):
        from core.make_DW2V import make_whole_day_ppmi_list
        TWEETS_PATHS = sorted(glob.glob(PREPROCESSED_DATA_PATH+TXT_DATA_NAME+"/*"))
        DICTS_PATHS = sorted(glob.glob(PREPROCESSED_DATA_PATH+"co_occ_dict_word_count/*"))
        PATH_TUPLES = [(tweet_p, dict_p) for tweet_p, dict_p in zip(TWEETS_PATHS, DICTS_PATHS)]
        make_whole_day_ppmi_list(PATH_TUPLES)
    # Stage 4: train the dynamic word embeddings from the PPMI matrices.
    with do_job("make DW2V", LOGGER):
        from core.make_DW2V import make_DW2V
        make_DW2V(PARAM_PATH+"params_0803.json")
| GENZITSU/DynamicWordEmbedding | main.py | main.py | py | 3,016 | python | en | code | 1 | github-code | 36 |
8444330611 | from tokenize import TokenInfo, DEDENT
from bones.bones_tree import BonesNode
from bones.token_parser import parse
from bones.tests.conftest import tokens_from_string
from bones.suppressors.known_mutants import FUNCTION
def test_module_tokens_are_put_in_root_node():
    """Module-level tokens stay in the root node; function-body lines are
    replaced by placeholder lines that _generate_tokens_from_string strips.

    NOTE(review): this copy of the file appears to have lost blank lines
    inside the triple-quoted fixtures — confirm against the original repo.
    """
    given = tokens_from_string('''\
from somewhere import rainbow
friends = ['dog']
def a_journey(friends):
    import yellow.brick
    return 'home'
destination = a_journey(friends)
''')
    expected_tokens = _generate_tokens_from_string('''\
from somewhere import rainbow
friends = ['dog']
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
destination = a_journey(friends)
''')
    module = parse(given)
    assert module.tokens == expected_tokens
def test_function_tokens_are_put_in_function_blocks():
    """Each top-level function's tokens land in its own function block.

    NOTE(review): this copy of the file appears to have lost blank lines
    inside the triple-quoted fixtures — confirm against the original repo.
    """
    # given
    given = tokens_from_string('''\
from newer import better
old = 'bad'
def new_test(thing):
    from special_date_module import is_new
    return is_new(thing)
class SneakyClass():
    """just making sure nothing here show up where it shouldn't"""
    pass
def old_test(thing):
    from special_date_module import is_old
    return is_old(thing)
good = new_test(thing) and not old_test(thing)
''')
    # Placeholder lines keep token start/end positions aligned with `given`.
    expected_function_1_tokens = _generate_node_tokens_from_string('''\
## delete from test ##
## delete from test ##
## delete from test ##
def new_test(thing):
    from special_date_module import is_new
    return is_new(thing)
''')
    func_block_1 = _build_func_block(expected_function_1_tokens)
    expected_function_2_tokens = _generate_node_tokens_from_string('''\
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
## delete from test ##
def old_test(thing):
    from special_date_module import is_old
    return is_old(thing)
''')
    func_block_2 = _build_func_block(expected_function_2_tokens)
    # when
    module = parse(given)
    # then
    assert module.children[0].tokens[:-1] == func_block_1.tokens[:-1]
    # The dedent token (the last token) will look different because if way the test is generated
    assert module.children[0].tokens[-1] == TokenInfo(type=DEDENT, string='', start=(8, 0), end=(8, 0), line='class SneakyClass():\n')
    assert module.children[2].tokens[:-1] == func_block_2.tokens[:-1]
    # The dedent token (the last token) will look different because if way the test is generated
    assert module.children[2].tokens[-1] == TokenInfo(type=DEDENT, string='', start=(16, 0), end=(16, 0), line='good = new_test(thing) and not old_test(thing)\n')
def test_class_tokens_are_put_in_class_block():
    """A class's tokens (including its docstring) land in a class block,
    while its methods are pulled into nested function blocks.

    NOTE(review): this copy of the file appears to have lost blank lines
    inside the triple-quoted fixtures — confirm against the original repo.
    """
    given = tokens_from_string('''\
def a_func(thing):
    pass
class ImportantClass():
    """I'm parsed correctly"""
    def __init__(self):
        pass
def another_func():
    pass
''')
    expected_tokens = _generate_node_tokens_from_string('''\
## delete from test ##
## delete from test ##
## delete from test ##
class ImportantClass():
    """I'm parsed correctly"""
## delete from test ##
## delete from test ##
''')
    module = parse(given)
    assert module.children[1].tokens[:-1] == expected_tokens[:-1]
    # The dedent token (the last token) will look different because if way the test is generated
    assert module.children[1].tokens[-1] == TokenInfo(type=DEDENT, string='', start=(10, 0), end=(10, 0), line='def another_func():\n')
def _generate_tokens_from_string(s):
    """Tokenize *s* and strip the '## delete from test ##' placeholder lines."""
    toks = list(tokens_from_string(s))
    return _remove_placeholder_tokens(toks)
def _remove_placeholder_tokens(toks):
    # In order to generate expected_tokens that will match the parser's behavior
    # some empty lines must be used to make tok.start and tok.end values the same.
    # Keep every token except those originating from a placeholder line.
    return [tok for tok in toks if tok.line != '## delete from test ##\n']
def _build_func_block(expected_tokens):
    """Wrap *expected_tokens* in a parent-less FUNCTION BonesNode for comparison."""
    func_block = BonesNode(block_type=FUNCTION, parent=None)
    func_block.tokens = expected_tokens
    return func_block
def _generate_node_tokens_from_string(s):
    # A funcion or class node in the bones tree will not have the
    # - the module's final ENDMARKER token (that token belongs in the root node)
    # so, remove it here too.
    return _generate_tokens_from_string(s)[:-1]
| dougroyal/bones-testing | bones/tests/test_token_parser_returned_tokens.py | test_token_parser_returned_tokens.py | py | 4,464 | python | en | code | 1 | github-code | 36 |
3323022532 | # -*- coding: utf-8 -*-
import psycopg2
# the module that connects to the database
"""
The task is to create a reporting tool that prints out reports (in plain text)
based on the data in the database.
1.What are the most popular three articles of all time?
Which articles have been accessed the most? Present this information as a sorted list
with the most popular article at the top.
2.Who are the most popular article authors of all time?
That is, when you sum up all of the articles each author has written,
which authors get the most page views? Present this as a sorted list with the most
popular author at the top.
3.On which days did more than 1% of requests lead to errors?
The log table includes a column status that indicates the HTTP status code that
the news site sent to the user's browser. (Refer back to this lesson
if you want to review the idea of HTTP status codes.)
"""
DBNAME = "news"
# Open and connect to database; Run the query; Return database cursor objects
# Open and connect to database; run the query; return all fetched rows.
def query(user_query):
    """Run *user_query* against the news database and return all rows.

    The connection is now always closed, even when execute()/fetchall()
    raises — the original leaked the connection on error.
    """
    DB = psycopg2.connect(database=DBNAME)
    try:
        cursor = DB.cursor()
        cursor.execute(user_query)
        result = cursor.fetchall()
    finally:
        DB.close()
    return result
# 1. popular article
# 1. popular article
def pop_article():
    """Print the three most-viewed articles, most popular first."""
    rows = query("select title, count(*) from articles "
                 "join log on path like CONCAT('%',slug) group by title "
                 "order by count(*) desc limit 3")
    print("The most popular three articles are:")
    for article_title, view_count in rows:
        print(" \"{}\" -- {} views".format(article_title, view_count))
# 2. popular author
# 2. popular author
def pop_author():
    """Print authors ranked by total page views of their articles."""
    rows = query("select name, count(path) from authors "
                 "join articles on authors.id = author join log "
                 "on path like CONCAT('%', slug) group by name order by count(path) desc limit 4")
    print('The most popular authors are:')
    for author_name, view_count in rows:
        print(" {} -- {} views".format(author_name, view_count))
# 3. error
# 3. error
def error_day():
    """Print every day on which 4xx responses exceeded 1% of requests."""
    # Inner query counts 4xx responses per day; the ratio against that day's
    # total request count is then filtered at >= 1%.
    errorday = query("select date, avg from ("
                     "select date, (sum(error) / (select count(*) "
                     "from log where (time::date) = date)) as avg "
                     "from (select (time::date) as date, count(*) as error "
                     "from log where status like '4%' group by date) "
                     "as error_percentage group by date order by avg desc) as final "
                     "where avg >= .01")
    print('Days with more than 1% of requests lead to errors')
    for res in errorday:
        # res[0] is the date, res[1] the error fraction (rendered as a percent).
        print (str(res[0]) + " — " + str(round((res[1]*100), 2)) +
               '%')
# Run all three reports when executed as a script.
if __name__ == '__main__':
    pop_article()
    pop_author()
    error_day()
| laurafang/-logs_ana | log_ana.py | log_ana.py | py | 2,577 | python | en | code | 0 | github-code | 36 |
30934317618 | #!/usr/bin/python3
# USE THIS WHEN IN NOTEBOOK -> %python
# CHANGE ACCORDINGLY: the field XXX
import sys
import time
from azure.identity import ClientSecretCredential
from azure.storage.filedatalake import DataLakeServiceClient,FileSystemClient
ACCOUNT_NAME = "XXX"
FILE_SYSTEM = "XXX"
TARGET_DIR = "XXX"
def set_permission(path, acl):
    """Apply *acl* to a single Data Lake path and return the SDK response.

    NOTE(review): relies on the module-level ``filesystem`` client created in
    the ``__main__`` block instead of receiving it as a parameter — confirm
    this is intentional before reuse.
    """
    # Directories and files need to be handled differently
    if path.is_directory:
        directory_client = filesystem.get_directory_client(directory=path.name)
        resp = directory_client.set_access_control(acl=acl)
        print(f'\tApplied Directory ACL to {path.name}')
    else:
        file_client = filesystem.get_file_client(path.name)
        # Need to remove "Default" ACL segments from ACL string because that can't be applied to files
        resp = file_client.set_access_control(acl=acl[:acl.find('default')-1])
        print(f'\tApplied File ACL to {path.name}')
    return resp
def main(target_dir, filesystem):
    """Copy *target_dir*'s ACL onto every child path (files and subdirectories)."""
    # Get the target directory, subdirectories and permissions
    paths = filesystem.get_paths(path=target_dir)
    directory_client = filesystem.get_directory_client(directory=target_dir)
    acl = directory_client.get_access_control()
    target_acl_dir = acl['acl']
    for path in paths:
        set_permission(path, target_acl_dir)
if __name__ == '__main__':
    # Clients
    credential = "XXX" # the master account key.
    service = DataLakeServiceClient(account_url=f'https://{ACCOUNT_NAME}.dfs.core.windows.net/', credential=credential)
    filesystem = service.get_file_system_client(file_system=FILE_SYSTEM)
    print('*'*20)
    print(f'Storage Account Name: {ACCOUNT_NAME}')
    print(f'File System Name: {FILE_SYSTEM}')
    print('*'*20)
    print(f'Running: Setting ACLs for all child paths (subdirectories and files) in TARGET_DIR to match parent.')
    # Time the recursive ACL application end-to-end.
    total_start = time.time() # Start Timing
    main(TARGET_DIR,filesystem)
    total_end = time.time() # End Timing
    print("Complete: Recursive ACL configuration took {} seconds.".format(str(round(total_end - total_start,2))))
| eosantigen/devops-tools | apps/python/azure/azure_datalake_set_acl.py | azure_datalake_set_acl.py | py | 2,087 | python | en | code | 0 | github-code | 36 |
def main():
    """Read a height (1-8) from the user and print a double half-pyramid."""
    height = get_height_int()
    for line in pyramid_lines(height):
        print(line)


def pyramid_lines(height):
    """Return the pyramid rows as strings.

    Row r (0-based) has height-r leading spaces, r+1 hashes, a two-space
    gap, then r+1 hashes again — the same output the original nested loops
    produced.  The rewrite removes the original's reliance on reading the
    leaked loop variable `col` after its for-loop had finished (a fragile
    idiom that broke as soon as the loops were reordered).
    """
    return [
        " " * (height - row) + "#" * (row + 1) + "  " + "#" * (row + 1)
        for row in range(height)
    ]
def get_height_int():
    """Prompt until the user enters an integer between 1 and 8 (inclusive)."""
    while True:
        try:
            height = int(input("Height: "))
        except ValueError:
            # The original crashed on non-numeric input; re-prompt instead.
            continue
        if 1 <= height <= 8:
            return height
main() | astimajo/CS50 | mario.py | mario.py | py | 745 | python | en | code | 1 | github-code | 36 |
74298299622 | #!/usr/bin/env python
#encoding=utf8
from json import dumps
def get_node(tree, name):
    """Depth-first search for *name*; return (found, path-of-labels-from-root)."""
    if tree.label == name:
        return True, [tree.label]
    for child in (tree.children or []):
        found, path = get_node(child, name)
        if found:
            return True, [tree.label] + path
    return False, None
def goto_node(tree, desc):
    """Walk *tree* along the label path *desc*; return (success, node)."""
    assert tree.label == desc[0]
    current = tree
    for label in desc[1:]:
        matches = [child for child in current.children if child.label == label]
        if not matches:
            return False, None
        current = matches[0]
    return True, current
def recreate_node(orig, desc):
    """Recreate item in orig under desc.

    Builds a fresh Tree rooted at the last label of *desc*, recursively
    copying the subtree found at that address in *orig*.
    """
    tree = Tree(desc[-1], [])
    success, node = goto_node(orig, desc)
    if not node.children:
        return tree
    for child in node.children:
        # NOTE(review): `success` from this inner goto_node gates the copy;
        # a miss produces an empty child node — confirm that is intended.
        success, _ = goto_node(orig, desc + [child.label])
        if not success:
            child_node = Tree(child.label, [])
        else:
            child_node = recreate_node(orig, desc + [child.label])
        tree.children.append(child_node)
    return tree
class Tree(object):
    """A labelled tree that can be re-rooted from any node's point of view.

    NOTE(review): ``__dict__`` is overridden as a *method*, shadowing the
    usual instance-attribute mapping — callers here invoke it explicitly as
    ``self.__dict__()``; confirm nothing relies on the normal attribute dict.
    """
    def __init__(self, label, children=[]):
        # NOTE(review): mutable default argument is shared across calls that
        # omit `children` — verify no caller mutates the default.
        self.label = label
        self.children = children

    def __dict__(self):
        # Recursive {label: [sorted child dicts]} representation.
        return {self.label: [c.__dict__() for c in sorted(self.children)]}

    def __str__(self, indent=None):
        return dumps(self.__dict__(), indent=indent)

    def __lt__(self, other):
        # Trees sort by label (used when sorting children in __dict__).
        return self.label < other.label

    def __eq__(self, other):
        return self.__dict__() == other.__dict__()

    def from_pov(self, from_node):
        """Return a new tree re-rooted at *from_node*.

        Walks the address of *from_node*, then re-attaches each former
        ancestor as a child of its former child, dropping the edge that
        pointed back down the path.
        """
        found, desc = get_node(self, from_node)
        if not found:
            raise ValueError("Node {} not found.".format(from_node))
        last_label = desc[-1]
        node = recreate_node(self, desc)
        last_node = node
        reverse_desc = [last_label]
        for name in reversed(desc[:-1]):
            desc_ = get_node(self, name)[1]
            parent = recreate_node(self, desc_)
            last_node.children.append(parent)
            # Remove the child we came from to avoid a cycle.
            parent.children = [
                child for child in parent.children
                if child.label != last_label
            ]
            last_label = desc_[-1]
            last_node = parent
        return node

    def path_to(self, from_node, to_node):
        """Return the list of labels from *from_node* to *to_node*."""
        tree = self.from_pov(from_node)
        found, desc = get_node(tree, to_node)
        if not found:
            raise ValueError("Dest node {} not found.".format(to_node))
        return desc
| xiaket/exercism | python/pov/pov.py | pov.py | py | 2,553 | python | en | code | 0 | github-code | 36 |
22226405639 | import pandas as pd
import argparse
from gtfparse import read_gtf
# CLI: which phenotype's featureCounts table to filter down to ncRNA genes.
parser = argparse.ArgumentParser()
parser.add_argument('--phenotype', type=str, required=True)
# parser.add_argument('--ncRNA', type=str, required=True)
if __name__ == '__main__':
    args = parser.parse_args()
    phenotype = args.phenotype
    # GENCODE v34 annotation; used to identify non-coding / pseudogene ids.
    gtf = read_gtf('ReferenceGenome/Annotations/gencode.v34.chromasomal.annotation.gtf')
    # NOTE(review): some gene_type strings are listed more than once; the
    # isin() filter is unaffected, but the list could be deduplicated.
    ncRNA_genes = gtf.loc[(gtf.gene_type.isin(['snoRNA', 'snRNA',
                                               'lncRNA', 'unprocessed_pseudogene',
                                               'transcribed_unprocessed_pseudogene',
                                               'pseudogene', 'rRNA_pseudogene',
                                               'transcribed_processed_pseudogene',
                                               'transcribed_unitary_pseudogene',
                                               'transcribed_unprocessed_pseudogene',
                                               'translated_processed_pseudogene',
                                               'translated_unprocessed_pseudogene',
                                               'unprocessed_pseudogene'
                                               ])) & (gtf.feature == 'gene')].gene_id
    # Subset the counts matrix to the ncRNA genes and write it back out.
    counts = pd.read_csv('featureCounts/{phenotype}/Counts.txt'.format(phenotype=phenotype),
                         sep='\t', skiprows=1, index_col=0)
    ncRNA_counts = counts.loc[ncRNA_genes]
    ncRNA_counts.to_csv('featureCounts/{phenotype}_annotated_ncRNA/Counts.txt'.format(phenotype=phenotype),
                        sep='\t', index=True, header=True)
| bfairkun/ChromatinSplicingQTLs | code/scripts/NonCodingRNA/GetNonCodingRNAFromFeatureCounts.py | GetNonCodingRNAFromFeatureCounts.py | py | 1,685 | python | en | code | 0 | github-code | 36 |
20824479856 | import dlib
from imutils import face_utils
dlib_path = "dlibb/shape_predictor_68_face_landmarks.dat"
detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor(dlib_path)
import argparse
import pickle
import cv2
import os
import mpmath
import numpy as np
# face_classifier = cv2.CascadeClassifier('harcascades/haarcascade_frontalface_default.xml')
# Root folder of the evaluation images (not used by the live-camera loop below).
src_path = ("O:\\Nama_College\\FYP\\MY_FYP_CODE\\MY_FYP_CODE\\MY_CODE\\TESTING_DATASET\\")
predict = []          # collected predictions (currently never written to)
features_vector = []  # unused scratch list
# Load the pre-trained classifier pickled during training.
pickle_in = open("O:\\Nama_College\\FYP\\MY_FYP_CODE\\MY_FYP_CODE\\MY_CODE\\dlib_normalized.pickle","rb")
# pickle_in = open("O:\\Nama_College\\FYP\\MY_FYP_CODE\\MY_FYP_CODE\\MY_CODE\\dlib_normalized_full.pickle","rb")
model = pickle.load(pickle_in)
# Live classification loop: read webcam frames, detect faces, rebuild the
# training-time feature vector (normalized distances from the nose tip to
# landmarks 18-67) and draw the model's prediction.  Press 'q' to quit.
cap = cv2.VideoCapture(0)
B = 0  # frame counter; every 5th frame number is printed as a heartbeat
while True:
    ret, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    B += 1
    if B % 5 == 0:
        print(B)
    for (J, rect) in enumerate(detector(gray, 0)):
        shap = face_utils.shape_to_np(predictor(gray, rect))
        # Landmark 30 (nose tip) is the reference point for every distance.
        centre_x, centre_y = shap[30]
        shap = shap[18:68]
        # Euclidean distance of each remaining landmark from the nose tip.
        features = [
            float(mpmath.sqrt(float((x - centre_x) ** 2 + (y - centre_y) ** 2)))
            for (x, y) in shap
        ]
        maxx = max(features)
        # Scale to [0, 1]; zero distances are dropped (mirrors training code).
        final = [value / maxx for value in features if value != 0.0]
        prediction = model.predict([np.array(final)])[0]
        (x, y, w, h) = face_utils.rect_to_bb(rect)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
        cv2.putText(frame, prediction, (x, y), cv2.FONT_HERSHEY_COMPLEX, 0.5,
                    (0, 255, 0), 2)
        print(prediction)
        cv2.circle(frame, (centre_x, centre_y), 1, (0, 0, 0), 5)
        for (px, py) in shap:
            cv2.circle(frame, (px, py), 1, (0, 0, 255), 2)
    # BUG FIX: the original compared an unrelated variable `k` (a float left
    # over from mpmath.sqrt, or undefined before the first detected face)
    # against 'q', so the quit check could raise NameError and could never
    # succeed.  Display is also done once per frame, outside the face loop,
    # so the window updates even when no face is detected.
    cv2.imshow("Image", frame)
    key = cv2.waitKey(1) & 0xFF
    if key == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| Hassan1175/MY_FYP_CODE | MY_CODE/videoframes.py | videoframes.py | py | 3,060 | python | en | code | 0 | github-code | 36 |
# Sieve of Eratosthenes over [2, candies]; for each prime p, count the splits
# of the remaining candies in steps of `multiple`.
candies, multiple = [int(x) for x in input().split()]
primes = [True for i in range(candies + 1)]
primes[0] = False
primes[1] = False
combos = 0
for p in range(2, candies + 1):
    if primes[p]:
        # NOTE(review): each term counts k >= 0 with p + k*multiple within the
        # limit (candies, then candies - 1) — confirm against the problem spec.
        combos += (candies - p) // multiple + 1
        combos += (candies - p - 1) // multiple + 1
        # Mark every further multiple of p composite.
        for i in range(p * 2, candies + 1, p):
            primes[i] = False
# print(primes)
print(combos)
| AAZZAZRON/DMOJ-Solutions | dmopc15c1p4.py | dmopc15c1p4.py | py | 403 | python | en | code | 1 | github-code | 36 |
9911287046 | #!/usr/bin/python
# -*- coding: utf-8 -*-
'''
Custom filters for use in openshift_aws
'''
from ansible import errors
class FilterModule(object):
    """Custom Ansible filters used by the openshift_aws role."""

    @staticmethod
    def scale_groups_serial(scale_group_info, upgrade=False):
        """Return the deployment serial for a scale group.

        Reads the ``deployment_serial`` tag of the (single) scale group;
        when *upgrade* is true the serial is returned incremented by one.
        An empty *scale_group_info* means a first deployment, so serial 1.
        """
        if scale_group_info == []:
            return 1
        scale_group_info = scale_group_info[0]
        if not isinstance(scale_group_info, dict):
            raise errors.AnsibleFilterError("|filter plugin failed: Expected scale_group_info to be a dict")
        for tag in scale_group_info['tags']:
            if tag['key'] == 'deployment_serial':
                serial = int(tag['value'])
                return serial + 1 if upgrade else serial
        raise errors.AnsibleFilterError("|filter plugin failed: deployment_serial tag was not found")

    @staticmethod
    def scale_groups_match_capacity(scale_group_info):
        """True when every scale group's instance count equals its desired capacity."""
        return all(
            len(group['instances']) == group['desired_capacity']
            for group in scale_group_info
        )

    @staticmethod
    def build_instance_tags(clusterid):
        """Return the instance tag dict, including the
        kubernetes.io/cluster/<clusterid> key required by Kubernetes."""
        return {
            'clusterid': clusterid,
            'kubernetes.io/cluster/{}'.format(clusterid): clusterid,
        }

    def filters(self):
        """Return the mapping of filter names to their implementations."""
        return {'build_instance_tags': self.build_instance_tags,
                'scale_groups_match_capacity': self.scale_groups_match_capacity,
                'scale_groups_serial': self.scale_groups_serial}
| barkbay/openshift-ansible-gravitee | roles/lib_utils/filter_plugins/openshift_aws_filters.py | openshift_aws_filters.py | py | 2,484 | python | en | code | 1 | github-code | 36 |
# Phantom platform REST endpoints (format with the platform base url / asset id).
PHANTOM_SYS_INFO_URL = "{url}rest/system_info"
PHANTOM_ASSET_INFO_URL = "{url}rest/asset/{asset_id}"
# Salesforce OAuth2 endpoints (production and sandbox/test orgs).
URL_GET_CODE = 'https://login.salesforce.com/services/oauth2/authorize'
URL_GET_TOKEN = 'https://login.salesforce.com/services/oauth2/token'
URL_GET_CODE_TEST = 'https://test.salesforce.com/services/oauth2/authorize'
URL_GET_TOKEN_TEST = 'https://test.salesforce.com/services/oauth2/token'
# Salesforce REST API endpoint templates ({version} is the API version path).
API_ENDPOINT_DESCRIBE_GLOBAL = '{version}/sobjects/'
API_ENDPOINT_GET_UPDATED = '{version}/sobjects/{sobject}/updated/'
API_ENDPOINT_OBJECT_ID = '{version}/sobjects/{sobject}/{id}/'
API_ENDPOINT_RUN_QUERY = '{version}/{query_type}/'
API_ENDPOINT_OBJECT = '{version}/sobjects/{sobject}/'
API_ENDPOINT_GET_LISTVIEWS = '{version}/sobjects/{sobject}/listviews/'
API_ENDPOINT_GET_LISTVIEW_LOCATOR = '{version}/sobjects/{sobject}/listviews/{locator}/'
API_ENDPOINT_BATCH_REQUEST = '{version}/composite/batch/'
API_ENDPOINT_GET_LISTVIEWS_FROM_OBJECT = '{version}/ui-api/list-records/{sobject}/{view_name}'
# Mapping from connector parameter names to Salesforce Case field names.
CASE_FIELD_MAP = {
    'parent_case_id': 'ParentId',
    'subject': 'Subject',
    'priority': 'Priority',
    'description': 'Description',
    'status': 'Status',
    'closed': 'IsClosed',
    'escalated': 'IsEscalated'
}
# User-facing error messages and defaults.
SALESFORCE_INVALID_INTEGER = 'Please provide non-zero positive integer in "{parameter}"'
SALESFORCE_UNKNOWN_ERR_MSG = "Unknown error occurred. Please check the asset configuration and|or action parameters."
SALESFORCE_ERR_CODE_UNAVAILABLE = "Error code unavailable"
SALESFORCE_DEFAULT_TIMEOUT = 30
| splunk-soar-connectors/salesforce | salesforce_consts.py | salesforce_consts.py | py | 1,518 | python | en | code | 0 | github-code | 36 |
35866803773 | import re
from datetime import date
from typing import Optional
import docx # type: ignore
from adaptive_hockey_federation.parser.user_card import BaseUserInfo
# Case-insensitive regexes matching the Russian column headers of the roster
# tables ("Имя", "Фамилия", "Отчество", "Дата рождения", "Команда", etc.).
NAME = '[И|и][М|м][Я|я]'
SURNAME = '[Ф|ф][А|а][М|м][И|и][Л|л][И|и][Я|я]'
PATRONYMIC = '[О|о][Т|т]?[Ч|ч][Е|е][С|с][Т|т][В|в][О|о]'
DATE_OF_BIRTH = '[Д|д][А|а][Т|т][А|а] [Р|р][О|о].+'
TEAM = '[К|к][О|о][М|м][А|а][Н|н][Д|д][А|а]'
PLAYER_NUMBER = '[И|и][Г|г][Р|р][О|о][В|в][О|о][Й|й]'
POSITION = '[П|п][О|о][З|з][И|и][Ц|ц][И|и][Я|я]'
NUMERIC_STATUS = '[Ч|ч].+[С|с][Т|т].+'
PLAYER_CLASS = '[К|к][Л|л][А|а][С|с][С|с]'
def read_file_columns(file: docx) -> list[docx]:
    """Return every table column object in *file*, tables in document order.

    The original enumerated the columns but never used the index; the
    enumerate call is removed.
    """
    return [
        column
        for table in file.tables
        for column in table.columns
    ]
def read_file_text(file: docx) -> list[str]:
    """Collect the text of every run in every paragraph of *file*."""
    texts = []
    for paragraph in file.paragraphs:
        for run in paragraph.runs:
            texts.append(run.text)
    return texts
def get_counter_for_columns_parser(
    columns: list[docx]
) -> int:
    """Count data rows by scanning the numbering column ("п/п").

    After the header cell containing "п/п", consecutive short cells
    (1-3 characters, i.e. row numbers) are counted until the first cell
    that breaks the pattern; the outer loop stops once a non-zero count
    has been collected.
    """
    count = 0
    for column in columns:
        for index, cell in enumerate(column.cells):
            if re.search(r'п/п', cell.text):
                for cell in column.cells[index + 1:]:
                    if cell.text and len(cell.text) < 4:
                        count += 1
                    else:
                        break
        else:
            if count > 0:
                break
    return count
def columns_parser(
    columns: list[docx],
    regular_expression: str,
) -> list[Optional[str]]:
    """Find the column whose header matches *regular_expression* and return
    the text of each of its remaining cells (empty cells become None).

    When no column header matches, fall back to scanning every cell for the
    pattern and collect the next ``count`` cells below the match, where
    ``count`` comes from get_counter_for_columns_parser.
    """
    output = [
        text if text
        else None
        for column in columns
        if re.search(
            regular_expression,
            list(cell.text for cell in column.cells)[0]
        )
        for text in list(cell.text for cell in column.cells)[1:]
    ]
    if not output:
        # Fallback: header was not the first cell of any column.
        count = get_counter_for_columns_parser(columns)
        for column in columns:
            for index, cell in enumerate(column.cells):
                if re.search(regular_expression, cell.text):
                    for cell in column.cells[index + 1:index + 1 + count]:
                        output.append(cell.text)
    return output
def find_names(columns: list[docx], regular_expression: str) -> list[str]:
    """Parse first names from the matching column.

    Relies on the full-name template "Surname Name Patronymic": the first
    name is the second whitespace-separated word.
    """
    names_list = columns_parser(columns, regular_expression)
    return [
        name.split()[1].rstrip()
        for name in names_list
        if name
    ]
def find_surnames(columns: list[docx], regular_expression: str) -> list[str]:
    """Parse surnames from the matching column.

    Relies on the full-name template "Surname Name Patronymic": the surname
    is the first whitespace-separated word.
    """
    surnames_list = columns_parser(columns, regular_expression)
    return [
        surname.split()[0].rstrip()
        for surname in surnames_list
        if surname
    ]
def find_patronymics(
    columns: list[docx],
    regular_expression: str,
) -> list[str]:
    """Parse patronymics from the matching column.

    Relies on the full-name template "Surname Name Patronymic": the
    patronymic is the third word.  Entries without a third word map to
    the Russian placeholder string "Отчество отсутствует" (no patronymic).
    """
    patronymics_list = columns_parser(columns, regular_expression)
    return [
        patronymic.replace('/', ' ').split()[2].rstrip().rstrip(',')
        if patronymic and len(patronymic.split()) > 2
        else 'Отчество отсутствует'
        for patronymic in patronymics_list
    ]
def find_dates_of_birth(
    columns: list[docx],
    regular_expression: str,
) -> list[date]:
    """Parse birth dates (dd.mm.yyyy-like) from the matching column.

    Unparseable or empty cells fall back to the sentinel date 1900-01-01.
    Two-digit years are expanded: values above 23 become 19xx, else 20xx.
    """
    dates_of_birth_list = columns_parser(columns, regular_expression)
    dates_of_birth_list_clear = []
    for date_of_birth in dates_of_birth_list:
        if date_of_birth:
            try:
                for day, month, year in [
                    re.sub(r'\D', ' ', date_of_birth).split()
                ]:
                    if len(year) == 2:
                        if int(year) > 23:
                            year = '19' + year
                        else:
                            year = '20' + year
                    dates_of_birth_list_clear.append(
                        date(int(year), int(month), int(day))
                    )
            # BUG FIX: the original ``except ValueError or IndexError``
            # evaluates the expression first and is equivalent to
            # ``except ValueError`` only, leaving IndexError uncaught.
            except (ValueError, IndexError):
                dates_of_birth_list_clear.append(date(1900, 1, 1))
        else:
            dates_of_birth_list_clear.append(date(1900, 1, 1))
    return dates_of_birth_list_clear
def find_team(
    text: list[str],
    columns: list[docx],
    regular_expression: str,
) -> str:
    """Parse the team name from the document's free text, with a fallback
    to the table columns.

    The chained conditional maps known multi-word club names to their
    canonical spelling; otherwise the word following the "команда" marker
    is capitalized and returned.
    """
    text_clear = ' '.join(text)
    # Strip punctuation and common club-name prefixes before tokenizing.
    text_clear = re.sub(
        r'\W+|_+|ХК|СХК|ДЮСХК|Хоккейный клуб|по незрячему хоккею'
        '|по специальному хоккею|Спец хоккей|по специальному|по следж-хоккею',
        ' ',
        text_clear
    ).split()  # type: ignore
    try:
        return [
            'Молния Прикамья'
            if text_clear[index + 2] == 'Прикамья'
            else 'Ак Барс'
            if text_clear[index + 1] == 'Ак'
            else 'Снежные Барсы'
            if text_clear[index + 1] == 'Снежные'
            else 'Хоккей Для Детей'
            if text_clear[index + 1] == 'Хоккей'
            else 'Дети-Икс'
            if text_clear[index + 1] == 'Дети'
            else 'СКА-Стрела'
            if text_clear[index + 1] == 'СКА'
            else 'Сборная Новосибирской области'
            if text_clear[index + 2] == 'Новосибирской'
            else 'Атал'
            if text_clear[index + 3] == 'Атал'
            else 'Крылья Мечты'
            if text_clear[index + 2] == 'мечты'
            else 'Огни Магнитки'
            if text_clear[index + 1] == 'Огни'
            else 'Энергия Жизни Краснодар'
            if text_clear[index + 3] == 'Краснодар'
            else 'Энергия Жизни Сочи'
            if text_clear[index + 4] == 'Сочи'
            else 'Динамо-Москва'
            if text_clear[index + 1] == 'Динамо'
            else 'Крылья Советов'
            if text_clear[index + 2] == 'Советов'
            else 'Красная Ракета'
            if text_clear[index + 2] == 'Ракета'
            else 'Красная Молния'
            if text_clear[index + 2] == 'молния'
            else 'Сахалинские Львята'
            if text_clear[index + 1] == 'Сахалинские'
            else 'Мамонтята Югры'
            if text_clear[index + 1] == 'Мамонтята'
            else 'Уральские Волки'
            if text_clear[index + 1] == 'Уральские'
            else 'Нет названия команды'
            if text_clear[index + 1] == 'Всего'
            else text_clear[index + 1].capitalize()
            for index, txt in enumerate(text_clear)
            if re.search(regular_expression, txt)
        ][0]
    except IndexError:
        # Fallback: search the table cells when the free text had no match.
        for column in columns:
            for cell in column.cells:
                if re.search(regular_expression, cell.text):
                    txt = re.sub(r'\W', ' ', cell.text)
                    return txt.split()[1].capitalize()
    return 'Название команды не найдено'
def find_players_number(
    columns: list[docx],
    regular_expression: str,
) -> list[int]:
    """Parse jersey numbers from the matching column.

    Non-digit characters are stripped and at most the first two digits are
    kept; empty or unparseable cells become 0.
    """
    players_number_list = columns_parser(columns, regular_expression)
    players_number_list_clear = []
    for player_number in players_number_list:
        if player_number:
            try:
                players_number_list_clear.append(
                    int(re.sub(r'\D', '', player_number)[:2])
                )
            except ValueError:
                players_number_list_clear.append(0)
        else:
            players_number_list_clear.append(0)
    return players_number_list_clear
def find_positions(columns: list[docx], regular_expression: str) -> list[str]:
    """Parse each player's field position from the matching column.

    Abbreviations are normalized to 'нападающий' (forward), 'защитник'
    (defender) or 'вратарь' (goaltender); anything else is lower-cased and
    cleaned of newlines, parentheses, digits and the captain marker.
    """
    positions_list = columns_parser(columns, regular_expression)
    return [
        'нападающий'
        if re.search(
            r'^н|^Н|^H|^Нп|^нл|^нп|^цн|^лн|^Нап|^№|^А,|^К,',
            position.lstrip()
        )
        else 'защитник'
        if re.search(r'^з|^З|^Зщ|^Защ', position.lstrip())
        else 'вратарь'
        if re.search(r'^Вр|^В|^вр', position.lstrip())
        else 'Позиция записана неверно'
        if not re.sub(r'\n|\(.+|\d', '', position)
        else re.sub(
            r'\n|\(.+|\d|Капитан',
            '',
            position
        ).lower().rstrip().replace(',', '').lstrip()
        for position in positions_list
        if position
    ]
def find_numeric_statuses(file: docx) -> list[list[str]]:
    """Collect [surname, name, patronymic, numeric_status] rows from *file*.

    Cell 1 is the title-cased full name (missing patronymics are filled with
    the placeholder 'Отчество отсутствует'); cell 4 holds the numeric status.
    Rows whose name cell has more than 4 words, a non-digit status, or too
    few cells are skipped.
    """
    numeric_statuses_list = []
    for table in file.tables:
        for row in table.rows:
            txt = row.cells[1].text.title()
            txt = re.sub(r'\W|Коляс.+|Здоровый', ' ', txt)
            if len(txt.split()) <= 4:
                try:
                    numeric_status = row.cells[4].text
                    numeric_status = re.sub(r'\D', '', numeric_status)
                    if numeric_status:
                        if len(txt.split()) == 2:
                            txt += ' Отчество отсутствует'
                        numeric_statuses_list.append(
                            txt.split()[:3] + [numeric_status]
                        )
                except IndexError:
                    # Row has fewer than five cells — not a roster row.
                    pass
    return numeric_statuses_list
def numeric_status_check(
    name: str,
    surname: str,
    patronymics: str,
    statuses: list[list[str]],
) -> Optional[int]:
    """Look up a player's numeric status by full name.

    Each *statuses* entry is [surname, name, patronymic, status]; only the
    first word of *patronymics* participates in the match.  Returns the
    status as an int, or None when no entry matches.
    """
    for entry in statuses:
        # Short-circuiting keeps patronymics.split() lazy, exactly as before.
        if (surname == entry[0]
                and name == entry[1]
                and patronymics.split()[0] == entry[2]):
            return int(entry[3])
    return None
def docx_parser(
    path: str,
    numeric_statuses: list[list[str]]
) -> list[BaseUserInfo]:
    """Parse one roster .docx file into a list of BaseUserInfo records.

    Runs every field parser over the document's tables/text and zips the
    parallel result lists together by index; the player's numeric status is
    looked up in *numeric_statuses* by full name.
    """
    file = docx.Document(path)
    columns_from_file = read_file_columns(file)
    text_from_file = read_file_text(file)
    names = find_names(columns_from_file, NAME)
    surnames = find_surnames(columns_from_file, SURNAME)
    patronymics = find_patronymics(columns_from_file, PATRONYMIC)
    dates_of_birth = find_dates_of_birth(
        columns_from_file,
        DATE_OF_BIRTH,
    )
    team = find_team(text_from_file, columns_from_file, TEAM)
    players_number = find_players_number(columns_from_file, PLAYER_NUMBER)
    positions = find_positions(columns_from_file, POSITION)
    # NOTE(review): the per-field lists are assumed to be the same length as
    # `names`; a parser that drops a cell would misalign rows — verify.
    return [
        BaseUserInfo(
            name=names[index],
            surname=surnames[index],
            date_of_birth=dates_of_birth[index],
            team=team,
            player_number=players_number[index],
            position=positions[index],
            numeric_status=numeric_status_check(
                names[index],
                surnames[index],
                patronymics[index],
                numeric_statuses,
            ),
            patronymic=patronymics[index],
        )
        for index in range(len(names))
    ]
| Studio-Yandex-Practicum/adaptive_hockey_federation | adaptive_hockey_federation/parser/docx_parser.py | docx_parser.py | py | 12,958 | python | ru | code | 2 | github-code | 36 |
34696045892 | import os
import yaml
import openai
"""
使用openai API的方式访问ChatGPT/azure GPT
"""
def set_env(cfg_file):
    """Load the 'azure' section of a YAML config file into os.environ.

    Args:
        cfg_file: path to a YAML file with a top-level 'azure' mapping of
            environment-variable names to values.
    """
    with open(cfg_file) as fh:
        config_data = yaml.safe_load(fh)
    azure_section = config_data["azure"]
    if azure_section is not None:
        for key, value in azure_section.items():
            os.environ[key] = value
    # NOTE(review): looks like a leftover from experimenting — confirm whether
    # this dummy variable is still needed.
    os.environ['MY_VARIABLE'] = 'my_value'
def ai_chat(msgs=None):
    """Send one chat-completion request to Azure OpenAI and print the reply.

    Prints the full raw response followed by the first choice's message text.

    Args:
        msgs: list of {"role": ..., "content": ...} chat messages.
    """
    openai.api_type = "azure"
    openai.api_version = "2023-03-15-preview"
    openai.api_base = os.getenv("api-base")  # Your Azure OpenAI resource's endpoint value.
    openai.api_key = os.getenv("api-key")
    response = openai.ChatCompletion.create(
        # Error seen: openai.error.InvalidRequestError: The API deployment for this resource does not exist
        # Fix: only models already deployed on this account can be used; check deployments in OpenAI Studio
        engine="gpt-35-turbo-test",  # The deployment name you chose when you deployed the ChatGPT or GPT-4 model.
        # Multi-turn context is currently only remembered by re-sending the prior conversation with each request
        messages=msgs
    )
    print(response)
    print(response['choices'][0]['message']['content'])
if __name__ == '__main__':
    # Demo: load Azure OpenAI credentials from a local YAML file, then send a
    # single chat request (system prompt + user message, both in Chinese).
    set_env('D:\\qiyu-work\\openaikey.yaml')
    messages = [
        # {"role": "system", "content": "Assistant is a large language model trained by OpenAI."},
        #{"role": "system", "content": "Assistant is a large language model trained by OpenAI."},
        {"role": "system", "content": "你现在是一名汽车4S店专业的销售顾问,客户咨询你价格,请把下面的话用可爱的语气表达出来,不要重复我说的话,回复不能超过30个字"},
        {"role": "user", "content": "价格会受多因素的影响实时发生变化,具体我让销售跟您聊哈"}
    ]
    ai_chat(messages)
| zzfengxia/python3-learn | dailytool/connect_openai_api.py | connect_openai_api.py | py | 1,851 | python | en | code | 0 | github-code | 36 |
74249579945 | """"
Controls EC2 Services
"""
import boto3
import logging
import os
"""
Ec2 controller: finds ec2 instances that have a devday tag, has the ability to stop, start and to modify their shutdown behaviour - to avoid termination
"""
class ec2Controller:
    """Finds EC2 instances carrying the developer-day tag and can stop them,
    start them, and correct their shutdown behaviour so a stop never
    terminates the instance."""
    STOPBEHAVIOUR = 'stop'  # desired value of instanceInitiatedShutdownBehavior
    def __init__(self, region, searchTag):
        # region: AWS region name; searchTag: tag key (compared case-insensitively)
        # whose value must be 'true' for an instance to be managed.
        self.region = region
        self.client = boto3.client('ec2', region_name= region)
        self.searchTag = searchTag.lower()
        self.logger = logging.getLogger(__name__)
        self.enabledServices = {}  # NOTE(review): never used in this class — confirm before removing
        env = os.environ  # NOTE(review): unused local — confirm before removing
    """
    Main entry point to be called from ResourceFinder - finds all EC2 Services that have been tagged
    Returns a Map [instance id] : {state , platform , name}
    """
    def findResourcesForEC2(self):
        ec2Map = self.findServices(running=False) # Get all EC2 running or not that are tagged
        return ec2Map
    """
    Main entry point to signal a STOP of developer day event
    All tagged and running EC2 servers will be stopped
    """
    def stopDayEvent(self):
        # Returns True only when every running tagged instance reached 'stopping'.
        result = True
        totalResult=True
        ec2Map = self.findServices(running=True) # Find all those that are currently running
        if len(ec2Map) ==0:
            self.logger.info("There are currently no active EC2 instances that are tagged - they all seemed stopped or do not exist")
            return True
        # Make sure a stop cannot terminate the instances before stopping them.
        self.correctShutDownBehaviour(ec2Map)
        try:
            for ec2instance in ec2Map:
                ec2Dict = ec2Map[ec2instance]
                state = ec2Dict["state"]
                platform = ec2Dict["platform"]
                name = ec2Dict["name"]
                if state=="running":
                    response = self.client.stop_instances(
                        InstanceIds = [ec2instance]
                    )
                    cs = response['StoppingInstances'][0]['CurrentState']['Name']
                    self.logger.info(f"Shutting down instance {name} id {ec2instance}, plaform {platform} moving from running --> {cs}")
                    result = ("stopping" == cs)
                    if not result:
                        totalResult = False
        except Exception as e:
            self.logger.error("Could not stop all EC2 instances ")
            self.logger.exception(e)
            totalResult = False
        return totalResult
    """
    Main entry point to signal a START of developer day event
    Finds all tagged Ec2 servers that are currently stopped
    """
    def startDayEvent(self):
        # Returns True only when every stopped tagged instance reached 'pending'.
        result = True
        totalResult = True
        ec2Map = self.findServices(running=False) # Find all those that are currently stopped
        if len(ec2Map) == 0:
            self.logger.info(
                "There are currently no stopped EC2 instances that are tagged - they are either running or dont exist")
            return True
        try:
            for ec2instance in ec2Map:
                ec2Dict = ec2Map[ec2instance]
                state = ec2Dict["state"]
                platform = ec2Dict["platform"]
                name = ec2Dict["name"]
                if state=="stopped":
                    response = self.client.start_instances(
                        InstanceIds = [ec2instance]
                    )
                    cs = response['StartingInstances'][0]['CurrentState']['Name']
                    self.logger.info(f"Starting up instance {name} id {ec2instance}, plaform {platform} moving from stopped --> {cs}")
                    result = ("pending" == cs)
                    if not result:
                        totalResult = False
        except Exception as e:
            self.logger.error("Could not start all EC2 instances ")
            self.logger.exception(e)
            totalResult = False
        return totalResult
    """
    Checks the SERVICE ARN for the special searchTag - and see if the Tag is set to TRUE
    return True or False
    """
    def _checkforTag(self,tagsDict):
        # tagsDict: list of {'Key': ..., 'Value': ...} tag dicts from the EC2 API.
        self.logger.debug(f"Tags are {tagsDict}")
        for tag in tagsDict:
            key = tag.get('Key')
            if key is not None:
                value=tag['Value'].lower()
                if key.lower() == self.searchTag and value=='true':
                    return True
        return False
    """
    Finds all Ec2 instances that exist with a dev day tag
    if the running parameter is set to True only instances that are currently running will be picked up, passing False will flag all those that are stopped
    Returns a MAP of [instance id] : {state , platform , name}
    """
    def findServices(self, running=True):
        serviceMap = {}
        try:
            response = self.client.describe_instances()
            # Sentinel value so the pagination loop runs at least once.
            nextToken = "A"
            while nextToken is not None:
                nextToken = response.get("NextToken")
                reservationL = response.get("Reservations",[])
                for reservation in reservationL:
                    instanceL = reservation.get("Instances",[])
                    for ins in instanceL:
                        self.logger.debug(f"Instance Details: {ins} ")
                        instanceId = ins["InstanceId"]
                        # The API omits 'Platform' for Linux instances.
                        platform = ins.get("Platform","Linux")
                        state = ins["State"]['Name']
                        tags = ins.get('Tags',[])
                        name = '(no name)'
                        for tag in tags:
                            k = tag['Key']
                            if k.lower() =='name':
                                name = tag['Value']
                                break
                        if self._checkforTag(tags):
                            self.logger.info(f"EC2: {name} instance-id {instanceId} - platform {platform}, current state {state} is tagged for Developer day/night")
                            if (running and state=="running") or (not running and state=="stopped"):
                                serviceMap[instanceId] = {"state" : state, "platform" : platform, "name": name}
                            else:
                                self.logger.info(f"EC2: skipping instance_id {instanceId} {name} as it is already in the desired state")
                        else:
                            self.logger.info(f"EC2: skipping untagged instance_id {instanceId} {name}")
                if nextToken is not None:
                    response = self.client.describe_instances(NextToken=nextToken)
        except Exception as e:
            # NOTE(review): exception is swallowed after a warning; callers get
            # an empty/partial map — confirm this best-effort behaviour is intended.
            self.logger.warning(f"Could not access the instances in the region {self.region}")
        return serviceMap
    """
    Makes sure the instances are not terminated when they are shutdown - this method returns the behaviour
    """
    def _getShutdownBehavior(self, instanceID):
        response = self.client.describe_instance_attribute(
            Attribute= 'instanceInitiatedShutdownBehavior' ,
            InstanceId=instanceID)
        behaviour = response['InstanceInitiatedShutdownBehavior']['Value']
        self.logger.info(f"instance {instanceID}, shutdown behaviour is currently set to {behaviour}")
        return behaviour
    def correctShutDownBehaviour(self, serviceMap):
        # For every instance in serviceMap, set shutdown behaviour to 'stop'
        # if it is not already, so a shutdown never terminates the instance.
        self.logger.info("EC2: Checking and correcting the shutdown behaviour to avoid instance termination when sleeping")
        for instance in serviceMap:
            behaviour = self._getShutdownBehavior(instance)
            if not behaviour == self.STOPBEHAVIOUR:
                self.logger.info(f"EC2: Correcting Shutdown behaviour.... on instance {instance}")
                response =self.client.modify_instance_attribute(
                    InstanceId = instance,
                    InstanceInitiatedShutdownBehavior={"Value" : self.STOPBEHAVIOUR})
            else:
                self.logger.info(f"EC2: shutdown behaviour on instance {instance} already correctly set to STOP")
| evoraglobal/SleepSaver | ec2Controller.py | ec2Controller.py | py | 7,948 | python | en | code | 0 | github-code | 36 |
19665531020 | from puzzle_input import get_puzzle_input
strategy = get_puzzle_input(2)
def play_rock_paper_scissor(strategy):
points_one = 0
points_two = 0
elves_hands = {"A": 1, "B": 2, "C": 3}
your_hands = {"X": 1, "Y": 2, "Z": 3}
for game in strategy:
play = game.strip().split(" ")
elf, you = [play[0], play[1]]
if elves_hands[elf] == your_hands[you]:
points_one += 3 + your_hands[you]
elif (
elves_hands[elf] - your_hands[you] == 2
or your_hands[you] - elves_hands[elf] == 1
):
points_one += 6 + your_hands[you]
else:
points_one += your_hands[you]
your_hands_values = list(your_hands.values())
if you == "X":
loose = abs(elves_hands[elf] + 1) % 3
points_two += your_hands_values[loose]
print(f"Loose: {elf} Points: {your_hands_values[loose]}")
elif you == "Y":
points_two += 3 + elves_hands[elf]
print(f"Draw: {elf} Points: {3 + elves_hands[elf]}")
elif you == "Z":
win = (elves_hands[elf] + 3) % 3
points_two += 6 + your_hands_values[win]
print(f"Win: {elf} Points: {6 + your_hands_values[win]}")
return (points_one, points_two)
print(play_rock_paper_scissor(strategy))
| jonnaliesel/advent-of-code | 2022/day_2.py | day_2.py | py | 1,329 | python | en | code | 0 | github-code | 36 |
73387268903 | '''
5) Desenvolver um programa que pergunte 4 notas escolares de um aluno e exiba mensagem informando que o aluno foi aprovado se a média escolar for maior ou igual a 5. Se o aluno não foi aprovado, indicar uma mensagem informando essa condição. Apresentar junto com a mensagem de aprovação ou reprovação o valor da média obtida pelo aluno.
'''
# Read the four term grades, average them, and report pass (average >= 5) or fail.
media1 = float(input("Me diga sua nota do primeiro bimestre: "))
media2 = float(input("Me diga sua nota do segundo bimestre: "))
media3 = float(input("Me diga sua nota do terceiro bimestre: "))
media4 = float(input("Me diga sua nota do quarto bimestre: "))
media = (media1 + media2 + media3 + media4) / 4
if (media >= 5):
    print(f"Sua média do ano foi: {media}. Parabéns, você foi aprovado!")
else:
    print(f"Sua média do ano foi: {media}. Meus pêsames, você foi reprovado.")
72549467305 | #!/usr/bin/env python3
# Purpose: Scale the coordinates of the Aya in the Quran images with a factor.
# Author: Abdallah Abdelazim
# Features:
# - Scale the coordinates of the Aya in the Quran images with a factor.
# - The input CSV file 'data.csv' is expected to be in the same folder as this script.
# - The output CSV file is saved to 'data_output.csv'.
# Pre-requisites:
# - Python 3.6 or higher.
#
import os
import csv
# Set the factor to multiply x and y values by
factor = 0.6
script_folder = os.path.dirname(__file__)
input_file = os.path.join(script_folder, "data.csv")
output_file = os.path.join(script_folder, "data_output.csv")
# Open the input and output CSV files
# NOTE(review): the `with` below rebinds `input_file`/`output_file` from path
# strings to file objects — harmless here, but confusing; consider new names.
with open(input_file, mode='r') as input_file, open(output_file, mode='w', newline='') as output_file:
    # Create CSV reader and writer objects
    csv_reader = csv.reader(input_file)
    csv_writer = csv.writer(output_file)
    # Read and write the header row
    header_row = next(csv_reader)
    csv_writer.writerow(header_row)
    # Loop through each row in the input CSV file
    for row in csv_reader:
        # Extract the "page", "x", and "y" values from the row
        aya_id = row[0]
        page = row[1]
        x = int(row[2])
        y = int(row[3])
        # Multiply the "x" and "y" values by the factor
        x *= factor
        y *= factor
        # Write the updated values to the output CSV file (truncated to int)
        csv_writer.writerow([aya_id, page, int(x), int(y)])
print("Done!")
| QuranHub/quran-images-utils | csv_data_scale/scale_csv.py | scale_csv.py | py | 1,493 | python | en | code | 3 | github-code | 36 |
14178436834 | import time
import random
'''
Simple implementation of UUIDv8 with 60 bit Timestamp Usage
Based on https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-01.html first with some changes in clock secuence part.
Later based on https://www.ietf.org/archive/id/draft-peabody-dispatch-new-uuid-format-04.html
Main doc is https://datatracker.ietf.org/doc/draft-ietf-uuidrev-rfc4122bis/07/
'''
def uuid8():
    """Generate a UUIDv8 string embedding a 60-bit timestamp.

    Layout: 32 bits of unix seconds, then 28 bits of sub-second data split
    around the version nibble (8), the RFC variant bits (0b10), and 62 bits
    of randomness.

    Returns:
        The UUID as a lower-case, hyphenated 36-character string.
    """
    ts, ns = divmod(time.time_ns(), 1_000_000_000)
    # Reduce nanoseconds to 28 bits (small precision loss on decode).
    ms = (ns >> 2) & 0xfffffff
    # Random fills for the clock-seq and node portions.
    rnd1 = random.randint(0, 0x0fff)
    rnd2 = random.randint(0, 0xffffffff)
    rnd3 = random.randint(0, 0xffff)
    bits = ts << 96
    # First 16 bits of the sub-second value.
    bits |= (ms >> 12) << 80
    # Version nibble = 8.
    bits |= 8 << 76
    # Last 12 bits of the sub-second value.
    bits |= (ms & 0xfff) << 64
    # BUGFIX: the RFC variant field is the two MOST significant bits of this
    # 16-bit group and must be 0b10. The old `rnd1 | 0x2000` set bit 13,
    # producing variant 0b00. Since rnd1 <= 0x0fff, OR-ing 0x8000 yields
    # exactly the 0b10 variant.
    bits |= (rnd1 | 0x8000) << 48
    bits |= rnd2 << 16
    bits |= rnd3
    # BUGFIX: zero-pad to 32 hex digits so the slicing below stays aligned
    # even if the leading timestamp bits are small.
    hex_str = '{0:032x}'.format(bits)
    return '%s-%s-%s-%s-%s' % (hex_str[:8], hex_str[8:12], hex_str[12:16],
                               hex_str[16:20], hex_str[20:])
if __name__ == '__main__':
    # Print one freshly generated UUIDv8 when run as a script.
    print(uuid8())
| ningauble/uuid8 | uuid.py | uuid.py | py | 1,413 | python | en | code | 0 | github-code | 36 |
23747778179 | """
Khinshan Khan - cli.py.
This module contains all command line interaction with user.
"""
import sys
def prompt(message):
    """Print *message* (if truthy), then read one line and strip surrounding
    whitespace.

    May raise EOFError or KeyboardInterrupt from input(); callers such as
    input_monad handle those.
    """
    if message:
        print(message)
    return input(">> ").strip()
def input_monad(message):
    """Prompt until a line is read; exit cleanly on EOF, re-prompt on Ctrl-C.

    Returns the stripped user input (possibly an empty string).
    """
    result = None
    while True:
        try:
            result = prompt(message)
        except EOFError:
            # Ctrl-D: leave the program cleanly.
            print("\nExiting mcm-oss.\nHave a nice day!")
            sys.exit()
        except KeyboardInterrupt:
            # Ctrl-C: print a newline and ask again.
            print()
            continue
        if result is not None:
            return result
def verify_command(command):
    """Verify a given command is legal.

    Args:
        command: list of whitespace-split input tokens.

    Returns:
        (command_letter, argument) for a legal command — the argument is None
        for zero-argument commands — or (False, joined_input) otherwise.
    """
    token_count = len(command)
    # Single-letter commands that take no argument.
    if token_count == 1 and command[0] in ("Q", "t"):
        return (command[0], None)
    if token_count == 2:
        head, arg = command
        if head == "S" and arg in ("r", "i", "m"):
            return (head, arg)
        if head in ("A", "AR", "d", "D") and arg.isnumeric():
            return (head, arg)
    return (False, " ".join(command))
def interactive():
    """Read one command line from the user and return (command, arguments).

    Delegates validation to verify_command; the first element is False when
    the input was not a legal command.
    """
    tokens = input_monad(None).split()
    return verify_command(tokens)
def input_num(message):
    """Keep prompting until the user supplies digits only; return as int."""
    while True:
        raw = input_monad(message)
        if not raw.isnumeric():
            print("Invalid value: This value can only be a numeric like `55`")
            continue
        return int(raw)
def initialize():
    """Ask the user for the simulation parameters.

    Returns:
        Tuple of (RAM size in bytes, number of hard disks).
    """
    prompts = (
        "How much RAM is on the simulated computer? (bytes)",
        "How many hard disks on the simulated computer?",
    )
    ram_size, disks_max = (input_num(p) for p in prompts)
    return (ram_size, disks_max)
| shan-memery/mcm-oss | mcm_oss/cli.py | cli.py | py | 2,015 | python | en | code | 0 | github-code | 36 |
1540076990 | '''
Implementation of Sieve Of Eratosthenes:
Time Complexity: O(N Log(Log N))
Space Complexity: O(N)
'''
def sieve(n):
    """Return all primes <= n via the Sieve of Eratosthenes, or None if n <= 1.

    Fixes vs. the original: sieve(2) raised NameError because the marking loop
    never ran and the leftover-collection loop reused its (unbound) variable.
    Also starts marking multiples at i*i and uses math.isqrt for an exact
    integer square root.

    Time: O(n log log n). Space: O(n).

    Args:
        n: upper bound (inclusive).

    Returns:
        Sorted list of primes, or None when no primes exist (n <= 1).
    """
    if n <= 1:
        return None
    from math import isqrt
    is_prime = [True] * (n + 1)
    is_prime[0] = is_prime[1] = False  # 0 and 1 are not prime
    # Only candidates up to sqrt(n) need to mark off their multiples.
    for i in range(2, isqrt(n) + 1):
        if is_prime[i]:
            # Multiples below i*i were already marked by smaller primes.
            for j in range(i * i, n + 1, i):
                is_prime[j] = False
    return [i for i in range(2, n + 1) if is_prime[i]]
# Ad-hoc demonstration runs; sieve(0) prints None.
print(sieve(25))
print(sieve(36))
print(sieve(41))
print(sieve(0))
print(sieve(98))
| puneeth1999/progamming-dsa-with-python | Week-2/AdditionalResouces/#2_0_sieveOfEratosthenes.py | #2_0_sieveOfEratosthenes.py | py | 771 | python | en | code | 2 | github-code | 36 |
37298965152 | import time
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
import json
from pymongo import MongoClient
# Global counter of processed comments (also used in error logging below).
i = 0
# Local MongoDB: read comments from `comment.comment`, write labeled
# results to `comment.after`.
client = MongoClient('localhost',27017)
db=client.comment
collection=db.comment
collection2=db.after
def sentiment_classify(data):
    """Classify the sentiment of *data* via the Baidu NLP API.

    Returns the 'sentiment' field of the first result item. On a QPS-limit
    error (error_code 18) it sleeps briefly and retries recursively.

    NOTE(review): if the HTTP request itself raises, `result` is unbound in
    the except block and `result.get(...)` raises NameError/UnboundLocalError
    — confirm and guard.
    """
    access_token=''
    http=urllib3.PoolManager()
    url='https://aip.baidubce.com/rpc/2.0/nlp/v1/sentiment_classify?access_token='+access_token
    params={'text':data}
    # When serializing to JSON, omitting the encode encoding here would also
    # work without error.
    encoded_data = json.dumps(params).encode('GBK')
    try:
        request=http.request('POST',
                    url,
                    body=encoded_data,
                    headers={'Content-Type':'application/json'})
        result = str(request.data,'GBK')
        result = json.loads(result)
        return result['items'][0]['sentiment']
    except Exception as e:
        if result.get('error_code') == 18:
            print("error:qps limit",i, e, data, result)
            time.sleep(0.2)
            return sentiment_classify(data)
def data_processing():
    """Re-label every comment: clear the 'after' collection, then copy each
    comment from the source collection together with its Baidu sentiment
    score."""
    collection2.remove()
    for item in collection.find():
        global i
        i+=1
        comment = item.get('content')
        sentiment = sentiment_classify(comment)
        collection2.insert({'comment': comment,'sentiment':sentiment})
data_processing()
| LogicJake/data_analysis | classfy/label.py | label.py | py | 1,396 | python | en | code | 2 | github-code | 36 |
17849746047 | from .data_metabolite_to_standard_name_dict import data_metabolite_to_standard_name_dict
from ..complete_dataset_class import CompleteDataset, natural_distribution_anti_correction, check_negative_data_array
from scripts.src.common.config import DataType, Direct, Keywords as CommonKeywords
from ..common_functions import average_mid_data_dict, glucose_infusion_input_metabolite_obj_dict_generator
from ..config import default_glucose_infusion_labeled_ratio
from .c13_glucose_enrichment_plasma import glucose_enrichment_plasma_dict
class Keyword(object):
    """String constants and shared index lists used throughout this module."""
    tissue = 'tissue'
    patient = 'patient'
    index = 'index'
    kidney = 'kidney'
    carcinoma = 'carcinoma'
    brain = 'brain'
    # Repeat indices that are averaged when the 'average' repeat is requested.
    index_average_list = [1, 2, 3]
def input_metabolite_data_obj_dict_generator(tissue_name, tissue_index):
    """Build input metabolite objects with the labeled-glucose ratio for a sample.

    Kidney and carcinoma samples use the patient-specific plasma enrichment;
    other tissues fall back to the default infusion labeled ratio.
    """
    if tissue_name == Keyword.kidney or tissue_name == Keyword.carcinoma:
        current_label_ratio = glucose_enrichment_plasma_dict[tissue_index]
    else:
        current_label_ratio = default_glucose_infusion_labeled_ratio
    current_input_metabolite_obj_dict = glucose_infusion_input_metabolite_obj_dict_generator(
        current_label_ratio)
    return current_input_metabolite_obj_dict
class SpecificParameters(CompleteDataset):
    """Dataset-specific loader parameters for the renal carcinoma study.

    Reads the kidney/carcinoma/brain sheets of data.xlsx and organizes the
    target metabolite data as complete_dataset[sheet][tissue_index][repeat].
    """
    def __init__(self):
        super().__init__()
        self.mixed_compartment_list = ('c', 'm')
        self.current_direct = '{}/renal_carcinoma'.format(Direct.data_direct)
        self.file_path = '{}/data.xlsx'.format(self.current_direct)
        self.experiment_name_prefix_list = ['kidney', 'carcinoma', 'brain']
        self.test_experiment_name_prefix = 'brain'
        self.test_tissue_index = 1
        self.test_repeat_index = 1
        # Metabolites dropped per sheet before storing the data.
        self.exclude_metabolites_dict = {
            'brain': {'3-phosphoglycerate'}
        }
        # One xlsx-parsing parameter dict per experiment sheet.
        self._complete_data_parameter_dict_dict = {
            current_sheet_name: {
                'xlsx_file_path': self.file_path,
                'xlsx_sheet_name': current_sheet_name,
                'index_col_name': CommonKeywords.metabolite_name_col,
                'mixed_compartment_list': self.mixed_compartment_list,
                'to_standard_name_dict': data_metabolite_to_standard_name_dict}
            for current_sheet_name in self.experiment_name_prefix_list}
        self._test_data_parameter_dict_dict = {
            DataType.test: {
                'xlsx_file_path': self.file_path,
                'xlsx_sheet_name': self.test_experiment_name_prefix,
                'index_col_name': CommonKeywords.metabolite_name_col,
                'mixed_compartment_list': self.mixed_compartment_list,
                'to_standard_name_dict': data_metabolite_to_standard_name_dict}}
        self.complete_input_metabolite_data_dict = {}
    @staticmethod
    def project_name_generator(tissue_name, tissue_index, repeat_index):
        """Return the canonical project name, e.g. 'kidney__1_2'."""
        return '{}__{}_{}'.format(tissue_name, tissue_index, repeat_index)
    def add_data_sheet(self, sheet_name, current_data_dict):
        """Store one parsed sheet into complete_dataset, keyed by tissue and
        repeat index parsed from each data label ('<tissue>_<tissue>_<repeat>'),
        optionally applying natural-abundance anti-correction and dropping
        excluded metabolites."""
        if self.anti_correction:
            for column_name, each_column_data_dict in current_data_dict.items():
                natural_distribution_anti_correction(each_column_data_dict)
            check_negative_data_array(current_data_dict, [])
        final_result_dict = self.complete_dataset
        if sheet_name not in final_result_dict:
            final_result_dict[sheet_name] = {}
        for data_label, specific_data_dict in current_data_dict.items():
            _, tissue_index_str, repeat_index_str = data_label.split('_')
            tissue_index = int(tissue_index_str)
            repeat_index = int(repeat_index_str)
            try:
                current_excluded_metabolites_set = self.exclude_metabolites_dict[sheet_name]
            except KeyError:
                current_excluded_metabolites_set = {}
            for excluded_metabolite_name in current_excluded_metabolites_set:
                pop_item = specific_data_dict.pop(excluded_metabolite_name, None)
            if tissue_index not in final_result_dict[sheet_name]:
                final_result_dict[sheet_name][tissue_index] = {}
            final_result_dict[sheet_name][tissue_index][repeat_index] = specific_data_dict
    def _complete_return_dataset(self, param_dict):
        """Return (project_name, target data dict, input metabolite objects)
        for one tissue/patient/repeat selection; the 'average' repeat averages
        over Keyword.index_average_list."""
        tissue_name = param_dict[Keyword.tissue]
        tissue_index = param_dict[Keyword.patient]
        repeat_index = param_dict[Keyword.index]
        if repeat_index == CommonKeywords.average:
            final_target_metabolite_data_dict = average_mid_data_dict(
                self.complete_dataset[tissue_name][tissue_index], Keyword.index_average_list)
        else:
            final_target_metabolite_data_dict = self.complete_dataset[
                tissue_name][tissue_index][repeat_index]
        project_name = self.project_name_generator(tissue_name, tissue_index, repeat_index)
        final_input_metabolite_data_obj_dict = input_metabolite_data_obj_dict_generator(tissue_name, tissue_index)
        return project_name, final_target_metabolite_data_dict, final_input_metabolite_data_obj_dict
    def _test_return_dataset(self):
        """Return the fixed test selection (no input metabolite objects)."""
        final_target_metabolite_data_dict = self.complete_dataset[
            DataType.test][self.test_tissue_index][self.test_repeat_index]
        project_name = DataType.test
        final_input_metabolite_data_dict = None
        return project_name, final_target_metabolite_data_dict, final_input_metabolite_data_dict
| LocasaleLab/Automated-MFA-2023 | scripts/data/renal_carcinoma/specific_data_parameters.py | specific_data_parameters.py | py | 5,412 | python | en | code | 0 | github-code | 36 |
28198353586 | """
FLUX: OPTIMUM RANGE
===================
"""
from math import isclose
from pathlib import Path
from typing import Literal
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from matplotlib.legend_handler import HandlerTuple
from pandas import DataFrame
from diive.core.plotting.plotfuncs import save_fig
class FindOptimumRange:
    def __init__(self,
                 df: DataFrame,
                 xcol: str,
                 ycol: str,
                 n_vals_per_bin: int = 300,
                 bins_agg: Literal['median'] = 'median',
                 rwinsize: float = 0.1,
                 ragg: Literal['mean'] = 'mean',
                 define_optimum: Literal['min', 'max'] = 'max'):
        """
        Find x range for optimum y

        First, y data are aggregated in bins of x. By default, the median
        value of y is calculated for each x bin (*bins_agg*). The number
        of bins that is used is defined by total length of data divided
        by *n_vals_per_bin*, i.e., each bin should contain e.g. 300 values.
        Then, the rolling mean (*ragg*) with window size *rwinsize* is
        calculated across all binned values. Here, *rwinsize* is given as
        the fraction of the total number of detected bins. The optimum is
        detected as the maximum (or other, *define_optimum*) of the values
        found in the rolling aggregation.

        Example: VPD (x) range where NEE (y) carbon uptake is highest (=smallest number)

        Args:
            df: Data
            xcol: Column name of x in df
            ycol: Column name of y in df
            n_vals_per_bin: Number of values per x bin
            bins_agg: How data in bins are aggregated
            rwinsize: Window size for rolling aggregation, expressed as fraction of
                the total number of bins. The total number of bins is calculated
                from the total length of the data and *n_vals_per_bin*. The resulting
                window size is then an integer value that is used in further calculations.
                If the integer window size results in an even number, +1 is added since
                the window size must be an odd number.
            ragg: Rolling aggregation that is used in the rolling window.
            define_optimum: Optimum can be based on 'min' or 'max'
        """
        # Keep only the two relevant columns; work on a copy.
        self.df = df[[xcol, ycol]].copy()
        self.xcol = xcol
        self.ycol = ycol
        self.n_vals_per_bin = n_vals_per_bin
        self.bins_agg = bins_agg
        self.rwinsize = rwinsize
        self.ragg = ragg
        self.define_optimum = define_optimum
        # Populated by find_optimum(); accessed through the results_optrange property.
        self._results_optrange = {}
@property
def results_optrange(self) -> dict:
"""Return optimum range results"""
if not self._results_optrange:
raise Exception('Results for optimum range are empty')
return self._results_optrange
    def find_optimum(self):
        """Run the full detection pipeline and store results in
        ``self._results_optrange`` (see the ``results_optrange`` property)."""
        # self._prepare_data() todo?
        bins_df, bin_aggs_df, n_xbins = self._divide_xdata_into_bins()
        # Rolling window size as a fraction of the number of bins; must be odd
        # so the window has a unique center bin.
        winsize = int(n_xbins * self.rwinsize)
        winsize = winsize + 1 if (winsize % 2 == 0) else winsize  # Must be odd number
        rbin_aggs_df = self._rolling_agg(bin_aggs_df=bin_aggs_df,
                                         use_bin_agg=self.bins_agg,
                                         rolling_agg=self.ragg,
                                         winsize=winsize)
        roptimum_bin, roptimum_val = self._find_rolling_optimum(rolling_df=rbin_aggs_df,
                                                                use_rolling_agg=self.ragg)
        # rwinsize = int(num_xbins / 5)  # Window size for rolling aggs
        optimum_xstart, optimum_xend, optimum_ymean, \
            optimum_start_bin, optimum_end_bin = self._get_optimum_range(grouped_df=bin_aggs_df,
                                                                         roptimum_bin=roptimum_bin,
                                                                         winsize=winsize)
        # Cross-check the rolling optimum against the mean over the optimum range.
        self._validate(roptimum_val=roptimum_val, optimum_ymean=optimum_ymean)
        vals_in_optimum_range_df = \
            self._values_in_optimum_range(optimum_xstart=optimum_xstart, optimum_xend=optimum_xend)
        self._results_optrange = dict(
            optimum_xstart=optimum_xstart,
            optimum_xend=optimum_xend,
            optimum_ymean=optimum_ymean,
            optimum_start_bin=optimum_start_bin,
            optimum_end_bin=optimum_end_bin,
            bin_aggs_df=bin_aggs_df,
            rbin_aggs_df=rbin_aggs_df,
            rwinsize=winsize,
            roptimum_bin=roptimum_bin,
            roptimum_val=roptimum_val,
            n_xbins=n_xbins,
            xcol=self.xcol,
            ycol=self.ycol,
            vals_in_optimum_range_df=vals_in_optimum_range_df
        )
    def _values_in_optimum_range(self, optimum_xstart: float, optimum_xend: float) -> pd.DataFrame:
        """Count, per year, how many x values fall in / above / below the
        optimum range, plus the corresponding percentages.

        Requires an index exposing ``.year`` (i.e. a DatetimeIndex).
        NOTE(review): ``fullrange_df`` below is computed but never used —
        confirm before removing.
        """
        df = self.df[[self.xcol, self.ycol]].copy()
        # Full data range
        fullrange_df = df.groupby(df.index.year).agg({self.xcol: ['count', 'mean']})
        xcounts_df = pd.DataFrame()
        # xcounts_df['vals_total'] = df.groupby(df.index.year).agg({'count'})
        xcounts_df['vals_total'] = \
            df.groupby(df.index.year).agg(vals_total=(self.xcol, 'count'))
        # Data in optimum
        _filter = (df[self.xcol] > optimum_xstart) & (df[self.xcol] <= optimum_xend)
        xcounts_df['vals_inoptimum'] = \
            df.loc[_filter].groupby(df.loc[_filter].index.year).agg(vals_inoptimum=(self.xcol, 'count'))
        # Above optimum
        _filter = (df[self.xcol] > optimum_xend)
        xcounts_df['vals_aboveoptimum'] = \
            df.loc[_filter].groupby(df.loc[_filter].index.year).agg(vals_aboveoptimum=(self.xcol, 'count'))
        # Below optimum
        _filter = (df[self.xcol] <= optimum_xstart)
        xcounts_df['vals_belowoptimum'] = \
            df.loc[_filter].groupby(df.loc[_filter].index.year).agg(vals_belowoptimum=(self.xcol, 'count'))
        # Percentages
        xcounts_df['vals_inoptimum_perc'] = xcounts_df['vals_inoptimum'].div(xcounts_df['vals_total']).multiply(100)
        xcounts_df['vals_aboveoptimum_perc'] = xcounts_df['vals_aboveoptimum'].div(xcounts_df['vals_total']).multiply(
            100)
        xcounts_df['vals_belowoptimum_perc'] = xcounts_df['vals_belowoptimum'].div(xcounts_df['vals_total']).multiply(
            100)
        # NaNs correspond to zero,
        # e.g. if no values above optimum are found
        xcounts_df = xcounts_df.fillna(0)
        return xcounts_df
def _prepare_data(self):
# Keep x values > 0
self.df = self.df.loc[self.df[self.xcol] > 0, :]
    def _divide_xdata_into_bins(self) -> tuple[DataFrame, DataFrame, int]:
        """
        Divide x data into equal-count quantile bins and aggregate y per bin.

        The number of bins is len(data) / n_vals_per_bin; bin membership is
        added as an 'xbins' column.

        Returns:
            Tuple of (data with 'xbins' column, per-bin aggregates, number of bins).
        """
        bins_df = self.df.copy()
        # Detect number of x bins
        n_xbins = int(len(bins_df) / self.n_vals_per_bin)
        # Divide data into bins and add as column
        xbins = pd.qcut(bins_df[self.xcol], n_xbins, duplicates='drop')  # How awesome!
        bins_df = bins_df.assign(xbins=xbins)
        # Aggregate by bin membership
        bin_aggs_df = bins_df.groupby('xbins').agg({self.bins_agg, 'count'})
        return bins_df, bin_aggs_df, n_xbins
    def _rolling_agg(self, bin_aggs_df, use_bin_agg, winsize, rolling_agg):
        """Apply a centered rolling window of *winsize* bins over the y-column
        aggregate and return the rolling *rolling_agg* plus std, with the
        NaN edge rows dropped."""
        rolling_df = bin_aggs_df[self.ycol][use_bin_agg].rolling(winsize, center=True)
        return rolling_df.agg({rolling_agg, 'std'}).dropna()
def _find_rolling_optimum(self, rolling_df: DataFrame, use_rolling_agg: str = 'mean'):
"""Find optimum bin in rolling data
The rolling data is scanned for the bin with the highest or lowest value.
"""
# Find bin with rolling mean min or max (e.g. max carbon uptake = minimum NEE value)
roptimum_bin = None # Index given as bin interval
roptimum_val = None # Value at bin interval
if self.define_optimum == 'min':
roptimum_bin = rolling_df[use_rolling_agg].idxmin()
roptimum_val = rolling_df[use_rolling_agg][roptimum_bin]
elif self.define_optimum == 'max':
roptimum_bin = rolling_df[use_rolling_agg].idxmax()
roptimum_val = rolling_df[use_rolling_agg].iloc[roptimum_bin]
print(f"Optimum {self.define_optimum} found in class: {roptimum_bin} / value: {roptimum_val}")
return roptimum_bin, roptimum_val
    def _get_optimum_range(self, grouped_df: DataFrame, roptimum_bin: pd.IntervalIndex, winsize: int):
        """Get data range (start and end) that was used to calculate rolling optimum.

        Returns:
            Tuple (x at range start, x at range end, mean of the per-bin y
            medians across the range, start bin, end bin).
        """
        # Find integer location of bin where rolling optimum value (y min or y max) was found
        int_loc = grouped_df.index.get_loc(roptimum_bin)
        print(f"Index integer location of found optimum: {int_loc} / {grouped_df.index[int_loc]}")
        # Get data range start and end
        roptimum_start_ix = int_loc - (int(winsize / 2))
        roptimum_end_ix = int_loc + (int(winsize / 2) + 1)  # was +1 b/c end of range not included in slicing
        # Optimum end index cannot be larger than available indices
        roptimum_end_ix = len(grouped_df) - 1 if roptimum_end_ix > len(grouped_df) - 1 else roptimum_end_ix
        # Optimum start index cannot be smaller than the first available index 0
        roptimum_start_ix = 0 if roptimum_start_ix < 0 else roptimum_start_ix
        # Get data range indices
        optimum_start_bin = grouped_df.iloc[roptimum_start_ix].name
        optimum_end_bin = grouped_df.iloc[roptimum_end_ix].name
        # Bin labels are intervals; the x range spans from the left edge of the
        # first bin to the right edge of the last.
        optimum_range_xstart = optimum_start_bin.left
        optimum_range_xend = optimum_end_bin.right
        optimum_range_ymean = grouped_df[self.ycol]['median'].iloc[roptimum_start_ix:roptimum_end_ix].mean()
        return optimum_range_xstart, optimum_range_xend, optimum_range_ymean, \
               optimum_start_bin, optimum_end_bin
def _validate(self, roptimum_val, optimum_ymean):
check = isclose(roptimum_val, optimum_ymean, abs_tol=10 ** -3)
if check:
print("Validation OK.")
else:
print("(!)Validation FAILED.")
assert isclose(roptimum_val, optimum_ymean)
    def showfig(self,
                saveplot: bool = False,
                title: str = None,
                path: Path or str = None):
        """Assemble and display the three-panel results figure; optionally
        save it to *path* with *title* when *saveplot* is True."""
        fig = plt.figure(figsize=(16, 9))
        gs = gridspec.GridSpec(4, 1)  # rows, cols
        gs.update(wspace=.2, hspace=.5, left=.05, right=.95, top=.95, bottom=.05)
        # Top half: yearly in/above/below-optimum shares; below: bin medians
        # and their rolling aggregate.
        ax1 = fig.add_subplot(gs[0:2, 0])
        ax2 = fig.add_subplot(gs[2, 0])
        ax3 = fig.add_subplot(gs[3, 0])
        ax = self.plot_vals_in_optimum_range(ax=ax1)
        ax = self.plot_bin_aggregates(ax=ax2)
        ax = self.plot_rolling_bin_aggregates(ax=ax3)
        fig.show()
        if saveplot:
            save_fig(fig=fig, title=title, path=path)
    def plot_vals_in_optimum_range(self, ax):
        """Plot optimum range: values in, above and below optimum per year
        as a horizontal stacked percentage bar chart."""
        # kudos: https://matplotlib.org/stable/gallery/lines_bars_and_markers/horizontal_barchart_distribution.html#sphx-glr-gallery-lines-bars-and-markers-horizontal-barchart-distribution-py
        # Get data
        df = self.results_optrange['vals_in_optimum_range_df'].copy()
        plotcols = ['vals_inoptimum_perc', 'vals_aboveoptimum_perc', 'vals_belowoptimum_perc']
        df = df[plotcols]
        df = df.round(1)
        # xcol = results_optrange['xcol']
        # ycol = results_optrange['ycol']
        # Names of categories, shown in legend above plot
        category_names = ['values in optimum range (%)', 'above optimum range (%)', 'below optimum range (%)']
        # category_names = ['vals_inoptimum_perc', 'vals_aboveoptimum_perc', 'vals_belowoptimum_perc']
        # Format data for bar plot
        results = {}
        for ix, row in df.iterrows():
            results[ix] = df.loc[ix].to_list()
        year_labels = list(results.keys())
        data = np.array(list(results.values()))
        data_cum = data.cumsum(axis=1)
        category_colors = plt.colormaps['RdYlBu_r'](np.linspace(0.20, 0.80, data.shape[1]))
        # fig, ax = plt.subplots(figsize=(9.2, 5))
        ax.invert_yaxis()
        ax.xaxis.set_visible(False)
        ax.set_xlim(0, np.sum(data, axis=1).max())
        # One stacked segment per category; starts offset by the cumulative sum.
        for i, (colname, color) in enumerate(zip(category_names, category_colors)):
            widths = data[:, i]
            starts = data_cum[:, i] - widths
            rects = ax.barh(year_labels, widths, left=starts, height=0.9,
                            label=colname, color=color)
            r, g, b, _ = color
            # Pick a label colour with enough contrast against the bar colour.
            text_color = 'white' if r * g * b < 0.5 else 'darkgrey'
            ax.bar_label(rects, label_type='center', color=text_color)
        ax.legend(ncol=len(category_names), bbox_to_anchor=(0, 1),
                  loc='lower left', fontsize='small')
        # default_format(ax=ax, txt_xlabel="year", txt_ylabel=f'counts',
        #                txt_ylabel_units='[#]')
        # default_grid(ax=ax)
        return ax
def plot_bin_aggregates(self, ax):
    """Plot y median in bins of x.

    Line plot of the per-bin medians of *ycol* with the detected optimum
    range shaded.  All inputs are read from ``self.results_optrange``.
    """
    # Get data
    bin_aggs_df = self.results_optrange['bin_aggs_df'].copy()
    xcol = self.results_optrange['xcol']
    ycol = self.results_optrange['ycol']
    n_xbins = self.results_optrange['n_xbins']
    optimum_start_bin = self.results_optrange['optimum_start_bin']
    optimum_end_bin = self.results_optrange['optimum_end_bin']
    optimum_xstart = self.results_optrange['optimum_xstart']
    optimum_xend = self.results_optrange['optimum_xend']
    # Find min/max of y, used for scaling yaxis
    ymax = bin_aggs_df[ycol]['median'].max()
    ymin = bin_aggs_df[ycol]['median'].min()
    ax.set_ylim(ymin, ymax)
    # Plot the per-bin medians (NOTE(review): original comment said
    # "rolling mean", but this panel shows the plain binned medians)
    bin_aggs_df[ycol]['median'].plot(ax=ax, zorder=99,
                                     title=f"{ycol} medians in {n_xbins} bins of {xcol}")
    # Show optimum range: convert bin labels to positional indices for axvline/fill
    optimum_start_bin_ix = bin_aggs_df.index.get_loc(optimum_start_bin)
    optimum_end_bin_ix = bin_aggs_df.index.get_loc(optimum_end_bin)
    ax.axvline(optimum_start_bin_ix)
    ax.axvline(optimum_end_bin_ix)
    area_opr = ax.fill_between([optimum_start_bin_ix,
                                optimum_end_bin_ix],
                               ymin, ymax,
                               color='#FFC107', alpha=0.5, zorder=1,
                               label=f"optimum range {self.define_optimum} between {optimum_xstart} and {optimum_xend}")
    l = ax.legend(
        [area_opr],
        [area_opr.get_label()],
        scatterpoints=1,
        numpoints=1,
        handler_map={tuple: HandlerTuple(ndivide=None)},
        ncol=2)
def plot_rolling_bin_aggregates(self, ax):
    """Plot rolling mean of y medians in bins of x.

    Shows the rolling mean with its std as error bars, and shades the
    detected optimum range.  All inputs come from ``self.results_optrange``.
    """
    # Get data
    rbin_aggs_df = self.results_optrange['rbin_aggs_df'].copy()
    xcol = self.results_optrange['xcol']
    ycol = self.results_optrange['ycol']
    n_xbins = self.results_optrange['n_xbins']
    optimum_start_bin = self.results_optrange['optimum_start_bin']
    optimum_end_bin = self.results_optrange['optimum_end_bin']
    optimum_xstart = self.results_optrange['optimum_xstart']
    optimum_xend = self.results_optrange['optimum_xend']
    # Find min/max across dataframe, used for scaling yaxis
    # (mean +/- std columns are added so the error band fits in view)
    rbin_aggs_df['mean+std'] = rbin_aggs_df['mean'].add(rbin_aggs_df['std'])
    rbin_aggs_df['mean-std'] = rbin_aggs_df['mean'].sub(rbin_aggs_df['std'])
    dfmax = rbin_aggs_df[['mean+std', 'mean-std']].max().max()
    dfmin = rbin_aggs_df.min().min()
    ax.set_ylim(dfmin, dfmax)
    # Show rolling mean
    rbin_aggs_df.plot(ax=ax, y='mean', yerr='std', zorder=99,
                      title=f"Rolling mean of {ycol} medians in {n_xbins} bins of {xcol}")
    # Show optimum range: convert bin labels to positional indices
    optimum_start_bin_ix = rbin_aggs_df.index.get_loc(optimum_start_bin)
    optimum_end_bin_ix = rbin_aggs_df.index.get_loc(optimum_end_bin)
    ax.axvline(optimum_start_bin_ix)
    ax.axvline(optimum_end_bin_ix)
    area_opr = ax.fill_between([optimum_start_bin_ix,
                                optimum_end_bin_ix],
                               dfmin, dfmax,
                               color='#FFC107', alpha=0.5, zorder=1,
                               label=f"optimum range {self.define_optimum} between {optimum_xstart} and {optimum_xend}")
    l = ax.legend(
        [area_opr],
        [area_opr.get_label()],
        scatterpoints=1,
        numpoints=1,
        handler_map={tuple: HandlerTuple(ndivide=None)},
        ncol=2)
def example():
    """Usage example: run the optimum-range analysis on a local test dataset.

    NOTE(review): loads a pickled dataframe from a hard-coded local path,
    so this only runs on the author's machine.
    """
    pd.options.display.width = None
    pd.options.display.max_columns = None
    pd.set_option('display.max_rows', 3000)
    pd.set_option('display.max_columns', 3000)
    # Test data
    from diive.core.io.files import load_pickle
    df_orig = load_pickle(
        filepath=r"L:\Dropbox\luhk_work\20 - CODING\26 - NOTEBOOKS\GL-NOTEBOOKS\_data\ch-dav\CH-DAV_FP2022.1_1997-2022.08_ID20220826234456_30MIN.diive.csv.pickle")
    # # Check columns
    # import fnmatch
    # [print(col) for col in alldata_df.columns if any(fnmatch.fnmatch(col, ids) for ids in ['NEE_CUT_50*'])]
    # Select daytime data between May and September
    df = df_orig.copy()
    df = df.loc[(df.index.month >= 5) & (df.index.month <= 9)]
    df = df.loc[df['PotRad_CUT_REF'] > 20]  # daytime filter via potential radiation
    # Optimum range
    optrange = FindOptimumRange(df=df, xcol='RH', ycol='NEE_CUT_REF_f', define_optimum="min", rwinsize=0.3)
    optrange.find_optimum()
    optrange.plot_results()


if __name__ == '__main__':
    example()
| holukas/diive | diive/pkgs/analyses/optimumrange.py | optimumrange.py | py | 18,102 | python | en | code | 0 | github-code | 36 |
27239543439 | # from django.shortcuts import render
from django.views.generic import ListView, DetailView, UpdateView, CreateView, DeleteView # импортируем класс, который говорит нам о том, что в этом представлении мы будем выводить список объектов из БД
from .models import Post
from datetime import datetime
from .filters import PostFilter
from .forms import PostForm # импортируем нашу форму
from django.contrib.auth.mixins import LoginRequiredMixin
from django.core.paginator import Paginator
from django.contrib.auth.mixins import PermissionRequiredMixin
class PostList(ListView):
    """List view showing all posts, newest first, rendered with news.html."""
    model = Post  # the model whose objects this view lists
    template_name = 'news.html'  # template that renders the object list
    context_object_name = 'news'  # name under which the template receives the list
    queryset = Post.objects.order_by('-id')  # newest posts first
    form_class = PostForm  # form class, so the form is reachable via POST
    #paginate_by = 1

    def get_context_data(self, **kwargs):
        """Add the current UTC time and a demo variable to the template context."""
        context = super().get_context_data(**kwargs)
        context['time_now'] = datetime.utcnow()  # current date/time for the template
        context['value1'] = None  # empty demo variable used to exercise another filter
        return context
# View that renders the details of a single specific post
class PostDetail(DetailView):
    """Detail view for one post, rendered with post.html."""
    model = Post  # same model, but we want a single object's details
    template_name = 'post.html'
    context_object_name = 'post'  # name of the object in the template context
class Search(ListView):
    """Filterable, paginated post list rendered with search.html."""
    model = Post
    template_name = 'search.html'
    context_object_name = 'search'
    ordering = ['-time_in']
    paginate_by = 1  # one item per page

    def get_context_data(self, **kwargs):
        """Expose the filtered queryset to the template under 'filter'."""
        context = super().get_context_data(**kwargs)
        context['filter'] = PostFilter(self.request.GET, queryset=self.get_queryset())
        return context
class PostCreateView(PermissionRequiredMixin, CreateView):
    """Create a post; requires the news.add_post permission."""
    template_name = 'add.html'
    form_class = PostForm
    permission_required = ('news.add_post',)
# Generic class-based view for editing a post
class PostEditView(LoginRequiredMixin, PermissionRequiredMixin, UpdateView):
    """Edit a post; requires login and the news.change_post permission."""
    template_name = 'edit.html'
    form_class = PostForm
    permission_required = ('news.change_post',)

    # get_object is used instead of queryset to fetch the object being edited
    def get_object(self, **kwargs):
        # NOTE(review): Post.objects.get raises DoesNotExist for an unknown pk
        # (HTTP 500 instead of 404) — consider get_object_or_404.
        id_1 = self.kwargs.get('pk')
        return Post.objects.get(id=id_1)
# Generic class-based view for deleting a post
class PostDeleteView(PermissionRequiredMixin, DeleteView):
    """Delete a post; requires the news.delete_post permission."""
    template_name = 'delete.html'
    queryset = Post.objects.all()
    permission_required = ('news.delete_post',)
    success_url = '/news/'  # redirect target after a successful delete
22355735455 | import pytest
import mlrun.common.schemas
import mlrun.runtimes
def test_enum_yaml_dump():
    """Smoke test: a function whose status state holds an enum member must
    serialize to YAML without raising."""
    function = mlrun.new_function("function-name", kind="job")
    function.status.state = mlrun.common.schemas.FunctionState.ready
    print(function.to_yaml())
@pytest.mark.parametrize(
    "exclude_params,expected_result,is_empty",
    [
        # One notification, params excluded from the JSON
        (
            True,
            (
                '{"spec": {"outputs": [], "secret_sources": [], "notifications": [{"kind": '
                '"webhook", "name": "notification-test", "message": "completed", "severity": '
                '"info", "when": ["completed", "error"], "condition": ""}]}, "metadata": '
                '{"iteration": 0}, "status": {"state": "created"}}'
            ),
            False,
        ),
        # One notification, params kept in the JSON
        (
            False,
            (
                '{"spec": {"outputs": [], "secret_sources": [], "notifications": [{"kind": '
                '"webhook", "name": "notification-test", "message": "completed", "severity": '
                '"info", "when": ["completed", "error"], "condition": "", "params": {"url": '
                '"https://url", "method": "PUT", "override_body": "AAAAAAAAAAAAAAAAAAAA"}}]}, '
                '"metadata": {"iteration": 0}, "status": {"state": "created"}}'
            ),
            False,
        ),
        # No notifications: the exclude flag makes no difference
        (
            True,
            (
                '{"spec": {"outputs": [], "secret_sources": []}, "metadata": {"iteration": '
                '0}, "status": {"state": "created"}}'
            ),
            True,
        ),
        (
            False,
            (
                '{"spec": {"outputs": [], "secret_sources": []}, "metadata": {"iteration": '
                '0}, "status": {"state": "created"}}'
            ),
            True,
        ),
    ],
)
def test_runobject_to_json_with_exclude_params(
    exclude_params, expected_result, is_empty
):
    """RunObject.to_json must honour exclude_notifications_params and must not
    strip the params from the notification objects themselves."""
    run_object_to_test = mlrun.model.RunObject()
    notification = mlrun.model.Notification(
        kind="webhook",
        when=["completed", "error"],
        name="notification-test",
        message="completed",
        condition="",
        severity="info",
        params={"url": "https://url", "method": "PUT", "override_body": "A" * 20},
    )
    run_object_to_test.spec.notifications = [] if is_empty else [notification]
    # Call the to_json function with the exclude_notifications_params parameter
    json_result = run_object_to_test.to_json(
        exclude_notifications_params=exclude_params
    )
    # Check if the JSON result matches the expected result
    assert json_result == expected_result
    # Ensure the 'params' attribute of the notification is set back to the object
    if not is_empty:
        for notification in run_object_to_test.spec.notifications:
            assert notification.params
| mlrun/mlrun | tests/test_model.py | test_model.py | py | 2,821 | python | en | code | 1,129 | github-code | 36 |
35146672008 | import matplotlib.pyplot as plt
import numpy as np
# Load per-episode results of the RL run and plot them.
# A stored value of -10 marks a failed episode; map it to 0 so it does not
# distort the average.  Uses a context manager instead of manual close();
# dead commented-out loaders for unused data files were removed.
ret_RL = []
with open("Breakout_step_RL") as fil:
    for line in fil:
        x = float(line)
        ret_RL.append(0 if x == -10 else x)

print(np.average(ret_RL))
plt.plot(ret_RL, label="TileCoding with Velocity")
plt.show()
31025430607 | import numpy as np
import cv2
import faceTools
import moodTools
from PIL import Image
# Emotion label -> emoji image used to overlay the detected face.
emojis_data = {
    'angry': cv2.imread("./data/emojis/Angry.png"),
    'disgust': cv2.imread("./data/emojis/Poisoned.png"),
    'fear': cv2.imread("./data/emojis/Fearful.png"),
    'happy': cv2.imread("./data/emojis/Happy.png"),
    'sad': cv2.imread("./data/emojis/Crying.png"),
    'surprise': cv2.imread("./data/emojis/Omg.png"),
    'neutral': cv2.imread("./data/emojis/Neutral.png")
}

# Webcam capture (device 0)
cap = cv2.VideoCapture(0)
if not cap.isOpened():
    print("Cannot open camera")
    exit()
face_cascade = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
Claudia = moodTools.callModel()  # mood-classification model loaded via moodTools
while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    # if frame is read correctly ret is True
    if not ret:
        print("Can't receive frame. Exiting ...")
        break
    faces = faceTools.findFaces(frame, face_cascade)
    if faces is not None:
        # NOTE(review): each element appears to be (face_img, bbox, tilt_angle)
        # as produced by faceTools.findFaces — confirm against that helper.
        for element in faces:
            mood = moodTools.predict(Claudia, element[0])
            print(mood)
            (x, y, w, h) = element[1]
            emoji = emojis_data[mood]
            # Check if the tilting has been calculated
            if element[2] is not None:
                emoji = Image.fromarray(emoji)
                emoji = np.array(emoji.rotate(int(-element[2])))
            # Fit the emoji to the exact size of the face
            emoji = faceTools.resize(emoji, target_size=(w, h), to_gray=False)
            frame[y:y+h, x:x+w, :] = emoji
    # Display the resulting frame
    font = cv2.FONT_HERSHEY_SIMPLEX
    # Use putText() method for
    # inserting text on video
    cv2.putText(frame,
                'Press q to exit',
                (50, 50),
                font, 1,
                (0, 255, 255),
                2,
                cv2.LINE_4)
    cv2.imshow('frame', frame)
    # If the key pressed is "q" (quit)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
24752052108 | import os
from sqlalchemy import (
Column,
MetaData,
String, Integer, Float,
Table,
Text,
ForeignKey,
create_engine,
select
)
from domain.repositories import RepositoryInterface
# Shared SQLAlchemy table metadata for the user/department schema.
metadata = MetaData()

# 'user' table: each user optionally belongs to one department.
users_table = Table(
    'user', metadata,
    Column('userId', Integer, primary_key=True),
    Column('description', Text, nullable=True),
    Column('userName', String(10), nullable=True),
    Column('deptId', Integer, ForeignKey("department.deptId"), nullable=True)
)

# 'department' table referenced by user.deptId.
departments_table = Table(
    'department', metadata,
    Column('deptId', Integer, primary_key=True),
    Column('description', Text, nullable=True),
    Column('deptName', String(10), nullable=True)
)
###################### Common Function #####################
class MySqlAdapter(RepositoryInterface):
    """SQLAlchemy adapter implementing RepositoryInterface against MySQL."""

    def __init__(self, database_uri=None):
        """Open a connection to *database_uri* (default: the DB_URI env var)
        and create the user/department tables if they do not exist yet."""
        uri = database_uri or os.getenv('DB_URI')
        db_engine = create_engine(uri, convert_unicode=True, echo=True)
        self.__create_tables_if_not_exists(db_engine)
        self.__connection = db_engine.connect()

    def close_db_connection(self, db_connection):
        """Close *db_connection*, ignoring any error (best-effort cleanup)."""
        try:
            db_connection.close()
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # are no longer swallowed; close failures stay non-fatal.
            pass

    def __create_tables_if_not_exists(self, db_engine):
        # checkfirst=True makes CREATE TABLE a no-op when the table exists.
        departments_table.create(db_engine, checkfirst=True)
        users_table.create(db_engine, checkfirst=True)

    ###################### CRUD Function #####################
    def insertUser(self, user):
        """Insert *user* (domain object exposing as_dict()) in a transaction."""
        with self.__connection.begin():
            self.__connection.execute(
                users_table.insert(),
                user.as_dict()
            )

    def selectUserWithDeptInfo(self, userId):
        """Return {'userId', 'userName', 'deptName'} for *userId*, joining the
        department table via LEFT OUTER JOIN, or None when the user is unknown."""
        with self.__connection.begin():
            stmt = select([users_table, departments_table]).distinct().select_from(
                users_table.outerjoin(
                    departments_table,
                    users_table.c.deptId == departments_table.c.deptId)
            ).where(users_table.c.userId == userId)
            row = self.__connection.execute(stmt).fetchone()
            return {
                'userId': row['userId'],
                'userName': row['userName'],
                'deptName': row['deptName']
            } if row else None

    def insertDepartment(self, department):
        """Insert *department* (domain object exposing as_dict()) in a transaction."""
        with self.__connection.begin():
            self.__connection.execute(
                departments_table.insert(),
                department.as_dict()
            )
############################################################
36086044311 | #1
# Exercise 1: print the squares of 1..25.
for number in range (1,26):
    print(number**2)
#2: keep prompting until the user types exactly "haha"
y=()
while(y!="haha"):
    y=input("Write 'It's a Loop: ")
#3: count a positive number down in steps of 0.5, printing each step
positive=float(input())
while(positive>0):
    positive-=0.5
    print(positive)
#4: loop until the user answers "no"
a=()
while(a!="no"):
    a=input("Do you want to continue? ")
print("This is the end")
#Level 1 Loops
#1: print 0, 1, 2
for i in range(3):
    print(i)
#2: remove one occurrence of i from the character list, if present
list1=list(input())
i=input()
if(i in list1):
    list1.remove(i)
else:
    print(list1)
print(list1)
#4: squares of a fixed list
list1=[1,2,3,4,5]
for element in list1:
    print(element**2)
#5: collect even numbers 1..100
a=range(1,101)
listEven=[]
for i in range(1,101):
    if(i%2==0):
        listEven.append(i)
print(listEven)
#6: numbers divisible by 4, then by 6
print(listEven)
print("\n\n\n These are numbers divisible by 4")
FOURList=[]
for k in range(1,101):
    if(k%4==0):
        FOURList.append(k)
print(FOURList)
print("\n\n\n These are numbers dividible by 6")
SIXList=[]
for j in range(1,101):
    if(j%6==0):
        SIXList.append(j)
print(SIXList)
| chrblsm/CS1-Lab | lab 17Feb,19Feb.py | lab 17Feb,19Feb.py | py | 907 | python | en | code | 3 | github-code | 36 |
18586892058 | import numpy
import scipy.interpolate
import scipy.optimize
import scipy.stats
# Shared assay parameter metadata: CSV columns, display labels, help text,
# and the parameter values of the worked example.
# Columns of csv input file
incols = ['name','Vinoc','dilmin','dilfac','ntot','ninf','comments']
# Columns added to csv output file
outcols = ['mode','68lb','68ub','95lb','95ub','RM','SK']
# label/header for assay parameters
label = {
    # input
    'Vinoc': "Well volume (in mL)",
    'dilmin': "Starting dilution",
    'dilfac': "Dilution factor",
    'ndils': "# dilutions",
    'nreps': "# repeats/dilution",
    'name': "Label",
    'ninf': "# wells infected",
    'ntot': "# wells total",
    'comments': "Comment (optional)",
    # output
    'mode': 'mode log10(SIN/mL)',
    '68lb': '68%CI-lo log10(SIN/mL)',
    '68ub': '68%CI-hi log10(SIN/mL)',
    '95lb': '95%CI-lo log10(SIN/mL)',
    '95ub': '95%CI-hi log10(SIN/mL)',
    'RM': 'RM log10(TCID50/mL)',
    'SK': 'SK log10(TCID50/mL)',
}
# help_text associated with assay parameters
info = {
    'Vinoc': "Typical value for 96-well plate is 0.1 mL.",
    'dilmin': "Must be ≤ 1 (e.g. 10-fold as 0.1, 4-fold as 0.25).",
    'dilfac': "Must be < 1 (e.g. 10-fold as 0.1, 4-fold as 0.25).",
    'ndils': "Typically 7 or 8 or 11 or 12.",
    'nreps': "Typically 4 or 6 or 8.",
    'name': "An identifying label like StrainA-24h-exp1.",
    'ninf': "A list separated by [,] [.] or [tab].",
    'ntot': "A list separated by [,] [.] or [tab].",
    'comments': "Can be anything you want (e.g. 24h).",
}
# parameter values for the example assay
example = {
    'Vinoc': 0.1,
    'dilmin': 0.01,
    'dilfac': 0.1,
    'ndils': 11,
    'nreps': 8,
    'name': "example",
    'ninf': [8,8,8,8,8,7,7,5,2,0,0],
    'ntot': [8,8,8,8,8,8,8,8,8,8,8],
    'comments': '',
}
def RMSK(dilut, Npos, Ntot):
    """Return (RM, SK): the Reed-Muench and Spearman-Kaerber log10 titre
    estimates for one endpoint-dilution assay.

    Parameters
    ----------
    dilut : array of log10(Vinoc * dilution) per column, decreasing.
    Npos  : array, number of infected wells per dilution column.
    Ntot  : array, total number of wells per dilution column.

    Returns (nan, nan) when the estimate is undefined: fewer than two
    dilution columns, no infected well, or no uninfected well.
    """
    # if only one well
    if len(Npos) < 2:
        return (numpy.nan, numpy.nan)
    # if no infected well
    elif numpy.sum(Npos) == 0.0:
        return (numpy.nan, numpy.nan)
    # if all wells infected
    elif numpy.sum(Ntot - Npos) == 0.0:
        return (numpy.nan, numpy.nan)
    df = abs(numpy.diff(dilut)[0])  # log10 dilution step between columns
    # Reed-Muench: pooled (cumulative) infected fraction per column
    frac = 1.0 * numpy.cumsum(Npos[::-1])[::-1]
    frac = frac / (frac + numpy.cumsum(Ntot - Npos))
    idx = numpy.argmax(frac < 0.5) - 1  # last column with frac >= 0.5
    propdist = (frac[idx] - 0.5) / (frac[idx] - frac[idx + 1])
    RM = df * propdist - dilut[idx]
    # Spearman-Kaerber
    frac = 1.0 * Npos / Ntot  # per-column fraction (no RM-like smoothing)
    idx = numpy.argmin(frac < 1.0)  # first fully-infected column
    if idx == 0:  # if frac<1 in lowest dilution column, prepend a full column
        frac = numpy.hstack((1.0, frac))
        dilut = numpy.hstack((dilut[0] + df, dilut))
    # Trapezoid rule with unit spacing; numpy.trapz was removed in NumPy 2.0,
    # so integrate explicitly for forward compatibility.
    area = numpy.sum((frac[idx:-1] + frac[idx + 1:]) * 0.5)
    SK = df * area - dilut[idx]
    return (RM, SK)
class Assay(object):
    """One endpoint-dilution assay.

    Holds the well counts and dilution parameters and computes the posterior
    likelihood distribution of lCvir = log10 virus concentration, its mode
    and credible bounds, plus Reed-Muench and Spearman-Kaerber estimates.
    All results accumulate in the ``self.pack`` dict.
    """
    def __init__(self, Vinoc, dilmin, dilfac, ninf, ntot):
        """Vinoc: inoculum volume per well (mL); dilmin: starting dilution;
        dilfac: dilution factor per column; ninf/ntot: infected/total wells
        per dilution column."""
        # Save user input
        self.pack = {'Vinoc':Vinoc, 'dilmin':dilmin, 'dilfac':dilfac}
        # Store n (# of wells total per column)
        self.pack['ntot'] = numpy.array(ntot)
        # Store k (# of wells infected per column)
        self.pack['ninf'] = numpy.array(ninf)
        # Compute n-k (# of wells uninfected)
        self.nmks = self.pack['ntot']-self.pack['ninf']
        # Raise flag if no well was infected (lower limit of detection)
        if sum(self.pack['ninf']) == 0.0:
            self.isempty = True
        else:
            self.isempty = False
        # Raise flag if all wells were infected (upper limit of detection)
        if sum(self.nmks) == 0.0:
            self.isfull = True
        else:
            self.isfull = False
        # Compute arg of lnqbase = exp[ - Vinoc * dilmin * dilfac^pow ]
        self.VDs = Vinoc * dilmin * dilfac**numpy.arange(len(self.nmks))
        # Compute the remainder of the assay payload
        self.payload()

    def lCmode(self):
        """ Computes the mode of the posterior PDF for lCvir, and the TCID50 via the Reed-Munch and Spearmann-Kaerber estimation methods. """
        if 'mode' in self.pack.keys():
            return self.pack['mode']  # cached from a previous call
        # If no infected well: give lC upper bound
        if self.isempty or self.isfull:
            self.pack['mode'] = numpy.nan
            return self.pack['mode']
        # Estimate most likely lCvir value (mode of dist); the bracket is
        # built from the most and least dilute wells (in -log10 units).
        bracket = -numpy.log10((self.VDs[0]*10.0, self.VDs[numpy.where(self.nmks)][0], self.VDs[-1]/10.0))
        res = scipy.optimize.minimize_scalar(lambda x: -self.lCcalc(x), bracket=bracket)
        assert res.success, 'Could not find lC mode'
        self.pack['mode'] = res.x
        return self.pack['mode']

    def lCcalc(self, lCvec):
        """ Compute posterior likelihood distribution, i.e. value of exp(lnProb), for all elements in vector lCvec, and returns it as a vector of the same size as lCvec, suitable for plotting. """
        # Product over dilution columns of Binom(k | n, pinf) with
        # pinf = 1 - exp(-10^lCvir * Vinoc * dilution).
        P = numpy.ones_like(lCvec)
        for VD,n,k in zip(-self.VDs,self.pack['ntot'],self.pack['ninf']):
            pinfvec = -numpy.expm1(10.0**lCvec*VD)
            P *= scipy.stats.binom.pmf(k,n,pinfvec)
        return P

    def lCdist(self, lCvec=None):
        """ Creates (if not provided) and stores the lCvir vector, stores the posterior PDF vector computed by lCcalc for the values in lCvir, and computes and stores the CDF vector corresponding to the PDF for the values in lCvir. """
        if lCvec is None:
            if self.isempty or self.isfull:
                # At the detection limits the likelihood is one-sided: grid
                # between the points where it equals 1e-4 and 0.9999.
                a = -numpy.log10(self.VDs[0])-10.0
                b = -numpy.log10(self.VDs[-1])+10.0
                lb = scipy.optimize.brentq(lambda x: self.lCcalc(x)-0.0001,a,b)
                ub = scipy.optimize.brentq(lambda x: self.lCcalc(x)-0.9999,a,b)
                lCvec = numpy.linspace(lb,ub,500)
            else:
                # Grid spanning -2..+2 decades around the mode, finer near it.
                lCvec = numpy.arange(0.0,1.0,0.01)
                lCvec = numpy.hstack((lCvec-2,numpy.arange(-1.0,1.0,0.002),lCvec+1))
                lCvec += self.lCmode()
        self.pack['lCvec'] = lCvec
        # Compute posterior likelihood distribution (pdf) for lVec
        self.pack['pdf'] = self.lCcalc(lCvec)
        # Compute CDF from posterior likelihood dist
        self.pack['cdf'] = numpy.cumsum(self.pack['pdf'][1:]*numpy.diff(self.pack['lCvec']))
        # Re-normalize so that CDF is 1 at Cvir= max in lCvec
        self.pack['cdf'] = numpy.hstack((0.0,self.pack['cdf']))/self.pack['cdf'].max()

    def lCbounds(self):
        """ Computes and stores the 68% and 95% bounds of lCvir likelihood as a 4-element list: [68-lower,68-upper,95-lower, 95-upper]. """
        if 'cdf' not in self.pack.keys():
            self.lCdist()
        if self.isempty or self.isfull:
            return [numpy.nan]*4
        ppf = scipy.interpolate.interp1d( self.pack['cdf'], self.pack['lCvec'], bounds_error=False, fill_value=0.0 )
        subbounds = []
        for frac in (0.68,0.95):
            # Narrowest interval containing `frac` of the posterior mass.
            res = scipy.optimize.minimize_scalar(lambda x: ppf(x+frac)-ppf(x),bounds=(0.0,1.0-frac),method='bounded')
            assert res.success, 'Could not find credible region.'
            subbounds += list( ppf([res.x,res.x+frac]) )
        return subbounds

    def payload(self):
        """Fill self.pack with RM/SK estimates, credible bounds, log10
        dilutions, posterior mode and posterior mean; returns the pack."""
        # Compute Reed-Muench and Spearman-Kaerber
        for key,val in zip(('RM','SK'),RMSK(numpy.log10(self.VDs),self.pack['ninf'],self.pack['ntot'])):
            self.pack[key] = val
        self.pack['bounds'] = self.lCbounds()
        self.pack['dilutions'] = numpy.log10(self.VDs/self.pack['Vinoc'])
        self.pack['mode'] = self.lCmode()
        # Posterior mean = pdf-weighted average of the lCvir grid.
        self.pack['mean'] = numpy.sum(self.pack['lCvec']*self.pack['pdf'])
        self.pack['mean'] /= self.pack['pdf'].sum()
        return self.pack
| cbeauc/midSIN | src/__init__.py | __init__.py | py | 6,705 | python | en | code | 4 | github-code | 36 |
24399113 | import plotly.graph_objects as go
from plotly.subplots import make_subplots
import numpy as np
import maths
def mass_flow_funnel(mass_flows, moisture_content):
    """Funnel-area chart of feedstock mass flow (kg/h) before drying, after
    drying and after torrefaction, labelled with moisture content (%).

    mass_flows / moisture_content: 3-element sequences in that stage order.
    Returns the plotly Figure.
    """
    fig = go.Figure(go.Funnelarea(
        # textinfo = [str(round(mass_flows[0],2))+" kg/h <br>Before Drying",str(round(mass_flows[1],2))+" kg/h <br>After Drying",str(mass_flows[2])+" kg/h <br>After Torrefaction"],
        text = ["Before Drying at MC="+str(moisture_content[0])+"%","After Drying at MC="+str(moisture_content[1])+"%","After Torrefaction at MC="+str(moisture_content[2])+"%"],
        values = mass_flows,
        textinfo = 'value+text'
    ))
    fig.update_layout(
        title='Feedstock Mass Flow Requirements (kg/h)',
        title_x=0.5,
        showlegend=False
    )
    # NOTE(review): funnelarea traces have no y-axis, so this call appears to
    # be a leftover from an earlier chart type.
    fig.update_yaxes(
        showticklabels = False
    )
    return fig
def torr_sizing(t1, t2, cp, mfr):
    """Plot the required reactor length vs. wall temperature, one curve per
    candidate reactor diameter.

    t1/t2: inlet/outlet temperatures (degC, converted via maths.kelvin),
    cp: specific heat, mfr: mass-flow rate — units as expected by
    maths.get_L_torr.  Returns the plotly Figure.
    """
    reactor_diameter = np.arange(0.5, 6.0, 0.5)   # 11 candidate diameters (m)
    wall_temp = np.arange(200.0, 500.0, 100.0)    # 200, 300, 400 degC
    # results[i, j] = required length for diameter i at wall temperature j
    results = np.zeros(shape=(len(reactor_diameter), len(wall_temp)))
    for i in range(0, len(reactor_diameter)):
        for j in range(0, len(wall_temp)):
            results[i, j] = maths.get_L_torr(maths.kelvin(wall_temp[j]), maths.kelvin(t1),
                                             maths.kelvin(t2), cp, reactor_diameter[i], mfr)
    fig = go.Figure()
    for i in range(0, len(reactor_diameter)):
        # Bug fix: the original plotted x=reactor_diameter (11 values) against
        # y=results[i,:] (3 values) and indexed wall_temp[i] beyond its end
        # (IndexError for i >= 3).  Per the axis titles and legend, each trace
        # is length (x) vs. wall temperature (y) for one diameter.
        # NOTE(review): wall_temp is in degC while the y-axis label says K —
        # confirm the intended units.
        fig.add_trace(go.Scatter(x=results[i, :], y=wall_temp,
                                 name=(str(round(reactor_diameter[i], 2)))))
    fig.update_xaxes(title="Reactor Length (m)")
    fig.update_yaxes(title="Wall Temperature (K)")
    fig.update_layout(
        showlegend=True,
        legend=dict(title="Reactor Diameter (m)"),
        title = "Minimum Reactor Wall Temperature Requirement at ",
        title_x = 0.5
    )
    return fig
def torr_analysis(t1, t2, mfrate, d_reactor, rpm_screw, heat_loss, cp):
    """Four stacked subplots of heating rate (deltaT) vs. reactor length,
    residence time, and system heat requirement (sand / air as HTM).

    NOTE(review): the column meanings ([:,0] residence time, [:,1] length,
    [:,6]/[:,7] heat for sand/air) are inferred from the subplot titles —
    confirm against maths.get_thermal_analysis.  Returns the plotly Figure.
    """
    deltaT = np.arange(10.0, 160.0, 10.0)  # heating rates to sweep
    ta_results = np.zeros(shape=(len(deltaT), 8))
    for i in range(0, len(deltaT)):
        ta_results[i] = maths.get_thermal_analysis(t1, t2, mfrate, deltaT[i], d_reactor, rpm_screw, heat_loss, cp)
    fig = make_subplots(rows=4, cols=1, subplot_titles=('Reactor Length', 'Residence Time', 'System Heat Requirement (kJ/s) for Sand as HTM', 'System Heat Requirement (kJ/s) for Air as HTM'))
    fig.update_layout(height=1000, title="Effects of Varying Heating Rates on Reactor Parameters", title_x=0.5, showlegend=False)
    # Heating Rate vs. Length
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 1]), col=1, row=1)
    fig.update_yaxes(title='m', col=1, row=1)
    # Residence Time
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 0]), col=1, row=2)
    fig.update_yaxes(title='min.', col=1, row=2)
    # System Heat Requirement (sand, then air)
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 6]), col=1, row=3)
    fig.update_yaxes(title='kJ/s', col=1, row=3)
    fig.add_trace(go.Scatter(x=deltaT, y=ta_results[:, 7]), col=1, row=4)
    fig.update_yaxes(title='kJ/s', col=1, row=4)
    return fig
26469617014 | from ....key import Address
from ....hint import MBC_USER_STATISTICS, MBC_VOTING_CANDIDATE
from ....common import Int, MitumFactor, _hint, concatBytes
class Candidate(MitumFactor):
    """A voting candidate: address, nickname, manifest text and vote count."""

    def __init__(self, address, nickname, manifest, count):
        # A manifest longer than 100 characters is rejected up front.
        assert len(manifest) <= 100, 'manifest length is over 100! (len(manifest) <= 100); Candidate.__init__'
        self.hint = _hint(MBC_VOTING_CANDIDATE)
        self.address = Address(address)
        self.nickname = nickname
        self.manifest = manifest
        self.count = Int(count)

    def bytes(self):
        """Concatenate the candidate fields in their canonical byte order."""
        return concatBytes(
            self.address.bytes(),
            self.nickname.encode(),
            self.manifest.encode(),
            self.count.bytes(),
        )

    def dict(self):
        """Return the candidate as a hint-tagged plain dictionary."""
        return {
            '_hint': self.hint.hint,
            'address': self.address.address,
            'nickname': self.nickname,
            'manifest': self.manifest,
            'count': self.count.value,
        }
class UserStatistics(object):
    """Game-character statistics block (hp/str/agi/dex/cha/intel/vital)."""

    # Canonical field order shared by __init__ and bytes().
    _FIELDS = ('hp', 'str', 'agi', 'dex', 'cha', 'intel', 'vital')

    def __init__(self, hp, str, agi, dex, cha, intel, vital):
        self.hint = _hint(MBC_USER_STATISTICS)
        for field, value in zip(self._FIELDS, (hp, str, agi, dex, cha, intel, vital)):
            setattr(self, field, Int(value))

    def bytes(self):
        """Concatenate all statistic fields in canonical order."""
        return concatBytes(*(getattr(self, field).bytes() for field in self._FIELDS))

    def dict(self):
        """Return the statistics as a hint-tagged plain dictionary."""
        return {
            '_hint': self.hint.hint,
            'hp': self.hp.value,
            'strength': self.str.value,
            'agility': self.agi.value,
            'dexterity': self.dex.value,
            'charisma': self.cha.value,
            'intelligence': self.intel.value,
            'vital': self.vital.value,
        }
21626075739 | from datetime import datetime
import os
def capture_pic(driver):
    """Save a screenshot from *driver* into ../picture/<timestamp>.png.

    The filename is the current local time as YYYYmmddHHMMSS.
    Bug fix: the original format string was '%Y%m%m%H%M%S' (month twice,
    day missing), producing ambiguous, collision-prone names.
    """
    pt = datetime.now().strftime('%Y%m%d%H%M%S')
    base_path = os.path.dirname(os.getcwd())  # parent of the working directory
    pic_name = os.path.join(base_path, 'picture', pt + '.png')
    driver.get_screenshot_as_file(pic_name)
| litongtongx/test | common/picCapture.py | picCapture.py | py | 268 | python | en | code | 0 | github-code | 36 |
19221455150 | '''FINAL DRAFT OF CALC?'''
from tkinter import *
from tkinter import font as tkFont
import math as m
# Root window setup
root = Tk()
root.title("SIMPLE CALCULATOR")
#rootlabel = Label(root, text="CALCULATOR", bg='gray3', fg = 'snow', font=("Times", 10, 'bold'))
#rootlabel.grid(row = 0,column = 2)
#root.geometry("538x540")
root.config(background='gray3')
# Entry widget shared by all button callbacks below
e = Entry(root , width = 60, relief= SUNKEN, borderwidth = 8)
e.grid(row = 1 , column = 0 , columnspan = 6 , padx = 20, pady = 40)
# Common font for every button
btfont = tkFont.Font(size=12)
def click(number):
    """Append the clicked digit/operator to the entry field."""
    current = e.get()
    e.delete(0, END)
    e.insert(0, str(current) + str(number))

def input1():
    """Evaluate the expression in the entry and display the result.

    NOTE(review): uses eval() on the raw entry text — fine for a local
    desktop toy, but never expose this to untrusted input.
    """
    global inp
    inp=e.get()
    e.delete(0,END)
    temp=inp  # unused leftover assignment
    global calc_output
    try:
        calc_output=eval(inp)
    except ZeroDivisionError:
        calc_output="CANNOT DIVIDE BY ZERO"
        print(calc_output)
    except SyntaxError:
        calc_output="WRONG INPUT"
        print(calc_output)
    except TypeError:
        calc_output="SYNTAX ERROR"
        print(calc_output)
    else:
        print(calc_output)
    e.insert(0, calc_output)

def output1():
    """Re-insert the last computed result into the entry."""
    e.insert(0,calc_output)

def clearall():
    """Clear the entry field."""
    e.delete(0,END)

def close():
    """Destroy the main window, ending the application."""
    root.destroy()

def Sqrt():
    """Replace the entry contents with their square root."""
    global f_num, ans
    f_num = float(e.get())
    ans = m.sqrt(f_num)
    e.delete(0, END)
    e.insert(0, str(ans))

def sine():
    """Sine of the entry value, interpreted as degrees."""
    global f_num,ans
    f_num = float(e.get())
    ans = m.sin(m.radians(f_num))
    e.delete(0,END)
    e.insert(0,str(ans))

def cosine():
    """Cosine of the entry value (degrees)."""
    global f_num, ans
    f_num = float(e.get())
    ans = m.cos(m.radians(f_num))
    e.delete(0, END)
    e.insert(0, str(ans))

def tangent():
    """Tangent of the entry value (degrees)."""
    global f_num, ans
    f_num = float(e.get())
    ans = m.tan(m.radians(f_num))
    e.delete(0, END)
    e.insert(0, str(ans))

def logarithm():
    """Base-10 logarithm of the entry value."""
    global f_num, ans
    f_num = float(e.get())
    ans = m.log10(f_num)
    e.delete(0, END)
    e.insert(0, str(ans))
# --- Button construction (laid out on the grid below) ---
#btinp= Button(root, text = "inp", padx = 40 , pady = 20, command = input1, bg='black',fg='snow',font=btfont)
btequal=Button(root, text = " = ", padx = 40 , pady = 20, command = input1, bg='black',fg='snow',font=btfont)
btclear=Button(root, text = " C ", padx = 40 , pady = 20, command = clearall, bg='red4',fg='snow',font=btfont)
bt1 = Button(root, text = "1", padx = 40 , pady = 20, command = lambda : click(1),font=btfont)
bt2 = Button(root, text = "2", padx = 40 , pady = 20, command = lambda : click(2),font=btfont)
bt3 = Button(root, text = "3", padx = 40 , pady = 20, command = lambda : click(3),font=btfont)
bt4 = Button(root, text = "4", padx = 40 , pady = 20, command = lambda : click(4),font=btfont)
bt5 = Button(root, text = "5", padx = 40 , pady = 20, command = lambda : click(5),font=btfont)
bt6 = Button(root, text = "6", padx = 40 , pady = 20, command = lambda : click(6),font=btfont)
bt7 = Button(root, text = "7", padx = 40 , pady = 20, command = lambda : click(7),font=btfont)
bt8 = Button(root, text = "8", padx = 40 , pady = 20, command = lambda : click(8),font=btfont)
bt9 = Button(root, text = "9", padx = 40 , pady = 20, command = lambda : click(9),font=btfont)
bt0 = Button(root, text = "0", padx = 40 , pady = 20, command = lambda : click(0),font=btfont)
decimalpt = Button(root, text=".", command= lambda: click('.'), padx=40, pady=20,font=btfont,bg='black',fg='snow')
btexponent=Button(root, text = " ^ ", padx = 40 , pady = 20, command = lambda : click('**') , bg='black',fg='snow',font=btfont)
btadd = Button(root, text = "+", padx = 40 , pady = 20, command = lambda : click('+'),font=btfont,bg='black',fg='snow')
btsub = Button(root, text = "-", padx = 40 , pady = 20, command = lambda : click('-'),font=btfont,bg='black',fg='snow')
btmul = Button(root, text = "*", padx = 40 , pady = 20, command = lambda : click('*'),font=btfont,bg='black',fg='snow')
btdiv = Button(root, text = "/", padx = 40 , pady = 20, command = lambda : click('/'),font=btfont,bg='black',fg='snow')
btclose= Button(root, text = "CLOSE", padx = 40 , pady = 20, width = 50, command = close ,font=btfont,bg='red3',fg='snow')
btopenbracket= Button(root, text = "(", padx = 40 , pady = 20, command = lambda : click('('),font=btfont,bg='black',fg='snow')
btclosebracket= Button(root, text = ")", padx = 40 , pady = 20, command = lambda : click(')'),font=btfont,bg='black',fg='snow')
btsqrt=Button(root,fg='snow',bg="black", text = "Sqt", padx = 41 , pady = 20, command = Sqrt,font=btfont)
btsin=Button(root,fg='snow',bg="black", text = "sin", padx = 41 , pady = 20, command =sine,font=btfont)
btcos=Button(root,fg='snow',bg="black", text = "cos", padx = 41 , pady = 20, command =cosine,font=btfont)
bttan=Button(root,fg='snow',bg="black", text = "tan", padx = 41 , pady = 20, command =tangent,font=btfont)
btlog=Button(root,fg='snow',bg="black", text = "log", padx = 41 , pady = 20, command =logarithm,font=btfont)
# --- Grid layout: column 0 = scientific keys, 1-3 = digits, 4 = operators ---
btequal.grid(row = 6, column = 3)
btclose.grid(row = 7, column = 0, columnspan = 5)
btclear.grid(row = 2,column = 3)
btopenbracket.grid( row = 2,column = 1)
btclosebracket.grid( row = 2,column = 2)
btexponent.grid( row = 6,column =4)
bt1.grid(row = 5, column = 1)
bt2.grid(row = 5, column = 2)
bt3.grid(row = 5, column = 3)
bt4.grid(row = 4, column = 1)
bt5.grid(row = 4, column = 2)
bt6.grid(row = 4, column = 3)
bt7.grid(row = 3, column = 1)
bt8.grid(row = 3, column = 2)
bt9.grid(row = 3, column = 3)
btadd.grid(row = 2, column = 4 )
btsub.grid(row = 3, column = 4 )
btmul.grid(row = 4, column = 4 )
btdiv.grid(row = 5, column = 4 )
bt0.grid(row = 6, column = 2 )
decimalpt.grid(row = 6,column = 1)
btsqrt.grid(row=2,column=0)
btsin.grid(row=3,column=0)
btcos.grid(row=4,column=0)
bttan.grid(row=5,column=0)
btlog.grid(row=6,column=0)
# Enter the Tk event loop (blocks until the window closes)
root.mainloop()
| Adarsh-Liju/COOL-STUFF | python_progs/CALC_FINAL.py | CALC_FINAL.py | py | 5,842 | python | en | code | 1 | github-code | 36 |
6842050847 | import cv2
import numpy as np
import re
from tqdm import tqdm
import os
import random
from PIL import Image, ImageEnhance
def augment(image):
    """Return a randomly flipped and rotated copy of *image* (an OpenCV array).

    Every call applies exactly one flip (vertical, horizontal, or both axes)
    and exactly one rotation (90 CW, 180, or 90 CCW), each chosen uniformly
    at random, so repeated calls on the same image yield varied augmentations.
    """
    def pick():
        # Uniformly choose one of the three variants for each transform.
        return random.choice([0, 1, 2])

    # Flip stage.  NOTE cv2.flip's flag semantics: 0 flips around the x-axis
    # (vertical flip / upside-down), positive flips around the y-axis
    # (horizontal mirror), negative flips around both axes.  The original
    # comments here had "horizontal"/"vertical" swapped.
    flip_mode = pick()
    if flip_mode == 0:
        image = cv2.flip(image, 0)   # vertical flip (around x-axis)
    elif flip_mode == 1:
        image = cv2.flip(image, 1)   # horizontal flip (around y-axis)
    else:
        image = cv2.flip(image, -1)  # both axes

    # Rotation stage.  Was `cv2.cv2.ROTATE_90_CLOCKWISE` -- the nested
    # `cv2.cv2` alias is an accident of older opencv-python packaging;
    # use the canonical attribute.
    rot_mode = pick()
    if rot_mode == 0:
        image = cv2.rotate(image, cv2.ROTATE_90_CLOCKWISE)
    elif rot_mode == 1:
        image = cv2.rotate(image, cv2.ROTATE_180)
    else:
        image = cv2.rotate(image, cv2.ROTATE_90_COUNTERCLOCKWISE)
    return image
# read img
# def fast_scandir(dirname):
# subfolders= [f.path for f in os.scandir(dirname) if f.is_dir()]
# for dirname in list(subfolders):
# subfolders.extend(fast_scandir(dirname))
# return subfolders
# read_dir = fast_scandir('/media/zheng/backup/shipclassification/dataset/split_aug/split/train/')
read_dir = ['/mnt/data2/Projects/BuildingDetection/ShipClassification/split/train2','/mnt/data2/Projects/BuildingDetection/ShipClassification/split/test2']
expand_times = 4

# For every image in each split directory, write `expand_times` independently
# augmented copies into the matching output directory (train2 -> train3,
# test2 -> test3).
for dire in read_dir:
    # Decide the output directory once per source directory instead of
    # re-testing it inside the innermost loop.
    if dire == '/mnt/data2/Projects/BuildingDetection/ShipClassification/split/train2':
        save_dir = '/mnt/data2/Projects/BuildingDetection/ShipClassification/split/train3'
    else:
        save_dir = '/mnt/data2/Projects/BuildingDetection/ShipClassification/split/test3'
    for filename in os.listdir(dire):
        image = cv2.imread(os.path.join(dire, filename))
        # splitext is robust to any extension length; the old `filename[:-4]`
        # assumed a 3-character extension.
        stem = os.path.splitext(filename)[0]
        for i in range(expand_times):
            img_aug = augment(image)
            # Build each output name from the untouched stem rather than
            # mutating `filename` and truncating with [:-6], which broke
            # for expand_times > 10 (two-digit suffixes are 7 chars).
            out_name = '{}_{}.png'.format(stem, i)
            cv2.imwrite(os.path.join(save_dir, out_name), img_aug)
print('augment finished')
| czkat/real-time-ship-classification-by-resnet-transfer-learning-with-original-dataset | augment.py | augment.py | py | 2,111 | python | en | code | 0 | github-code | 36 |
10663868437 | # -*- coding: utf-8 -*-
"""
Created on Fri Oct 14 10:36:34 2016
@author: Neo
cp the figure ../plot/ into ../NLiu2016/
"""
import os

# Source figure names in ../plot/.
l1 = ['Observation_span.eps',
      'Number_of_session.eps',
      'Observation_history.eps',
      'DEV_plot.eps',
      'LinearDriftOf4Sets.eps',
      'Rotation_No.eps',
      'RotAndGli.eps',
      'Simulation.eps',
      'Quality.eps',
      'LD_ours.eps',
      '39Special.eps',
      '294SouDis.eps',
      '323SouDis.eps',
      ]

# Target names in ../manuscript/figures/, position-matched to l1.  The
# parentheses are backslash-escaped because each name is interpolated into
# a shell command below.
l2 = ['fig1\(a\).eps',
      'fig1\(b\).eps',
      'fig2.eps',
      'fig3.eps',
      'fig4.eps',
      'fig5.eps',
      'fig6.eps',
      'fig7.eps',
      'fig8.eps',
      'fig9.eps',
      'fig11.eps',
      'fig10\(a\).eps',
      'fig10\(b\).eps',
      ]

# Guard against the two lists drifting out of sync before copying anything.
if len(l1) != len(l2):
    print('Error! Unequal length of these two lists.')
else:
    # zip pairs each source with its renamed target; clearer than indexing
    # both lists with range(len(l1)).
    for src, dst in zip(l1, l2):
        os.system('cp ../plot/' + src + ' ../manuscript/figures/' + dst)
        print('copy figure ' + src + ' : Done!')
print('All Done!') | Niu-Liu/thesis-materials | sou-selection/progs/FiguresCopy.py | FiguresCopy.py | py | 1,918 | python | en | code | 0 | github-code | 36 |
10579079643 | def open_file(filepath):
with open(filepath) as fl:
return fl.read()
def count_words(contents):
    """Return the number of whitespace-separated words in *contents*."""
    words = contents.split()
    return len(words)
def count_letters(contents):
    """Count occurrences of each character in *contents*, case-insensitively.

    Returns a plain dict mapping each lowercased character to its count.
    Note: every character is counted (spaces, punctuation, digits too);
    callers are expected to filter to alphabetic keys afterwards.
    """
    # Local import: this file's import section is not safely editable here.
    from collections import Counter
    # Counter replaces the manual "check key / increment" loop; wrap in
    # dict() so the return type stays exactly dict as before.
    return dict(Counter(contents.lower()))
def to_list(content):
    """Turn a character->count mapping into report rows.

    Keeps only alphabetic characters and returns a list of
    {'letter': ..., 'count': ...} dicts sorted by count, highest first.
    """
    entries = [
        {'letter': ch, 'count': cnt}
        for ch, cnt in content.items()
        if ch.isalpha()
    ]
    return sorted(entries, key=lambda e: e['count'], reverse=True)
if __name__ == '__main__':
    # Analyse a fixed input book and print a simple word/letter report.
    filepath = 'books/frankenstein.txt'
    file_content = open_file(filepath)
    words_count = count_words(file_content)
    l_dict = count_letters(file_content)  # counts of every character, lowercased
    l_li = to_list(l_dict)  # alphabetic characters only, sorted by count desc
    print(f'--- Begin report of {filepath} ---')
    print(f'{words_count} words found in the document')
    print()
    for item in l_li:
        print(f'The {item["letter"]} character was found {item["count"]} times')
    print('--- End report ---')
| winterbear2077/pythonLearn | main.py | main.py | py | 1,052 | python | en | code | 0 | github-code | 36 |
30522922561 | ################################
# mission_five.py
################################
import math
import time
from pybricks.ev3devices import *
from pybricks.parameters import *
from pybricks.robotics import *
from pybricks.iodevices import *
from pybricks.tools import wait
from pybricks.hubs import EV3Brick
from robot_18300 import robot_18300
def mission_five(r):
    """Run FLL "Mission 5" using robot helper *r* (a robot_18300 instance).

    Sequence: drive from base toward the museum / set-change area, square up
    against the wall, raise the light tower with the left attachment motor,
    then finish at the augmented-reality model.  Distances and angles are in
    the units used by the robot_18300 helpers (presumably mm / degrees --
    confirm against robot_18300) and were tuned empirically; the "used to be"
    comments record earlier tuning values.
    """
    print("Running Mission 5")
    #madeleine + Lydia going to the museum
    #r.robot.straight(-323)
    r.gyro_drive_straight_distance(-150,373)
    wait(100)
    #r.robot.turn(-45)
    r.gyro_tank_turn(150,-45)
    wait(100)
    #r.robot.straight(-343)
    r.gyro_drive_straight_distance(-150,271)
    wait(100)
    #Robot is facing the set change mission.
    #r.robot.turn(-45)
    r.gyro_tank_turn(150,-40)
    wait(100)
    #r.robot.straight(-391)
    r.gyro_drive_straight_distance(-150,331)
    wait(100)
    #Used to be -321 /\
    #turn toward wall
    #r.robot.turn(90)
    r.robot.drive(-200,76)
    wait(3011)
    #The robot is now at the wall
    r.robot.stop()
    # Wait until the drive base has actually come to rest before moving on
    # (state()[1] appears to be a speed reading -- verify against pybricks).
    while(r.robot.state()[1]>10):
        wait(10)
    r.robot.straight(160)
    r.robot.turn(23)
    #used to be (291)
    r.robot.drive(60,0)
    wait(1011)
    #The robot is now at the light tower
    r.robot.stop()
    #Backing up
    # \/ Light tower is going up
    # run_time is non-blocking (wait=False); the wait(5000) below gives the
    # attachment motor time to finish lifting.
    r.left_attachment_motor.run_time(-300,10000, then=Stop.HOLD, wait=False)
    wait(5000)
    r.robot.stop()
    #Robot is Facing Augmented reality.
    r.robot.straight(-40)
    #r.robot.turn(35)
    r.gyro_tank_turn(150,30)
    wait(100)
    #r.robot.drive(-75,0)
    #wait(1800)
    #r.robot.stop()
    r.robot.straight(-107)
    #At Augmented reality
    r.gyro_tank_turn(70,37)
    r.robot.stop()
    r.robot.straight(-123)
    r.robot.turn(-30)
    #Augmented reality is now completed.
| fll-18300/fall_2023 | mission_five.py | mission_five.py | py | 1,819 | python | en | code | 0 | github-code | 36 |
11162728846 | #
# Hardware:
# A USB C / PIC32 Breakout Board connected to an SSD1306-based OLED display
# (128x64 pixels) and an M5Stack joystick, both via I2C.
#
# Purpose:
# Illustrates the I2C Master functionality to manipulate two I2C Slaves for
# the purposes of the Kickstarter demo.
#
from usb_device import UsbDevice
from upside_down_display import UpsideDownDisplay
from ssd1306_i2c_slave_display import Ssd1306I2cSlaveDisplay
from keyboard import KeyboardThread
from splash_screen import SplashScreen
from font import Font
from box import Box
from tile import Tile
from ball import Ball
from paddle import Paddle
from playing_area import PlayingArea
from game_loop import GameLoop
from sampled_player import SampledPlayer
from computer_player import ComputerPlayer
from i2c_joystick_player import I2cJoystickPlayer
if __name__ == "__main__":
	# Pong demo: an SSD1306 OLED (I2C slave 0x3c) plus an M5Stack joystick
	# (I2C slave 0x52) hung off the USB-C/PIC32 breakout board's I2C master.
	with UsbDevice() as usb:
		usb.i2c.baud_rate_400khz()
		# Display is mounted upside-down in the demo rig, hence the wrapper.
		display = UpsideDownDisplay(Ssd1306I2cSlaveDisplay(usb.i2c.slave(0x3c)))
		display.initialise()
		# 5x8 glyphs packed into a 32x4-character sheet; Font cells are 6x9
		# to leave one pixel of inter-character spacing.
		with open("font-5x8.raw", "rb") as font_fd:
			font_5x8 = Font(Tile.from_raw(font_fd.read(), 32 * 6, 4 * 9), 6, 9)
		playing_area = PlayingArea(display)
		# Paddles are 2x12-pixel tiles; each Box constrains where its paddle
		# may move (left edge for player 0, right edge for player 1).
		paddle_tile = Tile(2, 12, [0b11000000] * 12)
		paddle_x_offset = 2
		paddle_y_offset = 2
		paddle_speed = 1.5
		paddles = [
			Paddle(
				[playing_area.bounding_box.x + paddle_x_offset, -1],
				0,
				Box(
					playing_area.bounding_box.x + paddle_x_offset,
					playing_area.bounding_box.y + paddle_y_offset,
					paddle_tile.width,
					playing_area.bounding_box.height - 2 * paddle_y_offset),
				paddle_tile),
			Paddle(
				[playing_area.bounding_box.max_x - 1 - paddle_x_offset - paddle_tile.width // 2, -1],
				0,
				Box(
					playing_area.bounding_box.max_x - 1 - paddle_x_offset - paddle_tile.width,
					playing_area.bounding_box.y + paddle_y_offset,
					paddle_tile.width,
					playing_area.bounding_box.height - 2 * paddle_y_offset),
				paddle_tile)]
		# Left paddle is computer-controlled; right paddle follows the I2C
		# joystick, sampled every 10 ms.
		players = [
			ComputerPlayer(paddles[0], max_speed = paddle_speed, difficulty = 0.1),
			SampledPlayer(
				I2cJoystickPlayer(paddles[1], usb.i2c.slave(0x52), 1, -128, paddle_speed / 128),
				interval = 0.01),
			]
		game = GameLoop(
			playing_area,
			Ball(
				playing_area.centre,
				[1.8, 0],
				playing_area.bounding_box,
				Tile(6, 6, [
					0b00110000,
					0b01111000,
					0b11111100,
					0b11111100,
					0b01111000,
					0b00110000])),
			players,
			score_font = font_5x8,
			message_font = font_5x8)
		# Splash logo before play starts (128x64 raw bitmap).
		with open("lophtware-128x64.raw", "rb") as logo_fd:
			logo = Tile.from_raw(logo_fd.read(), 128, 64)
		SplashScreen(logo).show(display)
		quit = False
		# Keyboard thread callback: 'q' quits; returning True stops the thread.
		def on_keyboard_input(cmd):
			global quit
			quit = cmd == 'q'
			return quit
		keyboard = KeyboardThread(on_keyboard_input)
		playing_area.draw()
		# Main loop: advance one frame, push the framebuffer over I2C; exit
		# when the game reports completion or the user pressed 'q'.
		while not quit:
			if not game.do_frame(display):
				break
			display.blit()
		# Closing "thanks" screen.
		with open("thanks-128x64.raw", "rb") as thanks_fd:
			thanks = Tile.from_raw(thanks_fd.read(), 128, 64)
		SplashScreen(thanks).show(display)
| lophtware/UsbCPic32Breakout | src/examples/pong/python/pong.py | pong.py | py | 2,980 | python | en | code | 2 | github-code | 36 |
38844598957 | import re
# Parse customers.txt: on each line, the first run of letters/spaces is the
# customer name and the first run of digits is the mobile number.  Lines
# missing either piece are skipped; a repeated name keeps the last number.
customers = {}
# `with` guarantees the file is closed even if parsing raises (the original
# open()/close() pair leaked the handle on any exception); iterating the
# file object streams lines instead of materialising them via readlines().
with open("customers.txt", "rt") as f:
    for line in f:
        name_match = re.search(r'[A-Za-z ]+', line)
        if name_match is None:
            continue
        name = name_match.group(0).strip()
        if not name:
            continue
        mobile_match = re.search(r'\d+', line)
        if mobile_match is None:
            continue
        customers[name] = mobile_match.group(0)

# Report sorted by name, name left-aligned in a 30-column field.
for name, mobile in sorted(customers.items()):
    print(f"{name:30} {mobile}")
| srikanthpragada/PYTHON_04_APR_2022 | demo/libdemo/list_customers.py | list_customers.py | py | 481 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.