seq_id
stringlengths
4
11
text
stringlengths
113
2.92M
repo_name
stringlengths
4
125
sub_path
stringlengths
3
214
file_name
stringlengths
3
160
file_ext
stringclasses
18 values
file_size_in_byte
int64
113
2.92M
program_lang
stringclasses
1 value
lang
stringclasses
93 values
doc_type
stringclasses
1 value
stars
int64
0
179k
dataset
stringclasses
3 values
pt
stringclasses
78 values
4248917006
import os def removeComments(filename): savedFile = filename.replace('.txt', 'Copy.txt') os.rename(filename, savedFile) with open(filename, 'w') as new_file: with open(savedFile) as old_file: for line in old_file: if '#' not in line and line != '\n': new_file.write(line) return(True)
guilyx/rm-comments-cmakelists
lib/rm_comments.py
rm_comments.py
py
357
python
en
code
0
github-code
13
26870146605
from itertools import cycle def pan(s): if len(s)!=9: return False for d in "123456789": if not d in s: return False return True def cp(i,n): ps=[] for mp in range(1,n+1): ps.append(str(i*mp)) return "".join(ps) for i in range(1,999999): cps=map(cp,cycle([i]),list(range(1,30))) for cp_ in cps: if pan(cp_): print(cp_) break
zydiig/PESolution
38.py
38.py
py
424
python
en
code
0
github-code
13
33621487085
from fastapi import APIRouter, Security from fastapi.security import APIKeyHeader from fastapi.responses import JSONResponse import requests from settings import DB_SERVER_URL from src.utils.base_utils import raise_exception from src.validation_models.user_model import UserCredentialsIn router = APIRouter() @router.post("/login", tags=["authorization"]) async def login(item: UserCredentialsIn): try: response = requests.post(f"{DB_SERVER_URL}/login", json=item.dict()) return JSONResponse( status_code=response.status_code, content=response.json() ) except Exception as e: raise_exception(e) @router.get("/", tags=["authorization"]) async def get_my_profile(authorization=Security(APIKeyHeader(name="Authorization", auto_error=False))): try: response = requests.get(f"{DB_SERVER_URL}/", headers={"Authorization": authorization}) return JSONResponse( status_code=response.status_code, content=response.json() ) except Exception as e: raise_exception(e) @router.post("/logout", tags=["authorization"]) async def logout(authorization=Security(APIKeyHeader(name="Authorization", auto_error=False))): try: response = requests.post(f"{DB_SERVER_URL}/logout", headers={"Authorization": authorization}) return JSONResponse( status_code=response.status_code, content=response.json() ) except Exception as e: raise_exception(e)
iulianag/disertatie
business_logic/src/endpoints/authorization.py
authorization.py
py
1,608
python
en
code
0
github-code
13
74001434899
#!/usr/bin/python import sys import re from printer import Printer def GetComment(line): what = re.compile(".*//(.*)").match(line) if what != None and len(what.groups()) > 0: return what.groups()[0] else: return "" def Transform(filename, out): lines = open(filename).readlines() printer = Printer(out) printer.AppendLine("// Generated by inltoas.py. DO NOT EDIT!") printer.AppendLine("package client.model") printer.AppendLine("{") printer.IncIndent() printer.AppendLine("public final class WindowId") printer.AppendLine("{") printer.IncIndent() re_declare = re.compile("WINDOW_ID\(\s*(\S+)\s*\)") index = 0 for line in lines: if "WID_BEGIN_BLOCK" in line: pass # parse declcare statements what = re_declare.match(line) if what != None: msgName=what.groups()[0] comment = GetComment(line) assIndex = index + 1 printer.AppendLine('public static const %s:int = %s; // %s' % (msgName, assIndex, comment.decode("gbk").encode("utf8"))) index = assIndex continue line = line.strip() if line.startswith("//") or len(line) == 0: printer.AppendLine("%s" % line.decode("gbk").encode("utf8")) else: printer.AppendLine("// %s" % line.decode("gbk").encode("utf8")) printer.DecIndent() printer.AppendLine("}") printer.DecIndent() printer.AppendLine("}") printer.Flush() if __name__ == "__main__": """ usage: inttoas.py INL_FILE OUT_FILE """ input_path = sys.argv[1] output_path = None if len(sys.argv) > 2: output_path = sys.argv[2] Transform(input_path, output_path)
tiance7/CardDoc
sixcube/tools/packetc/src/windowIdTool.py
windowIdTool.py
py
1,858
python
en
code
0
github-code
13
4682706303
__struct_classes = {} from sydpy.types._type_base import TypeBase from sydpy import ConversionError from collections import OrderedDict from itertools import islice def Struct(*args): vals = [] names = [] for a in args: names.append(a[0]) vals.append(a[1]) # s_tuple = tuple(names) dtype=OrderedDict(list(zip(names, vals))) if args not in __struct_classes: __struct_classes[args] = type('struct', (struct,), dict(dtype=dtype)) return __struct_classes[args] class struct(TypeBase): dtype = None def __init__(self, val=[]): self._val = [] self._vld = [] for t, a in zip(self.dtype, val): val = self.dtype[t](a) self._val.append(val) try: self._vld.append(val._full()) except AttributeError: self._vld.append(True) for (_,t) in islice(self.dtype.items(), len(self._val), len(self.dtype)): self._val.append(t()) self._vld.append(False) def _replace(self, key, val): if isinstance( key, slice ) : high = max(key.start, key.stop) low = min(key.start, key.stop) if high >= self.w: raise IndexError("The index ({0}) is out of range.".format(key)) val = self._val.copy() for key in range(low, high + 1): val[key] = self.dtype(val[key - low]) elif isinstance( key, int ) : if high >= self.w: raise IndexError("The index ({0}) is out of range.".format(key)) val = self._val.copy() val[key] = self.dtype(val) else: raise TypeError("Invalid argument type.") return self.__class__(val) def _hdl_gen_ref(self, conv): s = conv._hdl_gen_ref(self._val[0]) if len(self._val) > 1: s += ", " for e in self._val[1:]: s += conv._hdl_gen_ref(e) s = "'{" + s + "}" return s @classmethod def _hdl_gen_decl(cls): pass @classmethod def _hdl_gen_call(cls, conv=None, node=None): args = [] for a in node.args: args.append(conv.obj_by_node(a)) a = cls(*args) return a._hdl_gen_ref(conv) @classmethod def deref(self, key): return self.dtype def __str__(self): return "(" + ",".join([str(e) for e in self._val]) + ")" __repr__ = __str__ # def __next__(self): # return next(iter(self.val)) def __iter__(self): return iter(self._val) 
def __len__(self): return len(self.dtype) def _full(self): for u in self._vld: if not u: return False return True def __getattr__(self, key): try: if key in self.dtype.keys(): return self._val[list(self.dtype.keys()).index(key)] else: return super().__getattribute__(key) except ValueError: raise AttributeError # def __setattr__(self, key, val): # try: # self._val[list(self.dtype.keys()).index(key)] = val # except ValueError: # raise AttributeError def __getitem__(self, key): if isinstance( key, slice ) : st = Struct(*islice(self.dtype.items(), key.start, key.stop)) return st(list(self._val[key])) elif isinstance( key, int ) : return self._val[key] else: raise TypeError("Invalid argument type.") def __setitem__(self, key): return self._val[key] @classmethod def _from_dict(cls, other): s = cls() for k,v in other.items(): try: i = list(s.dtype.keys()).index(k) except ValueError: raise ConversionError s._val[i] = v s._vld[i] = True return s # s[k] = v # if cls.w == other.w: # return other # else: # raise ConversionError @classmethod def _rnd(cls, rnd_gen): val = [rnd_gen._rnd(cls.dtype[t]) for t in cls.dtype] return cls(val) def _icon(self, other): for i, u in reversed(list(enumerate(self._vld))): if u: last_unset = i + 1 break else: last_unset = 0 if last_unset >= len(self): return (None, other) remain = other val = self._val.copy() while last_unset < len(self): try: dt_remain = self._val[last_unset] conv_gen = list(self.dtype.items())[last_unset][1]._convgen(remain, dt_remain) data, remain = next(conv_gen) val[last_unset] = data last_unset += 1 except StopIteration as e: remain = e.value if remain is not None: if last_unset < len(self): val[last_unset] = remain remain = None break new_self = self.__class__(val) return (new_self, remain)
bogdanvuk/sydpy
sydpy/types/struct.py
struct.py
py
5,625
python
en
code
12
github-code
13
13102772244
""" Genome language: C(P),c : command, where C is a current state, P is a previous state (() if there is no condition), c is a condition on the number of connections Command language: ++X - grow an adjacent cell in X state --X - remove adjacent cell in X state +X - connect to the closest cell in X state -X - disconnect from a cell in X state X - change state to X """ import re import automata.graph_operatations as go class Command: def __init__(self, text): action, self.state = re.match(r'([+-]*)(\w+)', text).groups() self.function = { '++': self.plus_plus, '--': self.minus_minus, '+': self.plus, '-': self.minus, '': self.change_state }[action] def plus_plus(self, c): new_cell = Cell(c.graph, self.state) go.add_vertex(c.graph, new_cell) go.add_edge(c.graph, new_cell, c, directed=False) def minus_minus(self, c): for v in c.graph[c]: if v.state == self.state: go.remove_vertex(c.graph, v) break def plus(self, c): closest = go.find_closest(c.graph, c, lambda x: x.state == self.state and not x in c.graph[c]) if closest: go.add_edge(c.graph, closest, c) def minus(self, c): for v in c.graph[c]: if v.state == self.state: go.remove_edge(c.graph, v, c) break def change_state(self, c): c.previous_state = c.state c.state = self.state class Operation: def __init__(self, c_state, p_state, c_condition, command): self.c_state = c_state self.p_state = p_state self.c_condition = c_condition.replace(' ', '') self.command = Command(command.replace(' ', '')) def execute(self, cell): if cell.state == self.c_state and cell.previous_state == self.p_state: if self.connection_condition_satisfied(cell.number_of_connections): self.command.function(cell) def connection_condition_satisfied(self, c): return eval(self.c_condition) class Genome: re_operation = re.compile(r'(\w+)\((\w*)\),(.+):(.+)') def __init__(self, text): self.operations = [] for line in text.splitlines(): current_state, previous_state, connection_condition, command = re.match(Genome.re_operation, line).groups() 
self.operations.append(Operation(current_state, previous_state, connection_condition, command))
olya-d/growing-graph
automata/genome.py
genome.py
py
2,515
python
en
code
0
github-code
13
13538875132
from django.test import TestCase from scoreboard.models import ScoreBoard class ScoreBoardTest(TestCase): """ Test module for Puppy model """ def setUp(self): ScoreBoard.objects.create(name='Johhny', score='100') ScoreBoard.objects.create(name='Bravo', score=75) def test_puppy_breed(self): johhny = ScoreBoard.objects.get(name='Johhny') bravo = ScoreBoard.objects.get(name='Bravo') self.assertEqual(johhny.get_username(), "Johhny") self.assertEqual(bravo.get_username(), "Bravo")
emerengg/reaction-time-based-game
server/src/scoreboard/tests/test_models.py
test_models.py
py
543
python
en
code
0
github-code
13
39947161962
#!/usr/bin/env python3 import csv def parse_csv(data, has_header=False): """ Parses the CSV data into a list of dictionary objects Throws an exception if the CSV is badly formatted Arguments data -- a string containing the CSV data has_header -- parse the first row as a header or not """ reader = csv.reader( data.splitlines(), quotechar='"', delimiter=',', quoting=csv.QUOTE_ALL, skipinitialspace=True) rows = list(reader) if len(rows) == 0: return [] column_count = len(rows[0]) header = [] offset = 0 if has_header: header = rows[0] # Skip first row if it's the header in the data offset = 1 else: header = [ str(i) for i in range(0, column_count) ] ret = [] for x in range(offset, len(rows)): row = rows[x] tmp = {} if len(row) != column_count: raise Exception( "Column count for row is not equal to the expected. " "Expected '{}' columns, row at line '{}' has '{}' columns" .format(column_count, x, len(row))) for i in range(0, column_count): tmp[header[i]] = row[i] ret.append(tmp) return ret
jtmpu/latedit
latedit/csv.py
csv.py
py
1,273
python
en
code
0
github-code
13
1514991995
with open("sinav_veri_seti.txt", "r") as f: liste = f.readlines() f.close() def change_label(label): if "#0#" in label: return "Olumsuz" elif "#1#" in label: return "Olumlu" else: return "Tarafsız" etiketler = [] gorusler = [] for k in liste: if ";; " not in k: gorusler.append(k.split(";;")[0]) etiketler.append(k.split(";;")[1]) with open("sinav_veri_seti_son.txt", "a") as f: for k in range(len(etiketler)): f.write(gorusler[k]+";;"+change_label(etiketler[k])+"\n") f.close()
sandiklibilgisayarprogramlama/bilgisayarlaveriisleme-2023
hafta 13/duygu_siniflandirma.py
duygu_siniflandirma.py
py
566
python
en
code
0
github-code
13
5471601
import matplotlib.pyplot as plt import numpy as np from scipy.integrate import solve_ivp import tensorflow as tf from tensorflow import keras from tensorflow.keras import layers import sys sys.path.append('..') import neural_ode.NeuralODE import neural_ode.ODESolvers tf.keras.backend.set_floatx('float64') class SimpleModel(tf.keras.Model): def __init__(self, dyn_dim = 1): super().__init__() w_init = tf.random_normal_initializer(mean=-1.0, stddev=0.05) self.w = tf.Variable( initial_value = w_init(shape=(dyn_dim*2, dyn_dim), dtype="float64"), trainable=True, ) self.dyn_dim = dyn_dim def call(self, inputs): vels = inputs[:, self.dyn_dim:] accs = tf.matmul(inputs, self.w) return tf.concat([vels, accs], axis=1) model = SimpleModel() n_ode = neural_ode.NeuralODE.NeuralODE(model, 2) # N_n = int(2) c = 0.1 k = 4.0 def oscilator(t, y): return np.array([y[1], -c*y[1]-k*y[0]]) t_final = 20.0 n_eval = int(501) t_span = np.array([0.0, t_final]) y0 = np.array([1.0, 0.0]) sol = solve_ivp(oscilator, t_span, y0, t_eval=np.linspace(0, t_final, num=n_eval)) # transform to tensorflow t_span_tf = tf.constant(t_span) y0_tf = tf.constant(y0, dtype=tf.float64) t_target = tf.constant(sol.t) # only displacements y_target = tf.expand_dims(tf.constant(np.transpose(sol.y[0, :])), axis=1) # model.variables[0].assign(np.array([[-k+0.0], [-c]])) n_epoch = 2 n_ode.fit(t_target, y_target, n_epoch=n_epoch, n_batch=1, adjoint_method=False, missing_derivative=[0], adjust_initial=True)
IvanPles/Neural-ODE
neural_ode/Test_second_deriv.py
Test_second_deriv.py
py
1,596
python
en
code
2
github-code
13
9984085861
# -*- coding: utf-8 -*- ############################################### #created by : lxy #Time: 2018/12/3 14:09 #project: Face detect #company: #rversion: 0.1 #tool: python 2.7 #modified: #description face detect testing caffe model #################################################### import numpy as np def bbox_overlaps(boxes1, boxes2): """Computes IoU overlaps between two sets of boxes. boxes1:anchors boxes2: gt_box, [N, (y1, x1, y2, x2)]. For better performance, pass the largest set first and the smaller second. """ # Areas of anchors and GT boxes area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1]) area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1]) # Compute overlaps to generate matrix [boxes1 count, boxes2 count] # Each cell contains the IoU value. overlaps = np.zeros((boxes1.shape[0], boxes2.shape[0])) for i in range(overlaps.shape[1]): box2 = boxes2[i] overlaps[:, i] = compute_iou(box2, boxes1, area2[i], area1) return overlaps def compute_iou(box, boxes, box_area, boxes_area): """Calculates IoU of the given box with the array of the given boxes. box: 1D vector [y1, x1, y2, x2] boxes: [boxes_count, (y1, x1, y2, x2)] box_area: float. the area of 'box' boxes_area: array of length boxes_count. Note: the areas are passed in rather than calculated here for efficiency. Calculate once in the caller to avoid duplicate work. """ # Calculate intersection areas y1 = np.maximum(box[0], boxes[:, 0]) y2 = np.minimum(box[2], boxes[:, 2]) x1 = np.maximum(box[1], boxes[:, 1]) x2 = np.minimum(box[3], boxes[:, 3]) intersection = np.maximum(x2 - x1, 0) * np.maximum(y2 - y1, 0) union = box_area + boxes_area[:] - intersection[:] iou = intersection / union return iou
jimeffry/ssh-tensorflow
src/utils/boxes_overlap.py
boxes_overlap.py
py
1,862
python
en
code
5
github-code
13
20974188269
import random import requests import time HOSTS = [ 'us-east', 'eu-north', 'ap-south', 'ap-south-alpine', ] VEHICLES = [ 'bike', 'scooter', 'car', ] if __name__ == "__main__": print(f"starting load generator") time.sleep(3) while True: host = HOSTS[random.randint(0, len(HOSTS) - 1)] vehicle = VEHICLES[random.randint(0, len(VEHICLES) - 1)] print(f"requesting {vehicle} from {host}") resp = requests.get(f'http://{host}:5000/{vehicle}') print(f"received {resp}") time.sleep(random.uniform(0.2, 0.4))
grafana/pyroscope
examples/dotnet/rideshare/load-generator.py
load-generator.py
py
591
python
en
code
8,798
github-code
13
38391354543
import numpy as np import cv2 class ConcatenateImages: def __init__(self, imgpath1, imgpath2, imgpath3, imgpath4): self.img1 = cv2.imread(imgpath1) self.img2 = cv2.imread(imgpath2) self.img3 = cv2.imread(imgpath3) self.img4 = cv2.imread(imgpath4) def concatenate(self): step1 = np.concatenate((self.img1, self.img2), axis = 1) step2 = np.concatenate((self.img3, self.img4), axis = 1) res = np.concatenate((step1, step2), axis = 1) return res
mharunturkmenoglu/ComputerVision
ExtendImage/concatenateImages.py
concatenateImages.py
py
515
python
en
code
0
github-code
13
6538031578
import socket import sys from config import * # Create a UDP socket sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) try: while True: message = raw_input() print('sending "{}"'.format(message)) sent = sock.sendto(message, (SERVER_ADDRESS, SERVER_PORT)) finally: print('closing socket') sock.close()
IzzyBrand/ledvis
old/led_test_client.py
led_test_client.py
py
343
python
en
code
40
github-code
13
2290666842
#!/usr/bin/python #!/usr/bin/python -tt # -*- coding: utf-8 -*- # (c) 2012, Red Hat, Inc # Based on yum module written by Seth Vidal <skvidal at fedoraproject.org> # (c) 2014, Epic Games, Inc. # Written by Lester Claudio <claudiol at redhat.com> # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # import os import platform import tempfile import shutil ANSIBLE_METADATA = {'status': ['stableinterface'], 'supported_by': 'core', 'version': '1.0'} DOCUMENTATION = ''' --- module: buildah_commit version_added: historical short_description: buildah-commit - Create an image from a working container. description: buildah-commit - Create an image from a working container. Writes a new image using the specified container's read-write layer and if it is based on an image, the layers of that image. If image does not begin with a registry name component, localhost will be added to the name. 
options: # informational: requirements for nodes requirements: [ buildah ] author: - "Red Hat Consulting (NAPS)" - "Lester Claudio" ''' EXAMPLES = ''' - name: BUILDAH | Test output of "buildah add --noheading <image_name>" command buildah_commit: container: fedora-working-container imgname: docker://localhost:5000/fedora-claudiol creds: username:password heading: no register: result - debug: var=result.stdout_lines ''' def buildah_commit(module, container, imgname, authfile, certdir, creds, compression, format, iidfile, quiet, rm, signature_policy, squash, tls_verify): if module.get_bin_path('buildah'): buildah_bin = module.get_bin_path('buildah') buildah_basecmd = [buildah_bin, 'commit'] if authfile: r_cmd = ['--authfile'] buildah_basecmd.extend(r_cmd) r_cmd = [autfile] buildah_basecmd.extend(r_cmd) if certdir: r_cmd = ['--cert-dir'] buildah_basecmd.extend(r_cmd) r_cmd = [certdir] buildah_basecmd.extend(r_cmd) if creds: r_cmd = ['--creds'] buildah_basecmd.extend(r_cmd) r_cmd = [creds] buildah_basecmd.extend(r_cmd) if compression: r_cmd = ['--disable-compression'] buildah_basecmd.extend(r_cmd) if format: r_cmd = ['--format'] buildah_basecmd.extend(r_cmd) r_cmd = [format] buildah_basecmd.extend(r_cmd) if iidfile: r_cmd = ['--iidfile'] buildah_basecmd.extend(r_cmd) r_cmd = [iidfile] buildah_basecmd.extend(r_cmd) if quiet: r_cmd = ['--quiet'] buildah_basecmd.extend(r_cmd) if rm: r_cmd = ['--rm'] buildah_basecmd.extend(r_cmd) if container: r_cmd = [container] buildah_basecmd.extend(r_cmd) if imgname: r_cmd = [imgname] buildah_basecmd.extend(r_cmd) return module.run_command(buildah_basecmd) def main(): module = AnsibleModule( argument_spec = dict( container=dict(required=True), imgname=dict(required=True), authfile=dict(required=False, default=''), certdir=dict(required=False, default=''), creds=dict(required=False, default=''), compression=dict(required=False, default='no', type='bool'), format=dict(required=False, default='oci', choices=['oci', 'docker']), 
iidfile=dict(required=False, default=""), quiet=dict(required=False, default="no", type="bool"), rm=dict(required=False, default="no", type="bool"), signature_policy=dict(required=False, default=""), squash=dict(required=False, default="no", type="bool"), tls_verify=dict(required=False, default="no", type="bool") ), supports_check_mode = True ) params = module.params container = params.get('container', '') imgname = params.get('imgname', '') authfile = params.get('authfile', '') certdir = params.get('certdir', '') creds = params.get('creds', '') compression = params.get('compression', '') format = params.get('format', '') iidfile = params.get('iidfile', '') quiet = params.get('quiet', '') rm = params.get('rm', '') signature_policy = params.get('signature_policy', '') squash = params.get('squash', '') tls_verify = params.get('tls_verify', '') rc, out, err = buildah_commit(module, container, imgname, authfile, certdir, creds, compression, format, iidfile, quiet, rm, signature_policy, squash, tls_verify) if rc == 0: module.exit_json(changed=True, rc=rc, stdout=out, err = err ) else: module.fail_json(msg=err) ##changed=False, rc=rc, stdout=out, err = err ) # import module snippets from ansible.module_utils.basic import * from ansible.module_utils.urls import * if __name__ == '__main__': main()
claudiol/buildah-ansible
library/buildah_commit.py
buildah_commit.py
py
5,597
python
en
code
3
github-code
13
17050584054
#!/usr/bin/env python # -*- coding: utf-8 -*- import json from alipay.aop.api.constant.ParamConstants import * class CreditPayChargePricingVO(object): def __init__(self): self._actual_charge = None self._actual_charge_rate = None self._charge_code = None self._charge_name = None self._origin_charge = None self._origin_charge_rate = None @property def actual_charge(self): return self._actual_charge @actual_charge.setter def actual_charge(self, value): self._actual_charge = value @property def actual_charge_rate(self): return self._actual_charge_rate @actual_charge_rate.setter def actual_charge_rate(self, value): self._actual_charge_rate = value @property def charge_code(self): return self._charge_code @charge_code.setter def charge_code(self, value): self._charge_code = value @property def charge_name(self): return self._charge_name @charge_name.setter def charge_name(self, value): self._charge_name = value @property def origin_charge(self): return self._origin_charge @origin_charge.setter def origin_charge(self, value): self._origin_charge = value @property def origin_charge_rate(self): return self._origin_charge_rate @origin_charge_rate.setter def origin_charge_rate(self, value): self._origin_charge_rate = value def to_alipay_dict(self): params = dict() if self.actual_charge: if hasattr(self.actual_charge, 'to_alipay_dict'): params['actual_charge'] = self.actual_charge.to_alipay_dict() else: params['actual_charge'] = self.actual_charge if self.actual_charge_rate: if hasattr(self.actual_charge_rate, 'to_alipay_dict'): params['actual_charge_rate'] = self.actual_charge_rate.to_alipay_dict() else: params['actual_charge_rate'] = self.actual_charge_rate if self.charge_code: if hasattr(self.charge_code, 'to_alipay_dict'): params['charge_code'] = self.charge_code.to_alipay_dict() else: params['charge_code'] = self.charge_code if self.charge_name: if hasattr(self.charge_name, 'to_alipay_dict'): params['charge_name'] = self.charge_name.to_alipay_dict() else: 
params['charge_name'] = self.charge_name if self.origin_charge: if hasattr(self.origin_charge, 'to_alipay_dict'): params['origin_charge'] = self.origin_charge.to_alipay_dict() else: params['origin_charge'] = self.origin_charge if self.origin_charge_rate: if hasattr(self.origin_charge_rate, 'to_alipay_dict'): params['origin_charge_rate'] = self.origin_charge_rate.to_alipay_dict() else: params['origin_charge_rate'] = self.origin_charge_rate return params @staticmethod def from_alipay_dict(d): if not d: return None o = CreditPayChargePricingVO() if 'actual_charge' in d: o.actual_charge = d['actual_charge'] if 'actual_charge_rate' in d: o.actual_charge_rate = d['actual_charge_rate'] if 'charge_code' in d: o.charge_code = d['charge_code'] if 'charge_name' in d: o.charge_name = d['charge_name'] if 'origin_charge' in d: o.origin_charge = d['origin_charge'] if 'origin_charge_rate' in d: o.origin_charge_rate = d['origin_charge_rate'] return o
alipay/alipay-sdk-python-all
alipay/aop/api/domain/CreditPayChargePricingVO.py
CreditPayChargePricingVO.py
py
3,760
python
en
code
241
github-code
13
41408536548
def pegarmax(lista): tei = max(lista) deish = lista.index(tei) return deish zenti = [] while True: try: cadeia = [] entrada= input() entrada = [int(x) for x in entrada.split(" ")] entrada1= input() if entrada1 == "": entrada1= input() for x in entrada1: cadeia.append(x) cadeia = [] tamanho = entrada[0] remocao = entrada[1] for x in entrada1: cadeia.append(x) cont= 0 zezin = pegarmax(cadeia) dedurar = "" while len(dedurar) < (tamanho-remocao): if len(cadeia[zezin:])+cont <(tamanho-remocao): while len(cadeia[zezin:])+cont <(tamanho-remocao): sub = cadeia[:zezin] zezin = pegarmax(sub) else: dedurar += cadeia[zezin] cadeia = cadeia[(zezin+1):] cont+=1 print(dedurar) except: break
BrunoVirgu/LProg2017.2
LAB_PROG/src/quebrando.py
quebrando.py
py
1,158
python
pt
code
0
github-code
13
36550054919
from rest_framework import serializers from .models import Adv, PrivatPaymentModel, YandexPaymentModel from django.contrib.auth.models import User from rest_framework_simplejwt.serializers import TokenObtainPairSerializer class AdvSerializer(serializers.HyperlinkedModelSerializer): owner = serializers.ReadOnlyField(source='owner.username') class Meta: model = Adv fields = ('id', 'url', 'owner', 'datetime_start', 'datetime_end', 'content', 'wn8', 'wins_percent', 'tag', 'url_clan', 'status') class PrivatSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = PrivatPaymentModel fields = ('payment_id', 'state', 'message', 'amt', 'ccy') class YandexSerializer(serializers.HyperlinkedModelSerializer): class Meta: model = YandexPaymentModel fields = ('operation_id', 'amount', 'codepro', 'label') class UserSerializer(serializers.ModelSerializer): ads = serializers.PrimaryKeyRelatedField(many=True, queryset=Adv.objects.all()) class Meta: model = User fields = ('username', 'email', 'first_name', 'last_name', 'is_superuser', 'ads') class TokenSerializer(TokenObtainPairSerializer): @classmethod def get_token(cls, user): token = super().get_token(user) token.user = User.objects.get(username=user.username) return token def validate(self, attrs): data = super().validate(attrs) refresh = self.get_token(self.user) data['refresh'] = str(refresh) data['access'] = str(refresh.access_token) data['user'] = {"pk": refresh.user.pk, "username": str(refresh.user.username), "email": str(refresh.user.email), "first_name": str(refresh.user.first_name), "last_name": str(refresh.user.last_name) } return data
alpine-cat/back
getmoney/getmoney/serializers.py
serializers.py
py
1,885
python
en
code
0
github-code
13
3406729119
"""Experiment definition abstraction class.""" import contextlib import dateutil.parser from jacquard.utils import check_keys from jacquard.buckets import NUM_BUCKETS from jacquard.constraints import Constraints, ConstraintContext class Experiment(object): """ The definition of an experiment. This is essentially a plain-old-data class with utility methods for canonical serialisation and deserialisation of various flavours. """ def __init__( self, experiment_id, branches, *, constraints=None, name=None, launched=None, concluded=None ): """Base constructor. Takes all the arguments.""" if not experiment_id: raise ValueError("Experiment ID must be non-empty") self.id = experiment_id if not branches: raise ValueError("No branches given") branch_ids = set() for branch in branches: if "id" not in branch: raise ValueError("Branch without ID") branch_id = branch["id"] if branch_id in branch_ids: raise ValueError( "Duplicate branch ID: '{branch_id}'".format(branch_id=branch_id) ) branch_ids.add(branch_id) if "settings" not in branch: raise ValueError("No settings given") self.branches = branches if constraints is not None: self.constraints = constraints else: self.constraints = Constraints() self.name = name or self.id if not self.name: raise ValueError("Blank name") self.launched = launched self.concluded = concluded if self.concluded and not self.launched: raise ValueError("Experiment concluded but not launched") if self.concluded and self.launched and self.launched > self.concluded: raise ValueError("Experiment concluded before launch") def is_live(self): """Establish whether this experiment is running.""" return self.launched is not None and self.concluded is None @classmethod def from_json(cls, obj): """ Create instance from a JSON-esque definition. 
Required keys: id, branches Optional keys: name, constraints, launched, concluded """ kwargs = {} if not isinstance(obj, dict): raise ValueError( "Experiment definition is not valid – " "top level is not a dict" ) if "id" not in obj: raise ValueError( "Experiment definition is not valid – " "no `id` is given." ) if "branches" not in obj: raise ValueError( "Experiment definition is not valid - " "no `branches` given." ) with contextlib.suppress(KeyError): kwargs["name"] = obj["name"] with contextlib.suppress(KeyError): kwargs["constraints"] = Constraints.from_json(obj["constraints"]) with contextlib.suppress(KeyError): kwargs["launched"] = dateutil.parser.parse(obj["launched"]) with contextlib.suppress(KeyError): kwargs["concluded"] = dateutil.parser.parse(obj["concluded"]) return cls(obj["id"], obj["branches"], **kwargs) @classmethod def from_store(cls, store, experiment_id): """Create instance from a store lookup by ID.""" json_repr = dict( store["experiments/{experiment_id}".format(experiment_id=experiment_id)] ) # Be resilient to missing ID if "id" not in json_repr: json_repr["id"] = experiment_id return cls.from_json(json_repr) @classmethod def enumerate(cls, store): """ Iterator over all named experiments in a store. Includes inactive experiments. 
""" prefix = "experiments/" for key in store: if not key.startswith(prefix): continue experiment_id = key[len(prefix):] yield cls.from_store(store, experiment_id) def to_json(self): """Serialise as canonical JSON.""" representation = { "id": self.id, "branches": self.branches, "constraints": self.constraints.to_json(), "name": self.name, "launched": str(self.launched), "concluded": str(self.concluded), } if not representation["constraints"]: del representation["constraints"] if representation["name"] == self.id: del representation["name"] if representation["launched"] == "None": del representation["launched"] if representation["concluded"] == "None": del representation["concluded"] return representation def save(self, store): """Save into the given store using the ID as the key.""" store[ "experiments/{experiment_id}".format(experiment_id=self.id) ] = self.to_json() def branch(self, branch_id): """ Get the branch with a given ID. In case of multiple branches with the same ID (which should Never Ever Happen), behaviour is undefined. If there is no such branch, LookupErrors will materialise. """ branches_by_id = {x["id"]: x for x in self.branches} check_keys((branch_id,), branches_by_id.keys(), exception=LookupError) return branches_by_id[branch_id] def _num_buckets(self, bucket_description): percent = bucket_description.get("percent", 100 // len(self.branches)) return (NUM_BUCKETS * percent) // 100 def branch_launch_configuration(self): """ Launch configuration for the branches of this experiment. This is the format expected for the `branches` argument of `release` and `close`, to actually decide which buckets see this experiment. """ return [(x["id"], self._num_buckets(x), x["settings"]) for x in self.branches] def includes_user(self, user_entry): """ Check whether a user meets the experiment's constraints. A (hopefully constant time) predicate. 
""" try: specialised_constraints = self._specialised_constraints except AttributeError: specialised_constraints = self.constraints.specialise( ConstraintContext(era_start_date=self.launched) ) self._specialised_constraints = specialised_constraints return specialised_constraints.matches_user(user_entry)
prophile/jacquard
jacquard/experiments/experiment.py
experiment.py
py
6,722
python
en
code
7
github-code
13
40837608259
import discord import logging from discord.ext import commands import random # Log SetUp logging.basicConfig(level=logging.INFO, filename='bot.log', filemode="w") def filterOnlyOnline(member): return member.status != discord.Status.offline and not member.bot class Features(commands.Cog): def __init__(self, bot): self.bot = bot @commands.command() async def kick(self, ctx, user : discord.Member): await ctx.send(":boot: PAF," + ctx.message.author.name + " just kicked {} !".format(user.name)) await ctx.message.delete() @commands.command() async def someone(self, ctx): memb = list(filter(filterOnlyOnline, ctx.guild.members)) length = len(memb) - 1 if length <= 0 : await ctx.send("No one to ping :c !") return else: await ctx.message.delete() Loto = random.randint(0,length) while memb[Loto].id == ctx.message.author.id: Loto = random.randint(0,length) #DEBUG logging.info(ctx.message.author.name + " pinged " + memb[Loto].name) embed = discord.Embed(color=discord.Colour.green()) embed.add_field(name = "You have been randomly pinged by {}, have a good day !".format(ctx.message.author.name), value = memb[Loto].mention, inline = False) embed.set_image(url='https://media.giphy.com/media/wrBURfbZmqqXu/giphy.gif') await ctx.send(embed = embed) @commands.command() async def cheatox(self, ctx): embed = discord.Embed() embed.set_image(url="https://media.giphy.com/media/3ohhwf3mprga8qAIOQ/giphy.gif") await ctx.send(embed=embed) @commands.command() async def poll(self, ctx, *args): #should be a "choice 1, emoji 1, choice 2, emoji 2" if (len(args)%2) != 0 : await ctx.send("You must have made a mistake, pattern is: \">pool_choice1_emoji1_choice2\_emoji2 ... 
\" with \_ as space") await bot.delete_message(ctx.message) else: poll = "" count = 0 k = 1 for i in range(0, len(args)): if (i == len(args)-1) : poll += args[i] else: if count == 0 : #is a choice poll += (args[i] + " ") count = 1 else: poll += (args[i] + " ou ") count = 0 embed = discord.Embed() embed.add_field(name = poll, value = "asks " + ctx.message.author.name, inline = False) message = await ctx.send(embed = embed) while k < len(args): await message.add_reaction(args[k]) k+=2 await ctx.message.delete() async def setup(bot): await bot.add_cog(Features(bot))
Viri0x/DiscordBot
cogs/features.py
features.py
py
3,097
python
en
code
0
github-code
13
38426087823
from __future__ import annotations import itertools from typing import Any, Iterable, Iterator, MutableMapping import toml from packaging.utils import NormalizedName from packaging.utils import canonicalize_name as canonicalize_project_name from pants.backend.python.macros.common_fields import ( ModuleMappingField, RequirementsOverrideField, TypeStubsModuleMappingField, ) from pants.backend.python.pip_requirement import PipRequirement from pants.backend.python.target_types import ( PythonRequirementModulesField, PythonRequirementResolveField, PythonRequirementsField, PythonRequirementTarget, PythonRequirementTypeStubModulesField, ) from pants.base.glob_match_error_behavior import GlobMatchErrorBehavior from pants.build_graph.address import Address from pants.core.target_types import ( TargetGeneratorSourcesHelperSourcesField, TargetGeneratorSourcesHelperTarget, ) from pants.engine.fs import DigestContents, PathGlobs from pants.engine.internals.selectors import Get from pants.engine.rules import Rule, collect_rules, rule from pants.engine.target import ( COMMON_TARGET_FIELDS, Dependencies, GeneratedTargets, GenerateTargetsRequest, InvalidFieldException, SingleSourceField, TargetGenerator, ) from pants.engine.unions import UnionMembership, UnionRule from pants.util.logging import LogLevel from pants.util.strutil import softwrap def parse_pyproject_toml( pyproject_toml: str, *, rel_path: str, overrides: MutableMapping[NormalizedName, MutableMapping[str, Any]], ) -> Iterator[PipRequirement]: parsed = toml.loads(pyproject_toml) deps_vals: list[str] = parsed.get("project", {}).get("dependencies", []) optional_dependencies = parsed.get("project", {}).get("optional-dependencies", {}) if not deps_vals and not optional_dependencies: raise KeyError( softwrap( "No section `project.dependencies` or " f"`project.optional-dependencies` found in {rel_path}" ) ) for dep in deps_vals: dep, _, _ = dep.partition("--") dep = dep.strip().rstrip("\\") if not dep or dep.startswith(("#", 
"-")): continue yield PipRequirement.parse(dep, description_of_origin=rel_path) for tag, opt_dep in optional_dependencies.items(): for dep in opt_dep: req = PipRequirement.parse(dep, description_of_origin=rel_path) # canonical_project_name = canonicalize_project_name(req.project_name) # override = overrides.get(canonical_project_name, {}) # tags: list[str] = override.get("tags", []) # tags.append(tag) # override["tags"] = tags # overrides[canonical_project_name] = override yield req class PEP621RequirementsSourceField(SingleSourceField): default = "pyproject.toml" required = False class PEP621RequirementsTargetGenerator(TargetGenerator): alias = "pep621_requirements" help = ( "Generate a `python_requirement` for each entry in a PEP 621 compliant " "pyproject.toml." ) generated_target_cls = PythonRequirementTarget # Note that this does not have a `dependencies` field. core_fields = ( *COMMON_TARGET_FIELDS, ModuleMappingField, TypeStubsModuleMappingField, PEP621RequirementsSourceField, RequirementsOverrideField, ) copied_fields = COMMON_TARGET_FIELDS moved_fields = (PythonRequirementResolveField,) class GenerateFromPEP621RequirementsRequest(GenerateTargetsRequest): # type: ignore generate_from = PEP621RequirementsTargetGenerator @rule( desc="Generate `python_requirement` targets from PEP621 pyproject.toml", level=LogLevel.DEBUG, ) async def generate_from_pep621_requirement( request: GenerateFromPEP621RequirementsRequest, union_membership: UnionMembership ) -> GeneratedTargets: generator = request.generator pyproject_rel_path = generator[PEP621RequirementsSourceField].value pyproject_full_path = generator[PEP621RequirementsSourceField].file_path overrides = { canonicalize_project_name(k): v for k, v in request.require_unparametrized_overrides().items() } file_tgt = TargetGeneratorSourcesHelperTarget( {TargetGeneratorSourcesHelperSourcesField.alias: pyproject_rel_path}, Address( request.template_address.spec_path, target_name=request.template_address.target_name, 
relative_file_path=pyproject_rel_path, ), ) digest_contents = await Get( DigestContents, PathGlobs( [pyproject_full_path], glob_match_error_behavior=GlobMatchErrorBehavior.error, description_of_origin=( f"{generator}'s field `{PEP621RequirementsSourceField.alias}`" ), ), ) requirements = parse_pyproject_toml( digest_contents[0].content.decode(), rel_path=pyproject_full_path, overrides=overrides, ) grouped_requirements = itertools.groupby( requirements, lambda parsed_req: parsed_req.project_name ) module_mapping = generator[ModuleMappingField].value stubs_mapping = generator[TypeStubsModuleMappingField].value def generate_tgt( project_name: str, parsed_reqs: Iterable[PipRequirement] ) -> PythonRequirementTarget: normalized_proj_name = canonicalize_project_name(project_name) tgt_overrides = overrides.pop(normalized_proj_name, {}) if Dependencies.alias in tgt_overrides: tgt_overrides[Dependencies.alias] = list( tgt_overrides[Dependencies.alias] ) + [file_tgt.address.spec] return PythonRequirementTarget( { **request.template, PythonRequirementsField.alias: list(parsed_reqs), PythonRequirementModulesField.alias: module_mapping.get( normalized_proj_name ), PythonRequirementTypeStubModulesField.alias: stubs_mapping.get( normalized_proj_name ), # This may get overridden by `tgt_overrides`, which will have already # added in the file tgt. Dependencies.alias: [file_tgt.address.spec], **tgt_overrides, }, request.template_address.create_generated(project_name), union_membership, ) result = tuple( generate_tgt(project_name, parsed_reqs_) for project_name, parsed_reqs_ in grouped_requirements ) + (file_tgt,) if overrides: raise InvalidFieldException( softwrap( f""" Unused key in the `overrides` field for {request.template_address}: {sorted(overrides)} """ ) ) return GeneratedTargets(generator, result) def rules() -> tuple[Rule | UnionRule, ...]: return ( *collect_rules(), UnionRule(GenerateTargetsRequest, GenerateFromPEP621RequirementsRequest), )
bryanwweber/pants-dependency-tracking
pants-plugins/pep621/pep621_requirements.py
pep621_requirements.py
py
7,186
python
en
code
0
github-code
13
20464043218
# 1929 import sys # 소수 리스트 max_n = 1000001 prime = [True] * max_n end = int(max_n ** 0.5) for i in range(2, end + 1): if prime[i]: for j in range(i+i, max_n, i): prime[j] = False # 입력 m, n = map(int, sys.stdin.readline().split()) m = 2 if m == 1 else m prime_list = [i for i in range(m, n+1) if prime[i]] for i in prime_list: print(i)
mhseo10/Baekjoon-Algorithm
basic/math/math_1929.py
math_1929.py
py
382
python
ko
code
0
github-code
13
74048168979
import os os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'first_project.settings') import django django.setup() ## FAKE POP SCRIPT import random from first_app.models import AccessRecord, Webpage, Topic from faker import Faker fakegen = Faker() topics=['Search', 'Social', 'Marketplace', 'News', 'Games'] def add_topic(): t = Topic.objects.get_or_create(top_name=random.choice(topics))[0] # Returns a tuple por eso agarramos el 0 t.save() return t def populate(N=5): for entry in range(N): # get the topic for the entry top = add_topic() # Create fake data for that entry fake_url = fakegen.url() fake_date = fakegen.date() fake_name = fakegen.company() # Create new webpage entry webpage = Webpage.objects.get_or_create(topic=top, url=fake_url, name=fake_name)[0] # Create a fake access record for that webpage # Usamos name=webpage porque en el modelo name esta especificado como Foreign Key acc_rec = AccessRecord.objects.get_or_create(name=webpage, date=fake_date)[0] if __name__ == '__main__': print('Populating script!') populate(20) print('Populating compleate!') # To run this script # python populate_first_app.py
ChristinaGaitan/django_first_project
populate_first_app.py
populate_first_app.py
py
1,193
python
en
code
0
github-code
13
73730926096
import subprocess import os # Nome do pacote do aplicativo que estamos controlando eat_venture_package = 'com.hwqgrhhjfd.idlefastfood' # Função para iniciar o aplicativo def open_app(): try: # Abrir o aplicativo usando o comando adb # cmd: adb shell monkey -p com.hwqgrhhjfd.idlefastfood -c android.intent.category.LAUNCHER 1 with open(os.devnull, 'w') as null_file: subprocess.run(['adb', 'shell', 'monkey', '-p', eat_venture_package, '-c', 'android.intent.category.LAUNCHER', '1'], stdout=null_file, stderr=null_file) print(f'O aplicativo {eat_venture_package} foi aberto com sucesso.') except Exception as e: print(f"Erro ao abrir o aplicativo: {str(e)}") #Função para validar se está aberto def app_is_open(): try: # Execute o comando adb para listar as atividades em execução # cmd(Filtrando o eatventure) adb shell dumpsys activity activities | findstr com.hwqgrhhjfd.idlefastfood result = subprocess.run(['adb', 'shell', 'dumpsys', 'activity', 'activities'], capture_output=True, text=True) # Verifique se o nome do pacote está na saída do comando if eat_venture_package in result.stdout: return True # O aplicativo está aberto else: return False # O aplicativo não está aberto except Exception as e: print(f"Erro ao verificar se o aplicativo já está aberto: {str(e)}") return False # Em caso de erro, assumimos que o aplicativo não está aberto # Função valida se a tela está em 1° plano. 
def is_screen_in_focus(): try: # Execute o comando adb para obter informações sobre a atividade em primeiro plano result = subprocess.run(['adb', 'shell', 'dumpsys', 'activity', 'activities'], capture_output=True, text=True) # Procure a linha que contém a atividade em primeiro plano (foreground) for line in result.stdout.splitlines(): if 'topActivity' in line and eat_venture_package in line: # A tela está em foco return True # A tela não está em foco return False except Exception as e: print(f"Erro ao verificar se tela está em 1° plano: {str(e)}") # Em caso de erro, assumimos que a tela não está em foco return False # Traz para a frente caso não esteja def bring_to_foreground(): try: # Execute o comando adb para trazer a atividade principal do aplicativo "eatventure" para o primeiro plano subprocess.run(['adb', 'shell', 'am', 'start', '-n', 'com.hwqgrhhjfd.idlefastfood/.MainActivity']) print("A tela do aplicativo eatventure foi trazida para o primeiro plano com sucesso.") except Exception as e: print(f"Erro ao trazer a tela do aplicativo para o primeiro plano: {str(e)}") def capture_and_copy_screenshot(project_folder="./"): # Nome do arquivo de captura de tela no dispositivo screenshot_filename = "/sdcard/screenshot.png" # Comando ADB para capturar a tela adb_capture_command = f"adb shell screencap {screenshot_filename}" # Captura a tela no dispositivo try: subprocess.run(adb_capture_command, shell=False) except subprocess.CalledProcessError as e: print(f"Erro ao capturar a tela: {e}") return None # Caminho para a pasta do projeto onde deseja salvar a captura de tela # Comando ADB para copiar a captura de tela para a pasta do projeto adb_pull_command = f"adb pull {screenshot_filename} {project_folder}" # Copia a captura de tela para a pasta do projeto try: subprocess.run(adb_pull_command, shell=True) return project_folder except subprocess.CalledProcessError as e: print(f"Erro ao copiar a captura de tela: {e}") return None def swipe_up(x1, y1, x2, y2, 
duration=500): """ Simula um gesto de arrastar para cima na tela do dispositivo Android. Args: x1 (int): Coordenada x de início do gesto. y1 (int): Coordenada y de início do gesto. x2 (int): Coordenada x de fim do gesto. y2 (int): Coordenada y de fim do gesto. duration (int): Duração do gesto em milissegundos (padrão: 500). """ adb_swipe_up_command = f"adb shell input swipe {x1} {y1} {x2} {y2} {duration}" subprocess.run(adb_swipe_up_command, shell=True) def swipe_down(x1, y1, x2, y2, duration=500): """ Simula um gesto de arrastar para baixo na tela do dispositivo Android. Args: x1 (int): Coordenada x de início do gesto. y1 (int): Coordenada y de início do gesto. x2 (int): Coordenada x de fim do gesto. y2 (int): Coordenada y de fim do gesto. duration (int): Duração do gesto em milissegundos (padrão: 500). """ adb_swipe_down_command = f"adb shell input swipe {x1} {y1} {x2} {y2} {duration}" subprocess.run(adb_swipe_down_command, shell=True) def press_and_hold(x, y, duration_seconds): try: # Comando adb para pressionar e segurar na tela adb_command = f"adb shell input swipe {x} {y} {x} {y} {int(duration_seconds * 1000)}" # Executar o comando adb subprocess.run(adb_command, shell=True, check=True) print(f'Pressionando e segurando em ({x}, {y}) por {duration_seconds} segundos.') except subprocess.CalledProcessError as e: print(f"Erro ao pressionar e segurar: {str(e)}") except Exception as e: print(f"Erro inesperado: {str(e)}")
JoaoBoll/eatventure-bot
adb_utils/adb_utils.py
adb_utils.py
py
5,539
python
pt
code
0
github-code
13
21092793163
from setup.graph import Graph import random from math import sqrt import pandas as pd import numpy as np from sklearn.cluster import spectral_clustering from setup.load_streets import Map import os data_path = os.path.join(os.path.abspath('../..'), 'Data') def cluster_graph(street_map, number_clusters): A = np.array(street_map.get_adj_matrix()) colors = spectral_clustering(A, n_clusters=number_clusters) with open(os.path.join(data_path, 'node_colors.txt'), 'w+') as f: for c in colors: f.write(str(c)+' ') def read_colors(): colors = [] with open(os.path.join(data_path, 'node_colors.txt'), 'r') as f: line = f.read().split(' ') for l in line: try: colors.append(int(l)) except ValueError: pass return colors def grid_search(grid, x,y): x = int(x) y = int(y) # Square search category = grid[y][x] radius = 1 while category == -1: search = [] # A list of locations to search next # Add coordinates of 4 sides of square around start coord search.extend([(x + 1, y + dy) for dy in range(-radius, radius + 1)]) search.extend([(x - 1, y + dy) for dy in range(-radius, radius + 1)]) search.extend([(x + dx, y + 1) for dx in range(-radius, radius + 1)]) search.extend([(x + dx, y - 1) for dx in range(-radius, radius + 1)]) for (nx, ny) in search: try: if grid[ny][nx] != -1: category = grid[ny][nx] break except IndexError: pass radius += 1 return category def cluster_crimes(street_map, width, height): scale_long = height / (street_map.max_long - street_map.min_long) scale_lat = width / (street_map.max_long - street_map.min_long) def canvas_coords(lat, long): lat -= street_map.min_lat long -= street_map.min_long lat *= scale_lat long *= scale_long return lat, long colors = read_colors() # Create grid grid = [[-1 for i in range(width)] for j in range(height)] for street in street_map.streets.values(): for c in street.coords: x, y = canvas_coords(*c) x = min(x, width - 1) y = min(y, height - 1) try: grid[int(y)][int(x)] = colors[street.node1] except IndexError: print(int(x), int(y), ' ', c) 
crime_table = open(os.path.join(data_path, 'colored_crimes.csv', 'w+')) crime_table.write('LAT,LONG,TIME,COLOR\n') data = pd.read_csv(os.path.join(data_path, 'crimes.csv')) for i in range(len(data['Lat'])): try: x, y = canvas_coords(data['Lat'][i], data['Long'][i]) category = grid_search(grid, x, y) crime_table.write(str(data['Lat'][i]) + ',' + \ str(data['Long'][i]) + ',' + \ data['OCCURRED_ON_DATE'][i] + ',' + \ str(category) + '\n') except (ValueError, IndexError) as err: pass crime_table.close()
thomaspendock/Analyze-Boston
src/setup/clustering.py
clustering.py
py
3,143
python
en
code
1
github-code
13
72289565459
try: import polyinterface except ImportError: import pgc_interface as polyinterface import requests import json import node_funcs LOGGER = polyinterface.LOGGER @node_funcs.add_functions_as_methods(node_funcs.functions) class SensorNode(polyinterface.Node): # class variables id = 'aqi' hint = [0,0,0,0] status= None def __init__(self, controller, primary, address, name): # call the default init super(SensorNode, self).__init__(controller, primary, address, name) self.host = '' self.headers = '' self.configured = False; self.uom = { 'CLITEMP' : 17, 'CLIHUM' : 22, 'BARPRES' : 117, 'GV0' : 56, 'GV1' : 45, 'GV2' : 56, 'GV3' : 56, 'GV4' : 56, 'GV5' : 56, 'GV6' : 56, 'GV7' : 56, 'GV8' : 56, 'GV9' : 56, 'GV10' : 56, 'GV11' : 25, 'GV12' : 51, } drivers = [ {'driver': 'CLITEMP', 'value': 0, 'uom': 17}, # temperature {'driver': 'CLIHUM', 'value': 0, 'uom': 22}, # humidity {'driver': 'BARPRES', 'value': 0, 'uom': 117}, # pressure {'driver': 'GV0', 'value': 0, 'uom': 56}, # current PM2.5 {'driver': 'GV1', 'value': 0, 'uom': 45}, # age in minutes {'driver': 'GV3', 'value': 0, 'uom': 56}, # 10 min avg {'driver': 'GV4', 'value': 0, 'uom': 56}, # 30 min avg {'driver': 'GV5', 'value': 0, 'uom': 56}, # 60 min avg {'driver': 'GV6', 'value': 0, 'uom': 56}, # 6 hr avg {'driver': 'GV7', 'value': 0, 'uom': 56}, # 24 hr avg {'driver': 'GV8', 'value': 0, 'uom': 56}, # 1 week avg {'driver': 'GV10', 'value': 0, 'uom': 56}, # AQI {'driver': 'GV11', 'value': 0, 'uom': 25}, # AQI string {'driver': 'GV12', 'value': 0, 'uom': 51}, # confidence ] def configure(self, sensor, apikey): self.host = 'https://api.purpleair.com/v1/sensors/' + sensor self.headers = {'X-API-Key':apikey} self.configured = True def epa_aqi(self, pm25): aqi = 0 breakpoints = [ [0, 12], [12.1, 35.4], [35.5, 55.4], [55.5, 150.4], [150.5, 250.4], [250.5, 500.4], ] indexes = [ [0, 50], [51, 100], [101, 150], [151, 200], [201, 300], [301, 500], ] pm25 = round(pm25,1) # find the breakpoints for the pm25 value try: for bpi in 
range(0,6): if pm25 >= breakpoints[bpi][0] and pm25 <= breakpoints[bpi][1]: break except Exception as e: LOGGER.error('AQI_bp: ' + str(e)) if bpi == 6: LOGGER.error('AQI out of range!') return try: aqi = ((indexes[bpi][1] - indexes[bpi][0]) / (breakpoints[bpi][1] - breakpoints[bpi][0])) * (pm25 - breakpoints[bpi][0]) + indexes[bpi][0] except Exception as e: LOGGER.error('AQI_calc: ' + str(e)) LOGGER.debug('Calculated AQI = ' + str(aqi)) return (round(aqi, 0), indexes[bpi][0]) def calculate_confidence(self, results): channel_a = results[0] channel_b = results[1] if 'AGE' in channel_a and 'AGE' in channel_b: if channel_a['AGE'] != channel_b['AGE']: LOGGER.error('data channels age differs, bad data!') return 0 else: LOGGER.error('missing data age info.') return 0 if 'PM2_5Value' in channel_a and 'PM2_5Value' in channel_b: A = float(channel_a['PM2_5Value']) B = float(channel_b['PM2_5Value']) C = 100 - abs(((A - B) / (A + B)) * 100) return round(C, 0) else: LOGGER.error('missing data for PM2.5.') return 0 def shortPoll(self): # Query for the current air quality conditions. We can do this fairly # frequently, probably as often as once a minute. 
if not self.configured: LOGGER.info('Skipping connection because we aren\'t configured yet.') return try: c = requests.get(self.host, headers=self.headers) try: jdata = c.json() except: LOGGER.error('Connection issue: ' + str(c)) c.close() return c.close() LOGGER.debug(jdata) if jdata == None: LOGGER.error('Current condition query returned no data') return sensor = jdata['sensor'] if 'name' in sensor: LOGGER.info('Air Quality data for ' + sensor['name']) if 'model' in sensor: LOGGER.info('Air Quality sensor type ' + sensor['model']) if 'pm2.5' in sensor: self.update_driver('GV0', sensor['pm2.5']) (aqi, idx) = self.epa_aqi(float(sensor['pm2.5'])) self.update_driver('GV10', aqi) self.update_driver('GV11', idx) if 'confidence' in sensor: LOGGER.info('Data confidence level = ' + str(sensor['confidence']) + '%') self.update_driver('GV12', sensor['confidence']) if 'temperature' in sensor: self.update_driver('CLITEMP', sensor['temperature']) if 'humidity' in sensor: self.update_driver('CLIHUM', sensor['humidity']) if 'pressure' in sensor: self.update_driver('BARPRES', sensor['pressure']) # age is difference between jdata[time_stamp] and sensor['last_seen'] # in minutes if 'time_stamp' in jdata and 'last_seen' in sensor: age = (jdata['time_stamp'] - sensor['last_seen']) / 60 self.update_driver('GV1', age) if 'stats' in sensor: stats = sensor['stats'] if 'pm2.5_10minute' in stats: self.update_driver('GV3', stats['pm2.5_10minute']) if 'pm2.5_30minute' in stats: self.update_driver('GV4', stats['pm2.5_30minute']) if 'pm2.5_60minute' in stats: self.update_driver('GV5', stats['pm2.5_60minute']) if 'pm2.5_6hour' in stats: self.update_driver('GV6', stats['pm2.5_6hour']) if 'pm2.5_24hour' in stats: self.update_driver('GV7', stats['pm2.5_24hour']) if 'pm2.5_1week' in stats: self.update_driver('GV8', stats['pm2.5_1week']) except Exception as e: LOGGER.error('Current observation update failure') LOGGER.error(e)
bpaauwe/udi-purpleair-poly
nodes/sensor.py
sensor.py
py
7,199
python
en
code
0
github-code
13
32763231633
import os import sys os.environ['SPARK_HOME'] = "/usr/hdp/3.0.1.0-187/spark2" os.environ['HIVE_HOME'] = "/usr/hdp/3.0.1.0-187/hive" os.environ["HADOOP_USER_NAME"] = "spark" os.environ['PYSPARK_SUBMIT_ARGS'] = '--master yarn --deploy-mode client ' \ '--num-executors 11 --executor-memory 19G --executor-cores 5 ' \ '--driver-memory 1G pyspark-shell' sys.path.append("/usr/hdp/3.0.1.0-187/spark2/python") from pyspark.sql import SparkSession from pyspark.sql.functions import * spark = SparkSession.builder.appName("pySpark1_redeyesofangel").getOrCreate() spark.sql("select current_timestamp() ").show() spark.stop()
shhan1987/redeyesofangel
pySpark_1.py
pySpark_1.py
py
692
python
en
code
0
github-code
13
5847705059
#!/usr/bin/env python3 """ A derivative of the requests module, which handles caching and allows a cache-only mode for testing (because Python makes it so difficult to mock requests). Overrides get, post, and head all of which also take an additional, optional, cachetime (secs). Also adds ftp_get(server, dir, filename, outfile), ftp_index(server, ftpdir) ftp_url_index(url) """ # Derive everything from requests except the calls overridden in this file: from requests import * import requests import re from bs4 import BeautifulSoup import json import os, sys import time import dateutil.parser import traceback from urllib.parse import urlparse from ftplib import FTP, error_perm # # Some globals # # Default place for the cache CACHEDIR = 'cache' # How old a file can be, in seconds, before being replaced CACHESECS = 2*60*60 # Local mode, don't ever fetch from the network. # Used for unit testing, because none of the Python packages for mocking # network requests (mock, requests_mock, httpretty) actually work. Sigh. LOCAL_MODE = False # Verbose debugging DEBUG = False hrefpat = re.compile('href="([^"]*)">([^<]+)<', flags=re.IGNORECASE) # requests.Response doesn't allow setting the text member, # so here's a fake class that does. class FakeResponse: def __init__(self): self.status_code = 404 self.text = None self.headers = {} def json(self): if not self.text: return "" return json.loads(self.text) # # Override the three important requests module functions # to consult the cache. # def get(url, params=None, **kwargs): """Wrapper for requests.get that can fetch from cache instead. Optional keyword arguments: cachefile: specifies the location of the cache file, otherwise it will be calculated. 
cachesecs: how old a file can be before being replaced, default: CACHESECS """ if DEBUG: if LOCAL_MODE: print("=== get LOCAL MODE:", url) else: print("=== get :", url) if 'cachefile' in kwargs and kwargs["cachefile"]: cachefile = kwargs['cachefile'] else: cachefile = url_to_cache_filename(url) if DEBUG: print("cachefile:", cachefile) if 'cachesecs' in kwargs: cachesecs = kwargs['cachesecs'] else: cachesecs = CACHESECS # The response that will be returned response = FakeResponse() if LOCAL_MODE: if os.path.exists(cachefile): if DEBUG: print("LOCAL_MODE: Fetching from cachefile:", cachefile) with open(cachefile) as fp: response.text = fp.read() response.status_code = 200 return response print("Eek, cachefile existed but didn't return?") # Cache file doesn't exist, but it's local mode so # can't use the net. if DEBUG: print("*** billrequests.get(): LOCAL_MODE, but " "cachefile %s doesn't exist" % cachefile) print(" for URL", url) response.status_code = 404 response.text = None return response if DEBUG: print("**** billrequests.get: NOT LOCAL MODE") if os.path.exists(cachefile): filestat = os.stat(cachefile) if (time.time() - filestat.st_mtime) < cachesecs or cachesecs < 0: if DEBUG: print("Already cached:", url, '->', cachefile, file=sys.stderr) with open(cachefile) as fp: response.text = fp.read() response.status_code = 200 response.headers['Last-Modified'] = \ time.strftime('%a, %d %b %Y %X %Z', time.localtime(filestat.st_mtime)) return response # The cachefile doesn't exist or was too old. Fetch from the net # and write to the cachefile. # First remove cachefile or cachesecs args that requests isn't expecting: if "cachefile" in kwargs: del kwargs["cachefile"] if "cachesecs" in kwargs: del kwargs["cachesecs"] print("NETWORK get", url, file=sys.stderr) try: response = requests.get(url, params, **kwargs) if response.status_code == 200: # encoding is supposed to default to utf-8, but sometimes # it defaults to ascii despite all reason. 
Force it: with open(cachefile, "w", encoding='utf-8') as cachefp: cachefp.write(response.text) else: print("*** NETWORK ERROR fetching %s: status code was %d" % (url, response.status_code), file=sys.stderr) except Exception as e: print("*** NETWORK ERROR fetching %s: %s" % (url, str(e)), file=sys.stderr) return response def head(url, **kwargs): """Wrapper for requests.head that can fetch from cache instead. Optional cachefile argument specifies the location of the cache file, otherwise it will be calculated. """ if DEBUG: if LOCAL_MODE: print("=== head LOCAL MODE:", url) else: print("=== head :", url) if 'cachefile' in kwargs and kwargs["cachefile"]: cachefile = kwargs['cachefile'] else: cachefile = url_to_cache_filename(url) if 'cachesecs' in kwargs: cachesecs = kwargs['cachesecs'] else: cachesecs = CACHESECS # The response that will be returned response = FakeResponse() if LOCAL_MODE: if DEBUG: print("head LOCAL MODE:", url, "->", cachefile) if os.path.exists(cachefile): response.status_code = 200 else: response.status_code = 404 return response if DEBUG: print("**** billrequests.head: NOT LOCAL MODE") if os.path.exists(cachefile): filestat = os.stat(cachefile) if (time.time() - filestat.st_mtime) < cachesecs or cachesecs < 0: response.status_code = 200 return response return requests.head(url, **kwargs) # # Some other helpful functions for fetching bill-related files. # # Bill URLs will match this pattern. bill_url_pat = re.compile( r'https://www.nmlegis.gov/Legislation/Legislation\?' r'chamber=([HS])&' r'legtype=([JBR]+)&' r'legno=([0-9]+)&' r'year=([0-9]{2}s?[0-9]*)') def url_to_cache_filename(url, billdic=None): """Calculate the cache filename for the given url. If billdic is provided, it will be used for keys 'billno' and 'year' otherwise all such information will be parsed from the URL. """ # Is it a bill URL? 
That's true if billdic is set, # or if the bill fits this pattern: if billdic: return os.path.join(CACHEDIR, '20%s-%s.html' % (billdic['year'], billdic['billno'])) bill_url_matcher = bill_url_pat.match(url) if bill_url_matcher: chamber, billtype, number, yearcode = bill_url_matcher.groups() return os.path.join(CACHEDIR, '20%s-%s%s%s.html' % (yearcode, chamber, billtype, number)) # It wasn't a bill URL. Fall back to making a filename # that's similar to the one in the URL. return os.path.join(CACHEDIR, url.replace('https://www.nmlegis.gov/', '') \ .replace('/Legislation', '') \ .replace('/', '_') \ .replace('?', '_') \ .replace('&', '_')) def soup_from_cache_or_net(url, billdic=None, cachesecs=CACHESECS): """url is a full URL including https://www.nmlegis.gov/ . If there is a recent cached version, use it, otherwise fetch the file and cache it. If the cache file is older than cachesecs, replace it. If billdic is provided, it will be used for keys 'billno' and 'year' to make a cleaner cache file name, like '2020-HB31.html'. Either way, return a BS soup of the contents. """ if DEBUG: print("=== soup_from_cache_or_net:", url, "billdic", billdic) cachefile = url_to_cache_filename(url, billdic) response = get(url, cachefile=cachefile, cachesecs=cachesecs) if response.status_code != 200: print("No soup! Response was", response.status_code, file=sys.stderr) print(" on cache %s,\n URL %s" % (cachefile, url), file=sys.stderr) # print(traceback.print_stack(), file=sys.stderr) if DEBUG: print(">>>>> Couldn't get", url, "-->", cachefile, file=sys.stderr) # print("wget '%s' -O %s" % (url, cachefile), file=sys.stderr) return None soup = BeautifulSoup(response.text, "lxml") if not soup: print("No soup! On cache %s,\n URL %s" % (cachefile, url), file=sys.stderr) # print(traceback.print_stack(), file=sys.stderr) if DEBUG: print("***** Couldn't get", url, "-->", cachefile, file=sys.stderr) return soup # Seriously? requests can't handle ftp? 
def ftp_get(server, dir, filename, outfile): """Fetch a file via ftp. Write the content to a file. """ try: ftp = FTP(server) ftp.login() ftp.cwd(dir) ftp.retrbinary('%s' % filename, open(outfile, 'wb').write) ftp.quit() except error_perm as e: raise FileNotFoundError(str(e)) def get_html_dirlist(url): """Read an html dir listing page; return the contents as a list of dics, [ { 'name': 'SB0048SFL1.pdf, 'size': '136 KB', "url": "https://www.nmlegis.gov/Sessions/20%20Regular/firs/HB0004.PDF", 'Last Modified': '1/24/19 1:19:00 PM MST } ] Note that times are datetimes but no timezone is set. Frustratingly, if you view the ftp: URL in a web server it shows timezones, but actually retrieving the listing via ftp drops them. """ while url.endswith('/'): url = url[:-1] try: cachefile = url_to_cache_filename(url) response = get(url, cachefile=cachefile) listing = response.text except Exception as e: print("Exception getting dirlist on", url, ":", e, file=sys.stderr) return [] if not listing: print("No listing, cachefile was", cachefile) return [] ls = [] # The listing is inside a <pre>, with lines separated by <br>, # and each line is formatted like this: # 1/25/2020 7:32 PM 133392 <A HREF="/Sessions/20%20Regular/firs/HB0019.PDF">HB0019.PDF</A><br> # Strip off everything that's not inside <pre></pre> # This is a large file, and using re on it takes forEVER. # String find is much faster. 
pre = listing.find("<pre>") if pre < 0: pre = listing.find("<PRE>") if pre > 0: listing = listing[pre+5:] pre = listing.find("</pre>") if pre < 0: pre = listing.find("</PRE>") if pre > 0: listing = listing[:pre] lines = listing.split("<br>") for line in lines: words = line.split() if len(words) != 6: continue try: dic = {} try: dic["size"] = int(words[3]) month, day, year = [int(n) for n in words[0].split("/")] hour, minute = [int(n) for n in words[1].split(":")] if words[2] == "PM": hour += 12 dic["Last Modified"] = "%s\t%s %s MST" % tuple(words[0:3]) # words[5] looks like: # 'HREF="/Sessions/20%20Regular/firs/HB0001.PDF">HB0001.PDF</A>' match = hrefpat.match(words[5]) url = match.group(1) while url.startswith('/'): url = url[1:] dic["url"] = "https://www.nmlegis.gov/" + url dic["name"] = match.group(2) except Exception as e: # print("Couldn't parse line of directory listing in", url, ":", # line, file=sys.stderr) continue ls.append(dic) except RuntimeError as e: continue return ls def ftp_index(server, ftpdir): """Read an ftp index page; return the contents as a list of dics, [ { 'name': 'SB0048SFL1.pdf, 'size': '136 KB', 'Last Modified': '1/24/19 1:19:00 PM MST } ] Note that times are datetimes but no timezone is set. Frustratingly, if you view the ftp: URL in a web server it shows timezones, but actually retrieving the listing via ftp drops them. """ # print("Fetching index of %s from %s" % (ftpdir, server)) ftp = FTP(server) ftp.login() ftp.cwd(ftpdir) ls = [] # MLST and MLSD are supposedly the right way to do this, but # ftp.nmlegis.gov doesn't support it. Uncomment and finish # implementing this if your server does offer MLSD. 
# ftp.retrlines('MLSD', ls.append) for entry in ls: print(entry) listlines = [] listout = ftp.retrlines('LIST', listlines.append) # Lines for directories: # 12-19-18 10:03AM <DIR> Legislator Information # Lines for files: # 01-24-19 04:06PM 93184 Legislators.XLS # 01-28-19 12:58PM 288257 HB0005.PDF listing = [] baseurl = "ftp://%s/%s" % (server, ftpdir) for line in listlines: if '<DIR>' in line: match = re.match('(\d+-\d+-\d+ +\d+:\d+[AP]M) +<DIR> +(.+)', line) if match: listing.append({ "name": match.group(2), "url": "%s/%s" % (baseurl, match.group(2)), "Last Modified": dateutil.parser.parse(match.group(1)), "size": int(match.group(2)) }) else: match = re.match('(\d+-\d+-\d+ +\d+:\d+[AP]M) +(\d+) +(.+)', line) if match: listing.append({ "name": match.group(3), "url": "%s/%s" % (baseurl, match.group(3)), "Last Modified": dateutil.parser.parse(match.group(1)), "size": int(match.group(2)) }) return listing def ftp_url_index(url): """Read an ftp index page; return the contents as a list of dics. """ purl = urlparse(url) if not purl.scheme: netloc = 'www.nmlegis.gov' elif purl.scheme == 'ftp': netloc = purl.netloc else: raise RuntimeError("ftp_url_index: bad URL %s" % url) return ftp_index(netloc, purl.path) if __name__ == '__main__': pass
akkana/billtracker
billtracker/bills/billrequests.py
billrequests.py
py
14,794
python
en
code
5
github-code
13
3100795310
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
import os
import sys
from collections import defaultdict

from itemadapter import ItemAdapter


# useful for handling different item types with a single interface
class GameRatingsPipeline:
    """Accumulates scraped rating rows per results page and writes them all
    to ``<spider.name>.csv`` when the spider closes.

    The output directory defaults to ``~/Documents`` and can be overridden
    by setting a ``path`` attribute on the pipeline instance.
    """

    def __init__(self):
        # Per-instance accumulator mapping page number -> list of result rows.
        # (The original used a *class-level* defaultdict named ``dict``, which
        # both shadowed the builtin and shared state across all instances.)
        self.results_by_page = defaultdict(list)

    def process_item(self, item, spider):
        """Collect the page's results and tick the spider's progress bar.

        Returns the item so downstream pipelines still receive it — Scrapy's
        pipeline contract requires ``process_item`` to return an item (or
        raise DropItem); the original returned None, dropping every item.
        """
        page = item['page']
        results = item['results']
        self.results_by_page[page] += results
        spider.pbar.update(1)
        return item

    def close_spider(self, spider):
        """Write all collected rows, ordered by page number, to the CSV."""
        filename = f'{spider.name}.csv'
        home = os.path.expanduser('~')
        path = getattr(self, 'path', f'{home}/Documents')
        # Bug fix: ``filename`` was previously computed but unused, and the
        # path contained a literal "(unknown)" placeholder instead.
        filepath = f'{path}/{filename}'
        with open(filepath, 'w') as f:
            f.write('date,title,platform,score,mustplay\n')
            for page, results in sorted(self.results_by_page.items()):
                for result in results:
                    f.write(f'{result}\n')
ch3rub1m/game_ratings
game_ratings/pipelines.py
pipelines.py
py
1,017
python
en
code
0
github-code
13
5036699642
# Famous Quote Program
# Author: hifza zafar
# Date: 10november2023

# Store the famous person's name in a variable
famous_person = "Albert Einstein"

# Store the quote in a variable
quote = "The only real valuable thing is intuition."

# Compose the quote message
message = f'{famous_person} once said, "{quote}"'

# Bug fix: print the quote message *before* it is overwritten below —
# previously it was never printed at all.
print(message)

# Favorite Number Program
# Author: hifzazafar
# Date: 10november2023

# Store your favorite number in a variable
favorite_number = 0

# Create a message revealing your favorite number
message = f"My favorite number is {favorite_number}."

# Print the message (once — the original printed this line twice)
print(message)
hif-zafar/piaic_class1_assignment
question10.py
question10.py
py
651
python
en
code
0
github-code
13
26836898012
import turtle
import random


def screenClick(x, y):
    """Right-button click handler: stamp a randomly styled turtle at (x, y).

    Only the position is chosen by the user — pen color, fill color,
    heading (0-359 degrees) and stamp size (2-7) are all randomized.
    """
    # Random pen and fill colors (six random() calls, same order as before).
    pen_rgb = (random.random(), random.random(), random.random())
    fill_rgb = (random.random(), random.random(), random.random())
    turn_angle = random.randrange(0, 360)   # random heading, 0-359 degrees
    stamp_size = random.randrange(2, 8)     # random size, 2-7

    turtle.left(turn_angle)                 # set the heading first
    turtle.pencolor(pen_rgb)                # random line color
    turtle.goto(x, y)                       # move to the clicked spot
    turtle.turtlesize(stamp_size)
    turtle.color(fill_rgb)                  # random turtle color
    turtle.stamp()                          # leave a stamp


# -- globals --
pSize = 5
r, g, b, R, G, B = 0, 0, 0, 0, 0, 0
angle = 0
t_size = 0

# -- main --
turtle.title("위치만 네 맘, 다른건 내 맘")
turtle.shape('turtle')
turtle.pensize(pSize)
turtle.pendown()
turtle.onscreenclick(screenClick, 3)        # bind to the right mouse button
turtle.done()
inte168/OpenProject1
2weak/ch02-1.py
ch02-1.py
py
896
python
ko
code
0
github-code
13
15806378353
from __future__ import division

import os
import rospkg

from python_qt_binding import loadUi
from python_qt_binding.QtCore import Qt, QTimer, qWarning, Slot
from python_qt_binding.QtWidgets import QAction, QMenu, QWidget

import rospy

from rostopic import get_topic_class
from rqt_py_common.topic_helpers import find_slots_by_type_bfs
from tf.transformations import quaternion_matrix, quaternion_about_axis

from geometry_msgs.msg import Quaternion, Pose, Point

from OpenGL.GL import glBegin, glColor3f, glEnd, glLineWidth, glMultMatrixf, glTranslatef, \
    glVertex3f, GL_LINES, GL_QUADS

from .gl_widget import GLWidget


# main class inherits from the ui window class
class PoseViewWidget(QWidget):
    """rqt widget that renders a colored 3D box whose orientation (and,
    when available, position) tracks Pose/Quaternion/Point data from a
    ROS topic dropped onto the view.
    """

    def __init__(self, plugin):
        super(PoseViewWidget, self).__init__()
        rp = rospkg.RosPack()
        ui_file = os.path.join(rp.get_path('rqt_pose_view'), 'resource', 'PoseViewWidget.ui')
        loadUi(ui_file, self)
        self._plugin = plugin

        # Pose state updated by message_callback and read by _paintGLBox.
        self._position = (2.0, 2.0, 2.0)
        self._orientation = quaternion_about_axis(0.0, (1.0, 0.0, 0.0))
        self._topic_name = None
        self._subscriber = None

        # create GL view
        self._gl_view = GLWidget()
        self._gl_view.setAcceptDrops(True)

        # backup and replace original paint method
        self._gl_view.paintGL_original = self._gl_view.paintGL
        self._gl_view.paintGL = self._gl_view_paintGL

        # backup and replace original mouse release method
        self._gl_view.mouseReleaseEvent_original = self._gl_view.mouseReleaseEvent
        self._gl_view.mouseReleaseEvent = self._gl_view_mouseReleaseEvent

        # add GL view to widget layout
        self.layout().addWidget(self._gl_view)

        # init and start update timer with 40ms (25fps)
        self._update_timer = QTimer(self)
        self._update_timer.timeout.connect(self.update_timeout)
        self._update_timer.start(40)

    def save_settings(self, plugin_settings, instance_settings):
        # Persist the camera as the repr() of the view matrix; restored below.
        view_matrix_string = repr(self._gl_view.get_view_matrix())
        instance_settings.set_value('view_matrix', view_matrix_string)

    def restore_settings(self, plugin_settings, instance_settings):
        view_matrix_string = instance_settings.value('view_matrix')
        try:
            # NOTE(review): eval() of a persisted string — acceptable only if
            # the settings file is trusted; confirm before reusing elsewhere.
            view_matrix = eval(view_matrix_string)
        except Exception:
            view_matrix = None

        if view_matrix is not None:
            self._gl_view.set_view_matrix(view_matrix)
        else:
            self._set_default_view()

    def _set_default_view(self):
        # Default camera: rotated and pulled back so the grid/box are visible.
        self._gl_view.makeCurrent()
        self._gl_view.reset_view()
        self._gl_view.rotate((0, 0, 1), 45)
        self._gl_view.rotate((1, 0, 0), -65)
        self._gl_view.translate((0, -3, -15))

    def update_timeout(self):
        # Driven by the 40ms QTimer; triggers a repaint.
        self._gl_view.makeCurrent()
        self._gl_view.updateGL()

    def _gl_view_paintGL(self):
        # Monkey-patched in place of GLWidget.paintGL (see __init__).
        self._gl_view.paintGL_original()
        self._paintGLGrid()
        self._paintGLCoorsystem()
        self._paintGLBox()

    def _paintGLBox(self):
        """Draw the six colored faces of the pose box at self._position,
        rotated by self._orientation."""
        # FIXME: add user configurable setting to allow use of translation as well
        self._position = (2.0, 2.0, 2.0)  # Set fixed translation for now

        glTranslatef(*self._position)     # Translate Box

        matrix = quaternion_matrix(self._orientation)  # convert quaternion to translation matrix

        # tf uses row-major while gl expects column-major
        matrix = matrix.transpose()
        glMultMatrixf(matrix)             # Rotate Box

        glBegin(GL_QUADS)                 # Start Drawing The Box

        glColor3f(0.0, 1.0, 0.0)
        glVertex3f(1.0, 1.0, -1.0)        # Top Right Of The Quad (Top)
        glVertex3f(-1.0, 1.0, -1.0)       # Top Left Of The Quad (Top)
        glVertex3f(-1.0, 1.0, 1.0)        # Bottom Left Of The Quad (Top)
        glVertex3f(1.0, 1.0, 1.0)         # Bottom Right Of The Quad (Top)

        glColor3f(0.5, 1.0, 0.5)
        glVertex3f(1.0, -1.0, 1.0)        # Top Right Of The Quad (Bottom)
        glVertex3f(-1.0, -1.0, 1.0)       # Top Left Of The Quad (Bottom)
        glVertex3f(-1.0, -1.0, -1.0)      # Bottom Left Of The Quad (Bottom)
        glVertex3f(1.0, -1.0, -1.0)       # Bottom Right Of The Quad (Bottom)

        glColor3f(0.0, 0.0, 1.0)
        glVertex3f(1.0, 1.0, 1.0)         # Top Right Of The Quad (Front)
        glVertex3f(-1.0, 1.0, 1.0)        # Top Left Of The Quad (Front)
        glVertex3f(-1.0, -1.0, 1.0)       # Bottom Left Of The Quad (Front)
        glVertex3f(1.0, -1.0, 1.0)        # Bottom Right Of The Quad (Front)

        glColor3f(0.5, 0.5, 1.0)
        glVertex3f(1.0, -1.0, -1.0)       # Bottom Left Of The Quad (Back)
        glVertex3f(-1.0, -1.0, -1.0)      # Bottom Right Of The Quad (Back)
        glVertex3f(-1.0, 1.0, -1.0)       # Top Right Of The Quad (Back)
        glVertex3f(1.0, 1.0, -1.0)        # Top Left Of The Quad (Back)

        glColor3f(1.0, 0.5, 0.5)
        glVertex3f(-1.0, 1.0, 1.0)        # Top Right Of The Quad (Left)
        glVertex3f(-1.0, 1.0, -1.0)       # Top Left Of The Quad (Left)
        glVertex3f(-1.0, -1.0, -1.0)      # Bottom Left Of The Quad (Left)
        glVertex3f(-1.0, -1.0, 1.0)       # Bottom Right Of The Quad (Left)

        glColor3f(1.0, 0.0, 0.0)
        glVertex3f(1.0, 1.0, -1.0)        # Top Right Of The Quad (Right)
        glVertex3f(1.0, 1.0, 1.0)         # Top Left Of The Quad (Right)
        glVertex3f(1.0, -1.0, 1.0)        # Bottom Left Of The Quad (Right)
        glVertex3f(1.0, -1.0, -1.0)       # Bottom Right Of The Quad (Right)
        glEnd()                           # Done Drawing The Quad

    def _paintGLGrid(self):
        """Draw a white reference grid in the z=0 plane."""
        resolution_millimeters = 1
        gridded_area_size = 100

        glLineWidth(1.0)

        glBegin(GL_LINES)

        glColor3f(1.0, 1.0, 1.0)

        # Axes of the grid through the origin.
        glVertex3f(gridded_area_size, 0, 0)
        glVertex3f(-gridded_area_size, 0, 0)
        glVertex3f(0, gridded_area_size, 0)
        glVertex3f(0, -gridded_area_size, 0)

        num_of_lines = int(gridded_area_size / resolution_millimeters)

        for i in range(num_of_lines):
            # Lines on the positive side of each axis...
            glVertex3f(resolution_millimeters * i, -gridded_area_size, 0)
            glVertex3f(resolution_millimeters * i, gridded_area_size, 0)
            glVertex3f(gridded_area_size, resolution_millimeters * i, 0)
            glVertex3f(-gridded_area_size, resolution_millimeters * i, 0)

            # ...and mirrored on the negative side.
            glVertex3f(resolution_millimeters * (-i), -gridded_area_size, 0)
            glVertex3f(resolution_millimeters * (-i), gridded_area_size, 0)
            glVertex3f(gridded_area_size, resolution_millimeters * (-i), 0)
            glVertex3f(-gridded_area_size, resolution_millimeters * (-i), 0)

        glEnd()

    def _paintGLCoorsystem(self):
        """Draw the coordinate axes: x red, y green, z blue."""
        glLineWidth(10.0)

        glBegin(GL_LINES)

        glColor3f(1.0, 0.0, 0.0)
        glVertex3f(0.0, 0.0, 0.0)
        glVertex3f(1.0, 0.0, 0.0)

        glColor3f(0.0, 1.0, 0.0)
        glVertex3f(0.0, 0.0, 0.0)
        glVertex3f(0.0, 1.0, 0.0)

        glColor3f(0.0, 0.0, 1.0)
        glVertex3f(0.0, 0.0, 0.0)
        glVertex3f(0.0, 0.0, 1.0)

        glEnd()

    def _gl_view_mouseReleaseEvent(self, event):
        # Monkey-patched in place of GLWidget.mouseReleaseEvent (see __init__):
        # right-click opens a context menu offering "Reset view".
        if event.button() == Qt.RightButton:
            menu = QMenu(self._gl_view)
            action = QAction(self._gl_view.tr("Reset view"), self._gl_view)
            menu.addAction(action)
            action.triggered.connect(self._set_default_view)
            menu.exec_(self._gl_view.mapToGlobal(event.pos()))

    @Slot('QDragEnterEvent*')
    def dragEnterEvent(self, event):
        """Accept the drag only if it carries a topic whose message type
        contains usable Pose/Quaternion/Point data."""
        if event.mimeData().hasText():
            topic_name = str(event.mimeData().text())
            if len(topic_name) == 0:
                qWarning('PoseViewWidget.dragEnterEvent(): event.mimeData() text is empty')
                return
        else:
            if not hasattr(event.source(), 'selectedItems') or len(event.source().selectedItems()) == 0:
                qWarning('PoseViewWidget.dragEnterEvent(): event.source() has no attribute selectedItems or length of selectedItems is 0')
                return
            item = event.source().selectedItems()[0]
            topic_name = item.data(0, Qt.UserRole)
            if topic_name is None:
                qWarning('PoseViewWidget.dragEnterEvent(): selectedItem has no UserRole data with a topic name')
                return

        # check for valid topic
        msg_class, self._topic_name, _ = get_topic_class(topic_name)
        if msg_class is None:
            qWarning('PoseViewWidget.dragEnterEvent(): No message class was found for topic "%s".' % topic_name)
            return

        # check for valid message class
        quaternion_slot_path, point_slot_path = self._get_slot_paths(msg_class)

        if quaternion_slot_path is None and point_slot_path is None:
            qWarning('PoseViewWidget.dragEnterEvent(): No Pose, Quaternion or Point data was found outside of arrays in "%s" on topic "%s".'
                     % (msg_class._type, topic_name))
            return

        event.acceptProposedAction()

    @Slot('QDropEvent*')
    def dropEvent(self, event):
        """Subscribe to the dropped topic, replacing any current subscription."""
        if event.mimeData().hasText():
            topic_name = str(event.mimeData().text())
        else:
            dropped_item = event.source().selectedItems()[0]
            topic_name = str(dropped_item.data(0, Qt.UserRole))
        self._unregister_topic()
        self._subscribe_topic(topic_name)

    def _unregister_topic(self):
        if self._subscriber:
            self._subscriber.unregister()

    @staticmethod
    def _make_path_list_from_path_string(path):
        # '' -> [], 'a/b' -> ['a', 'b']
        path = path.split('/')
        if path == ['']:
            return []
        return path

    @staticmethod
    def _get_slot_paths(msg_class):
        """Return (quaternion_slot_path, point_slot_path) — slot-name lists
        leading to orientation/position data in msg_class, or None when not
        found. Prefers a full Pose over separate Quaternion/Point fields."""
        # find first Pose in msg_class
        pose_slot_paths = find_slots_by_type_bfs(msg_class, Pose)
        for path in pose_slot_paths:
            # make sure the path does not contain an array, because we don't want to deal with empty arrays...
            if '[' not in path:
                path = PoseViewWidget._make_path_list_from_path_string(pose_slot_paths[0])
                return path + ['orientation'], path + ['position']

        # if no Pose is found, find first Quaternion and Point
        quaternion_slot_paths = find_slots_by_type_bfs(msg_class, Quaternion)
        for path in quaternion_slot_paths:
            if '[' not in path:
                quaternion_slot_path = PoseViewWidget._make_path_list_from_path_string(path)
                break
        else:
            quaternion_slot_path = None

        point_slot_paths = find_slots_by_type_bfs(msg_class, Point)
        for path in point_slot_paths:
            if '[' not in path:
                point_slot_path = PoseViewWidget._make_path_list_from_path_string(path)
                break
        else:
            point_slot_path = None

        return quaternion_slot_path, point_slot_path

    def _subscribe_topic(self, topic_name):
        msg_class, self._topic_name, _ = get_topic_class(topic_name)
        quaternion_slot_path, point_slot_path = self._get_slot_paths(msg_class)

        self._subscriber = rospy.Subscriber(
            self._topic_name,
            msg_class,
            self.message_callback,
            callback_args=(quaternion_slot_path, point_slot_path)
        )

    def message_callback(self, message, slot_paths):
        """Extract orientation/position from an incoming message by walking
        the slot paths resolved at subscription time."""
        quaternion_slot_path = slot_paths[0]
        point_slot_path = slot_paths[1]

        if quaternion_slot_path is None:
            # No orientation data: fall back to the identity quaternion.
            self._orientation = quaternion_about_axis(0.0, (1.0, 0.0, 0.0))
        else:
            orientation = message
            for slot_name in quaternion_slot_path:
                orientation = getattr(orientation, slot_name)
            self._orientation = (orientation.x, orientation.y, orientation.z, orientation.w)

        if point_slot_path is None:
            # if no point is given, set it to a fixed offset so the axes can be seen
            self._position = (2.0, 2.0, 2.0)
        else:
            position = message
            for slot_name in point_slot_path:
                position = getattr(position, slot_name)
            self._position = (position.x, position.y, position.z)

    def shutdown_plugin(self):
        self._unregister_topic()
jincheng-ai/ros-melodic-python3-opencv4
xacro/rqt_pose_view/src/rqt_pose_view/pose_view_widget.py
pose_view_widget.py
py
12,345
python
en
code
5
github-code
13
36134062566
"""Zhihu login helper script: keeps a session with an LWP cookie jar on
disk, fetches the login captcha, and posts credentials to the legacy
phone/email login endpoints."""

import requests
import os
import http.cookiejar as cookielib
import re

# Shared session; cookies are persisted to cookies.txt between runs.
session = requests.session()
session.cookies = cookielib.LWPCookieJar(filename='cookies.txt')
try:
    session.cookies.load(ignore_discard = True)
    print('Load Cookie')
except:
    # NOTE(review): bare except silently hides any load error, not just a
    # missing cookie file.
    print("Cannot load Cookie")

agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'
header = {
    'Host': 'www.zhihu.com',
    # NOTE(review): "zhizhu.com" looks like a typo for "zhihu.com" — confirm.
    "Referer": "https://www.zhizhu.com",
    'User-Agent': agent
}

def is_login():
    """Heuristic login check: the inbox page returns 200 only when the
    session is authenticated (redirects are not followed)."""
    inbox_url = 'https://www.zhihu.com/inbox'
    response = session.get(inbox_url, headers = header, allow_redirects = False)
    if response.status_code==200:
        return True
    else:
        return False

def get_captcha():
    """Download the current login captcha to captcha.jpg next to this file,
    try to display it, and return the text the user types in."""
    import time
    # Millisecond timestamp acts as a cache-buster for the captcha URL.
    t = str(int(time.time()*1000))
    captcha_url = 'https://www.zhihu.com/captcha.gif?r={}&type=login'.format(t)
    response = session.get(captcha_url, headers = header)
    with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'captcha.jpg'), 'wb') as f:
        f.write(response.content)
    from PIL import Image
    try:
        im = Image.open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'captcha.jpg'))
        im.show()
        im.close()
    except:
        # Best-effort display only; fall through to manual entry.
        pass
    captcha = input("input the captcha \n >")
    return captcha

def get_index():
    """Fetch the Zhihu front page and save it as index_page.html."""
    url = 'https://www.zhihu.com'
    response = session.get(url, headers = header)
    with open(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'index_page.html'), 'wb') as f:
        f.write(response.text.encode('utf-8'))
    print('OK')

def get_csrf():
    """Scrape the _xsrf token from the front page; '' when not found."""
    response = session.get('https://www.zhihu.com', headers = header)
    text = response.text
    pattern = r'.*name="_xsrf" value="(.*)"'
    re_match = re.match(pattern, text, re.S)
    if re_match:
        return re_match.group(1)
    else:
        return ''

def zhihu_login(account, password):
    """Post credentials (phone number or email) plus xsrf token and captcha
    to the matching login endpoint, then persist the session cookies.

    NOTE(review): if `account` is neither a 9-10 digit phone number nor
    contains '@', post_url stays '' and the POST below will fail — consider
    validating the account format up front.
    """
    phone_pattern = r'^\d{9,10}$'
    post_url = ''
    post_data = dict()
    post_data['password'] = password
    post_data['_xsrf'] = get_csrf()
    post_data['captcha'] = get_captcha()
    if re.match(phone_pattern, account):
        post_url ='https://www.zhihu.com/login/phone_num'
        post_data['phone_num'] = account
    elif "@" in account:
        post_url ='https://www.zhihu.com/login/email'
        post_data['email'] = account
    response = session.post(post_url, post_data, headers=header)
    print(response.text)
    session.cookies.save()

# Example (real credentials removed from this comment for security):
# zhihu_login('<phone_number>', '<password>')
print(is_login())
get_index()
codescracker/web_crawler
ArticleSpider/utils/zhihu_login_requests.py
zhihu_login_requests.py
py
2,657
python
en
code
0
github-code
13
7985163004
"""Brother P-touch label printer driver: talks the raster command protocol
over a Bluetooth RFCOMM socket to query printer status and print a PNG."""

import contextlib
import sys
from enum import IntEnum, IntFlag

import bluetooth

import app_args
from config import set_default_bt, get_default_bt
from label_rasterizer import encode_png, rasterize

# Byte offsets into the printer's 32-byte status response.
STATUS_OFFSET_ERROR_INFORMATION_1 = 8
STATUS_OFFSET_ERROR_INFORMATION_2 = 9
STATUS_OFFSET_MEDIA_WIDTH = 10
STATUS_OFFSET_MEDIA_TYPE = 11
STATUS_OFFSET_MODE = 15
STATUS_OFFSET_MEDIA_LENGTH = 17
STATUS_OFFSET_STATUS_TYPE = 18
STATUS_OFFSET_PHASE_TYPE = 19
STATUS_OFFSET_PHASE_NUMBER = 20
STATUS_OFFSET_NOTIFICATION_NUMBER = 22
STATUS_OFFSET_TAPE_COLOR_INFORMATION = 24
STATUS_OFFSET_TEXT_COLOR_INFORMATION = 25
STATUS_OFFSET_HARDWARE_SETTINGS = 26

# Map the size of tape to the number of dots on the print area
TZE_DOTS = {
    3: 24,  # Actually 3.5mm, not sure how this is reported if its 3 or 4
    6: 32,
    9: 50,
    12: 70,
    18: 112,
    24: 128
}

# Global for the media width so we can make sure we rasterize/ print to the right size
detected_media_width = -1


class ErrorInformation1(IntFlag):
    NO_MEDIA = 0x01
    CUTTER_JAM = 0x04
    WEAK_BATTERIES = 0x08
    HIGH_VOLTAGE_ADAPTER = 0x40


class ErrorInformation2(IntFlag):
    WRONG_MEDIA = 0x01
    COVER_OPEN = 0x10
    OVERHEATING = 0x20


class MediaType(IntEnum):
    NO_MEDIA = 0x00
    LAMINATED_TAPE = 0x01
    NON_LAMINATED_TAPE = 0x03
    HEAT_SHRINK_TUBE = 0x11
    INCOMPATIBLE_TAPE = 0xFF


class Mode(IntFlag):
    AUTO_CUT = 0x40
    MIRROR_PRINTING = 0x80


class StatusType(IntEnum):
    REPLY_TO_STATUS_REQUEST = 0x00
    PRINTING_COMPLETED = 0x01
    ERROR_OCCURRED = 0x02
    TURNED_OFF = 0x04
    NOTIFICATION = 0x05
    PHASE_CHANGE = 0x06


class PhaseType(IntEnum):
    EDITING_STATE = 0x00
    PRINTING_STATE = 0x01


class PhaseNumberEditingState(IntEnum):
    EDITING_STATE = 0x0000
    FEED = 0x0001


class PhaseNumberPrintingState(IntEnum):
    PRINTING = 0x0000
    COVER_OPEN_WHILE_RECEIVING = 0x0014


class NotificationNumber(IntEnum):
    NOT_AVAILABLE = 0x00
    COVER_OPEN = 0x01
    COVER_CLOSED = 0x02


class TapeColor(IntEnum):
    WHITE = 0x01
    OTHER = 0x02
    CLEAR = 0x03
    RED = 0x04
    BLUE = 0x05
    YELLOW = 0x06
    GREEN = 0x07
    BLACK = 0x08
    CLEAR_WHITE_TEXT = 0x09
    MATTE_WHITE = 0x20
    MATTE_CLEAR = 0x21
    MATTE_SILVER = 0x22
    SATIN_GOLD = 0x23
    SATIN_SILVER = 0x24
    BLUE_D = 0x30
    RED_D = 0x31
    FLUORESCENT_ORANGE = 0x40
    FLUORESCENT_YELLOW = 0x41
    BERRY_PINK_S = 0x50
    LIGHT_GRAY_S = 0x51
    LIME_GREEN_S = 0x52
    YELLOW_F = 0x60
    PINK_F = 0x61
    BLUE_F = 0x62
    WHITE_HEAT_SHRINK_TUBE = 0x70
    WHITE_FLEX_ID = 0x90
    YELLOW_FLEX_ID = 0x91
    CLEANING = 0xF0
    STENCIL = 0xF1
    INCOMPATIBLE = 0xFF


class TextColor(IntEnum):
    WHITE = 0x01
    OTHER = 0x02
    RED = 0x04
    BLUE = 0x05
    BLACK = 0x08
    GOLD = 0x0A
    BLUE_F = 0x62
    CLEANING = 0xF0
    STENCIL = 0xF1
    INCOMPATIBLE = 0XFF


@contextlib.contextmanager
def bt_socket_manager(*args, **kwargs):
    """Context manager yielding a BluetoothSocket, closed on normal exit.

    NOTE(review): the close is not in a try/finally, so the socket leaks
    if the with-body raises — confirm whether that is acceptable here.
    """
    socket = bluetooth.BluetoothSocket(*args, **kwargs)

    yield socket

    socket.close()


def get_printer_info(bt_address, bt_channel):
    """Connect, request a status report, and print it to stdout."""
    with bt_socket_manager(bluetooth.RFCOMM) as socket:
        socket.connect((bt_address, bt_channel))

        send_invalidate(socket)
        send_initialize(socket)
        send_status_information_request(socket)

        status_information = receive_status_information_response(socket)
        handle_status_information(status_information)


def make_label(options):
    """Full print flow: query status (which sets detected_media_width),
    rasterize the PNG for the detected tape width, stream the raster data,
    then loop on status responses until a handler calls sys.exit()."""
    with bt_socket_manager(bluetooth.RFCOMM) as socket:
        socket.connect((options.bt_address, options.bt_channel))

        send_invalidate(socket)
        send_initialize(socket)
        send_status_information_request(socket)

        status_information = receive_status_information_response(socket)
        handle_status_information(status_information)

        width = TZE_DOTS.get(detected_media_width)
        data = encode_png(options.image, width)

        send_switch_dynamic_command_mode(socket)
        send_switch_automatic_status_notification_mode(socket)
        send_print_information_command(socket, len(data), detected_media_width)
        send_various_mode_settings(socket)
        send_advanced_mode_settings(socket)
        send_specify_margin_amount(socket)
        send_select_compression_mode(socket)
        send_raster_data(socket, data)
        send_print_command_with_feeding(socket)

        # Exits via sys.exit() inside handle_status_information
        # (printing completed, error, or power-off).
        while True:
            status_information = receive_status_information_response(socket)
            handle_status_information(status_information)


def send_invalidate(socket: bluetooth.BluetoothSocket):
    """send 100 null bytes"""
    socket.send(b"\x00" * 100)


def send_initialize(socket: bluetooth.BluetoothSocket):
    """Send Initialization Code [1B 40]"""
    socket.send(b"\x1B\x40")


def send_switch_dynamic_command_mode(socket: bluetooth.BluetoothSocket):
    """set dynamic command mode to "raster mode" [1B 69 61 {01}]"""
    socket.send(b"\x1B\x69\x61\x01")


def send_switch_automatic_status_notification_mode(socket: bluetooth.BluetoothSocket):
    """set automatic status notification mode to "notify" [1B 69 21 {00}]"""
    socket.send(b"\x1B\x69\x21\x00")


def send_print_information_command(socket: bluetooth.BluetoothSocket, data_length: int, width):
    """
    Print to tape Command: [1B 69 7A {84 00 18 00 <data length 4 bytes> 00 00}]
    This is defined in the Brother Documentation under 'ESC i z Print information command'
    :param socket: The bluetooth socket to use
    :param data_length: The length of the data that will be sent
    :param width: Width of the tape used in mm. Defaults to 24mm
    """
    socket.send(b"\x1B\x69\x7A\x84\x00")
    # NOTE(review): chr(width) is a str, not bytes — verify this works with
    # the bluetooth socket implementation in use.
    socket.send(chr(width))  # n3 as per docs
    socket.send(b"\x00")  # n4
    # >> 4: presumably converts byte count to raster-line count (16 bytes
    # per line) — TODO confirm against the Brother raster reference.
    socket.send((data_length >> 4).to_bytes(4, 'little'))
    socket.send(b"\x00\x00")


def send_various_mode_settings(socket: bluetooth.BluetoothSocket):
    """set to auto-cut, no mirror printing [1B 69 4D {40}]"""
    socket.send(b"\x1B\x69\x4D")
    socket.send(Mode.AUTO_CUT.to_bytes(1, "big"))


def send_advanced_mode_settings(socket: bluetooth.BluetoothSocket):
    """Set print chaining off [1B 69 4B {08}]"""
    socket.send(b"\x1B\x69\x4B\x08")


def send_specify_margin_amount(socket: bluetooth.BluetoothSocket):
    """Set margin (feed) amount to 0 [1B 69 64 {00 00}]"""
    socket.send(b"\x1B\x69\x64\x00\x00")


def send_select_compression_mode(socket: bluetooth.BluetoothSocket):
    """Set to TIFF compression [4D {02}]"""
    socket.send(b"\x4D\x02")


def send_raster_data(socket: bluetooth.BluetoothSocket, data):
    """Send all raster data lines"""
    for line in rasterize(data):
        socket.send(bytes(line))


def send_print_command_with_feeding(socket: bluetooth.BluetoothSocket):
    """print and feed [1A]"""
    socket.send(b"\x1A")


def send_status_information_request(socket: bluetooth.BluetoothSocket):
    """request status information [1B 69 53]"""
    socket.send(b"\x1B\x69\x53")


def receive_status_information_response(socket: bluetooth.BluetoothSocket):
    """receive status information"""
    # Status responses are always 32 bytes; anything else is fatal.
    response = socket.recv(32)

    if len(response) != 32:
        sys.exit("Expected 32 bytes, but only received %d" % len(response))

    return response


def handle_status_information(status_information):
    """Dispatch a 32-byte status response to a handler by its status type.

    Side effects: prints a human-readable report; updates the
    detected_media_width global on a status-request reply; calls sys.exit()
    on completion, error, or power-off.
    """
    def handle_reply_to_status_request(status_information):
        global detected_media_width
        print("Printer Status")
        print("--------------")
        print("Media Width: %dmm" % status_information[STATUS_OFFSET_MEDIA_WIDTH])
        print("Media Type: %s" % MediaType(status_information[STATUS_OFFSET_MEDIA_TYPE]).name)
        print("Tape Color: %s" % TapeColor(status_information[STATUS_OFFSET_TAPE_COLOR_INFORMATION]).name)
        print("Text Color: %s" % TextColor(status_information[STATUS_OFFSET_TEXT_COLOR_INFORMATION]).name)
        print()
        detected_media_width = status_information[STATUS_OFFSET_MEDIA_WIDTH]

    def handle_printing_completed(status_information):
        print("Printing Completed")
        print("------------------")

        mode = Mode(status_information[STATUS_OFFSET_MODE])
        print("Mode: %s" % ", ".join([f.name for f in Mode if f in mode]))
        sys.exit(0)

    def handle_error_occurred(status_information):
        print("Error Occurred")
        print("--------------")

        error_information_1 = ErrorInformation1(status_information[STATUS_OFFSET_ERROR_INFORMATION_1])
        error_information_2 = ErrorInformation2(status_information[STATUS_OFFSET_ERROR_INFORMATION_2])

        print("Error information 1: %s" % ", ".join([f.name for f in ErrorInformation1 if f in error_information_1]))
        print("Error information 2: %s" % ", ".join([f.name for f in ErrorInformation2 if f in error_information_2]))
        sys.exit("An error has occurred; exiting program")

    def handle_turned_off(status_information):
        print("Turned Off")
        print("----------")
        sys.exit("Device was turned off")

    def handle_notification(status_information):
        print("Notification")
        print("------------")
        print(f"Notification number: {NotificationNumber(status_information[STATUS_OFFSET_NOTIFICATION_NUMBER]).name}")
        print()

    def handle_phase_change(status_information):
        print("Phase Changed")
        print("-------------")

        phase_type = status_information[STATUS_OFFSET_PHASE_TYPE]
        # Phase number is a big-endian 16-bit value.
        phase_number = int.from_bytes(status_information[STATUS_OFFSET_PHASE_NUMBER:STATUS_OFFSET_PHASE_NUMBER + 2], "big")
        print("Phase type: %s" % PhaseType(phase_type).name)
        print("Phase number: %s" % (PhaseNumberPrintingState(
            phase_number) if phase_type == PhaseType.PRINTING_STATE else PhaseNumberEditingState(phase_number)).name)
        print()

    handlers = {
        StatusType.REPLY_TO_STATUS_REQUEST: handle_reply_to_status_request,
        StatusType.PRINTING_COMPLETED: handle_printing_completed,
        StatusType.ERROR_OCCURRED: handle_error_occurred,
        StatusType.TURNED_OFF: handle_turned_off,
        StatusType.NOTIFICATION: handle_notification,
        StatusType.PHASE_CHANGE: handle_phase_change
    }

    status_type = status_information[STATUS_OFFSET_STATUS_TYPE]

    handlers[status_type](status_information)


def bad_options(message):
    """Print a usage error and exit with status 1."""
    print(f"Error: {message}. Use {sys.argv[0]} --help to get more information")
    exit(1)


def main():
    """CLI entry point: validate options, resolve the BT address (stored
    default or command line), then either report printer info or print."""
    options = app_args.parse()

    if not options.info and not options.image:
        bad_options('Image path required')

    if options.set_default:
        if not options.bt_address:
            bad_options('You must provide a BT address to set as default')
        else:
            set_default_bt(options.bt_address)
            print(f"{options.bt_address} set as default BT address")

    if not options.bt_address:
        default_bt = get_default_bt()
        if not default_bt:
            bad_options("BT Address is required. If you'd like to remember it use --set-default")
        options.bt_address = default_bt
        print(f"Using BT Address of {options.bt_address}")

    if options.info:
        get_printer_info(options.bt_address, options.bt_channel)
        exit(0)

    make_label(options)


if __name__ == "__main__":
    main()
SkoZombie/pt-p710bt-label-maker
label_maker.py
label_maker.py
py
11,344
python
en
code
null
github-code
13
31346198814
import random
import math


class Graph(object):
    """Static description of a TSP instance plus its evolving pheromone state."""

    def __init__(self, points, cost_matrix, rank):
        """
        :param points: list of tuples for the coordinates of points
        :param cost_matrix: matrix of distance among locations, 2d array
        :param rank: number of locations, int
        """
        self.points = points
        self.matrix = cost_matrix
        self.rank = rank
        # Initiate a pheromone matrix in which the pheromone density for all
        # paths is the same (uniform prior over edges).
        self.pheromone = [[1 / (rank * rank) for _ in range(rank)]
                          for _ in range(rank)]


class ACO(object):
    """Ant Colony Optimization solver for the traveling-salesman problem."""

    def __init__(self, num_ants=10, num_itr=100, alpha=1, beta=10, rho=0.5, q=10):
        """
        :param num_ants: number of ants we use in this algorithm, int
        :param num_itr: number of iterations we try, int
        :param alpha: relative importance of pheromone amount, float
        :param beta: relative importance of heuristic (1/distance), float
        :param rho: evaporation rate of pheromone, float
        :param q: pheromone deposit constant Q, float
        """
        self.Q = q
        self.rho = rho
        self.alpha = alpha
        self.beta = beta
        self.num_ants = num_ants
        self.num_itr = num_itr

    def _update_pheromone(self, graph: Graph, ants: list):
        """Evaporate pheromone on every edge, then add each ant's deposit."""
        for i in range(graph.rank):
            for j in range(graph.rank):
                graph.pheromone[i][j] *= (1 - self.rho)
                for ant in ants:
                    graph.pheromone[i][j] += ant.pheromone_delta[i][j]

    def solve(self, graph):
        """Run the colony on *graph*.

        :param graph: problem instance, Graph
        :return: (best_path, best_length, all_path, all_length) where the
            ``all_*`` lists record every ant's tour across all iterations.
        """
        best_length = math.inf
        best_path = []
        all_path = []
        all_length = []
        for itr in range(self.num_itr):
            ants = [_Ant(self, graph) for _ in range(self.num_ants)]
            for ant in ants:
                # Each ant performs rank-1 moves to complete a tour
                # (the starting city is chosen in the constructor).
                for _ in range(graph.rank - 1):
                    ant.select_next()
                # Close the tour back to the starting city.
                ant.total_length += graph.matrix[ant.tabu[-1]][ant.tabu[0]]
                all_path.append(ant.tabu)
                all_length.append(ant.total_length)
                if ant.total_length < best_length:
                    best_length = ant.total_length
                    # Fix: take a copy so the recorded best path can never be
                    # aliased to a mutable ant-owned list.
                    best_path = list(ant.tabu)
                ant.update_delta_pheromone()
            self._update_pheromone(graph, ants)
        return best_path, best_length, all_path, all_length


class _Ant(object):
    """A single ant constructing one tour over the graph."""

    def __init__(self, aco, graph):
        """
        :param aco: input of the current optimization, ACO
        :param graph: input of the distance and rank, Graph
        """
        self.colony = aco
        self.graph = graph
        self.total_length = 0.0
        self.tabu = []  # cities already visited, in visiting order
        self.pheromone_delta = []  # pheromone this ant will deposit per edge
        self.allowed = [i for i in range(graph.rank)]  # cities still available
        # eta is the heuristic value 1/distance(i, j); 0 on the diagonal.
        # NOTE(review): index order eta[j][i] = 1/matrix[i][j] is kept exactly
        # as in the original — it only matters for asymmetric cost matrices.
        self.eta = [[0 if i == j else 1 / graph.matrix[i][j]
                     for i in range(graph.rank)] for j in range(graph.rank)]
        # Initiate the ant at a uniformly random starting city.
        start = random.randint(0, graph.rank - 1)
        self.tabu.append(start)
        self.current = start
        self.allowed.remove(start)

    def select_next(self):
        """Choose the next city and update tabu/allowed lists and total length.

        The move probability is proportional to
        pheromone^alpha * eta^beta over the still-allowed cities.
        """
        alpha = self.colony.alpha
        beta = self.colony.beta
        # Unnormalized attractiveness of every still-allowed city, computed
        # once.  (The original recomputed each term twice and probed
        # membership with try/except around list.index — an O(n) scan plus
        # exception control flow per city.)
        weights = {
            city: (self.graph.pheromone[self.current][city] ** alpha)
                  * (self.eta[self.current][city] ** beta)
            for city in self.allowed
        }
        total_weight = sum(weights.values())
        # Probability of moving to each city; visited cities keep weight 0.
        probabilities = [0.0] * self.graph.rank
        for city, weight in weights.items():
            probabilities[city] = weight / total_weight
        population = [i for i in range(self.graph.rank)]
        selected = random.choices(population, probabilities)[0]
        self.allowed.remove(selected)
        self.tabu.append(selected)
        self.total_length += self.graph.matrix[self.current][selected]
        self.current = selected

    def update_delta_pheromone(self):
        """Record the pheromone increment Q / tour_length on every used edge.

        NOTE(review): as in the original, the closing edge (last city back to
        the start) receives no deposit — confirm whether that is intended.
        """
        self.pheromone_delta = [[0 for _ in range(self.graph.rank)]
                                for _ in range(self.graph.rank)]
        for k in range(1, len(self.tabu)):
            last = self.tabu[k - 1]
            curr = self.tabu[k]
            self.pheromone_delta[last][curr] = self.colony.Q / self.total_length
mingzhang1998/Travel_Salesman
ACO.py
ACO.py
py
5,487
python
en
code
0
github-code
13
34346033012
from aioredis_cluster.speedup.ensure_bytes import encode_command as cy_encode_command
from aioredis_cluster.speedup.ensure_bytes import (
    iter_ensure_bytes as cy_iter_ensure_bytes,
)
from aioredis_cluster.util import py_encode_command, py_iter_ensure_bytes

from . import run_bench

# Representative Redis commands used as benchmark fixtures: a large XADD
# payload, a ZREMRANGEBYSCORE with float bounds, and a small LRANGE.
ds = [
    (
        b"XADD",
        "queue:queue_shard1",
        b"MAXLEN",
        b"~",
        10000,
        b"*",
        "json",
        b'{"request_id":"75347c19-5cf4-4919-8247-268e63487908","chunk_num":42,"user_id":"blahblah_:user:uid_d5e1d7efd4de4f7ca36c407e61f2aab8","session_id":"f4a9ae3f-6f1d-4fc3-bcf6-fbb191418c0d","source":"frontent","deadline":1675539675,"last":false,"routing":{"exchange":null,"redis_stream_name":"extra_backend_queue","redis_stream_shards_num":1},"apply_foo":true,"apply_bar":true,"foo_config_name":null,"foo_routes":{"exchange":null,"beer_enabled":true,"beer_threshold":0.24,"vodka_threshold":0.24,"max_num_shots":0,"redis_stream_name":"foo_queue","redis_stream_shards_num":1}}',
    ),
    (
        b"ZREMRANGEBYSCORE",
        "push_inbox:{75347c19-5cf4-4919-8247-268e63487908}",
        float("-inf"),
        1674934871.5091486,
    ),
    (b"LRANGE", "audios_durations:{75347c19-5cf4-4919-8247-268e63487908}", -1, -1),
]


def run_py_iter_ensure_bytes():
    """Drive the pure-Python byte-coercion iterator over every fixture."""
    for command in ds:
        list(py_iter_ensure_bytes(command))


def run_cy_iter_ensure_bytes():
    """Drive the Cython byte-coercion iterator over every fixture."""
    for command in ds:
        list(cy_iter_ensure_bytes(command))


def run_py_encode_command():
    """Encode every fixture with the pure-Python command encoder."""
    for command in ds:
        py_encode_command(*command)


def run_cy_encode_command():
    """Encode every fixture with the Cython command encoder."""
    for command in ds:
        cy_encode_command(*command)


def main():
    """Print one benchmark result per implementation, pure-Python first."""
    for benchmark in (
        run_py_iter_ensure_bytes,
        run_cy_iter_ensure_bytes,
        run_py_encode_command,
        run_cy_encode_command,
    ):
        print(run_bench(benchmark))


if __name__ == "__main__":
    main()
DriverX/aioredis-cluster
benchmarks/cythonize/ensure_bytes.py
ensure_bytes.py
py
1,847
python
en
code
24
github-code
13
32554221516
"""Views for the photo gallery: homepage, location/category filters, search."""
from unicodedata import category
from django.shortcuts import render
from .models import Location, Image, Category


def index(request):
    """Homepage: all images plus the location/category lists for the navbar."""
    context = {
        "all_images": Image.objects.all(),
        'all_locations': Location.objects.all(),
        'all_categories': Category.objects.all(),
    }
    return render(request, 'all-photos/index.html', context)


def locationImg_results(request, location):
    """Render the images taken at a single location."""
    context = {
        'images': Image.filter_by_location(location),
        'all_locations': Location.objects.all(),
        'all_categories': Category.objects.all(),
    }
    return render(request, 'all-photos/location.html', context)


def search_results(request):
    """Search images by the category name in the ``searchImg`` GET parameter."""
    term = request.GET.get('searchImg')
    if term:
        context = {
            'all_locations': Location.objects.all(),
            'all_categories': Category.objects.all(),
            'images': Image.search_by_category(term),
            'message': f'{term}',
        }
        return render(request, 'all-photos/search.html', context)
    # No (or empty) search term: prompt the user instead of querying.
    message = 'Please type category to search'
    return render(request, 'all-photos/search.html', {'message': message})


def categoryImg_results(request, category):
    """Render the images belonging to a single category."""
    context = {
        'images': Image.filter_by_category(category),
        'all_locations': Location.objects.all(),
        'all_categories': Category.objects.all(),
    }
    return render(request, 'all-photos/category.html', context)
CosBett/Mi-Galeria
photos/views.py
views.py
py
1,997
python
en
code
0
github-code
13
24629104915
#!/usr/bin/env python import os import pandas as pd import pysam import numpy as np import matplotlib import matplotlib.pyplot as plt import seaborn as sns from looper.models import Project # Set settings pd.set_option("date_dayfirst", True) sns.set(context="paper", style="white", palette="pastel", color_codes=True) sns.set_palette(sns.color_palette("colorblind")) matplotlib.rcParams["svg.fonttype"] = "none" matplotlib.rc('text', usetex=False) def get_reads_in_construct(bam, guide_annotation): def overlap_1d(min1, max1, min2, max2): return max(0, min(max1, max2) - max(min1, min2)) bam_handle = pysam.AlignmentFile(bam) reads = pd.DataFrame() # for each "chromosome" (each guideRNA) for chrom in guide_annotation["oligo_name"].unique(): print(chrom) if chrom == "Cas9_blast": continue # get position of alignment guide_seq = guide_annotation[guide_annotation["oligo_name"] == chrom]['sequence'].squeeze() chrom_size = len(prj['crop-seq']['u6'] + guide_seq + prj['crop-seq']['rest']) guide_start_pos = len(prj['crop-seq']['u6']) + 1 guide_end_pos = chrom_size - len(prj['crop-seq']['rest']) # for each read for aln in bam_handle.fetch(reference=chrom + "_chrom"): # skip reads if ( aln.is_qcfail or # failed quality (never happens, but for the future) aln.is_secondary or np.mean(aln.query_alignment_qualities) < 10 or # low mapping Q (never happens, but for the future) "--" in aln.get_reference_sequence() # reads with two+ gaps ): continue # get cell index cell = dict(aln.get_tags())['XC'] # get molecule index molecule = dict(aln.get_tags())['XM'] # determine distance to end of gRNA sequence distance = aln.reference_start - guide_end_pos # determine numbner of overlaping bases overlap = overlap_1d(aln.reference_start, aln.reference_end, guide_start_pos, guide_end_pos) # determine if inside gRNA inside = True if overlap > 0 else False # make sure strand is correct if aln.is_reverse: strand_agreeement = False else: strand_agreeement = True # get alignement quality # 
aln.mapapping_quality mapping_quality = np.mean(aln.query_alignment_qualities) reads = reads.append(pd.Series([ chrom, cell, molecule, aln.reference_start, aln.reference_end, distance, overlap, inside, mapping_quality, strand_agreeement]), ignore_index=True) reads.columns = [ "chrom", "cell", "molecule", "read_start", "read_end", "distance", "overlap", "inside", "mapping_quality", "strand_agreeement"] return reads def get_reads_in_Cas9_construct(bam): def overlap_1d(min1, max1, min2, max2): return max(0, min(max1, max2) - max(min1, min2)) chrom = "Cas9_blast" bam_handle = pysam.AlignmentFile(bam) reads = pd.DataFrame() sequence = "".join([ prj['crop-seq']['cas9'], prj['crop-seq']['nls'], prj['crop-seq']['flag'], prj['crop-seq']['p2a'], prj['crop-seq']['blast'], prj['crop-seq']['space'], prj['crop-seq']['virus_ltr']]) # get position of alignment chrom_size = len(sequence) guide_start_pos = 0 guide_end_pos = chrom_size - len(prj['crop-seq']['cas9']) # for each read for aln in bam_handle.fetch(reference=chrom + "_chrom"): # skip reads if ( aln.is_qcfail or # failed quality (never happens, but for the future) aln.is_secondary or np.mean(aln.query_alignment_qualities) < 10 or # low mapping Q (never happens, but for the future) "--" in aln.get_reference_sequence() # reads with two+ gaps ): continue # determine distance to start of Cas9 construct distance = guide_start_pos - aln.reference_start if distance < 0: continue # determine numbner of overlaping bases overlap = overlap_1d(aln.reference_start, aln.reference_end, guide_start_pos, guide_end_pos) # determine if overlaps Cas9 construct inside = True if overlap > 0 else False # make sure strand is correct if aln.is_reverse: strand_agreeement = False else: strand_agreeement = True # get alignement quality # aln.mapapping_quality mapping_quality = np.mean(aln.query_alignment_qualities) # get cell index cell = dict(aln.get_tags())['XC'] # get molecule index molecule = dict(aln.get_tags())['XM'] reads = 
reads.append(pd.Series([chrom, cell, molecule, distance, overlap, inside, mapping_quality, strand_agreeement]), ignore_index=True) reads.columns = ["chrom", "cell", "molecule", "distance", "overlap", "inside", "mapping_quality", "strand_agreeement"] return reads def plot_reads_in_constructs(reads): # Inspect fig, axis = plt.subplots(2, sharex=True) # number of barcode reads per cell sns.distplot(np.log2(1 + reads.groupby(["cell"]).apply(len)), ax=axis[0], kde=False) axis[0].set_xlabel("Reads (log2(1 + x))") # number of unique barcode reads per cell sns.distplot(np.log2(1 + reads.groupby(["cell"])['molecule'].apply(set).apply(len)), ax=axis[1], kde=False) axis[1].set_xlabel("Molecules (log2(1 + x))") sns.despine(fig) fig.savefig(os.path.join(output_dir, "barcodes_per_cell.svg"), bbox_inches="tight") # process u = reads.groupby( ['cell', 'molecule', 'chrom'])[ 'distance', 'overlap', 'inside', 'mapping_quality', 'strand_agreeement'].max().reset_index() # further reduce molecules by solving chromosome conflicts (assign molecule to chromosome with maximum overlap) uu = u.ix[u.groupby(['cell', 'molecule']).apply(lambda x: x['overlap'].argmax())] # efficiency of reads in gRNA vs whole construct inside_fraction = u.groupby(["cell"])['inside'].sum() / u.groupby(["cell"]).apply(len) fig, axis = plt.subplots(1) sns.distplot(inside_fraction, bins=20, kde=False) axis.set_xlabel("Ratio molecules overlap gRNA / total") sns.despine(fig) fig.savefig(os.path.join(output_dir, "barcodes_per_cell.grna_reads_vs_whole_construct.svg"), bbox_inches="tight") # efficiency of reads in gRNA vs whole construct vs number of captured gRNA molecules inside_fraction = u.groupby(["cell"])['inside'].sum() / u.groupby(["cell"]).apply(len) sns.jointplot(u.groupby(["cell"]).apply(len), u.groupby(["cell"])['inside'].sum()) axis.set_xlabel("Total molecules in construct per cell") axis.set_xlabel("Molecules overlapping gRNA per cell") plt.savefig(os.path.join(output_dir, 
"barcodes_per_cell.all_reads_vs_total_reads.svg"), bbox_inches="tight") plt.close("all") # remove no overlaps and reads in wrong strand u = uu[(uu['overlap'] > 0) & (uu['strand_agreeement'] == 1)] # number of unique barcode reads saying inside per cell fig, axis = plt.subplots(1) sns.distplot(np.log2(1 + u.groupby(["cell"])['molecule'].apply(len)), ax=axis, kde=False) axis.set_xlabel("Molecules (log2(1 + x))") sns.despine(fig) fig.savefig(os.path.join(output_dir, "barcodes_per_cell.inside.svg"), bbox_inches="tight") # concordance between reads in same cell concordant_fraction = 1. / u.groupby(["cell"])['chrom'].nunique() fig, axis = plt.subplots(1) sns.distplot(concordant_fraction, kde=False) axis.set_xlabel("Ratio molecules overlap gRNA / total") sns.despine(fig) fig.savefig(os.path.join(output_dir, "barcodes_per_cell.concordance.svg"), bbox_inches="tight") if reads['chrom'].str.contains("Filler_1").any(): # distribution of reads regarding constructs (for each guide) g = sns.FacetGrid(u, col="chrom", sharex=False, sharey=False) g.map(sns.distplot, 'distance', kde=False) sns.despine(fig) g.fig.savefig(os.path.join(output_dir, "barcodes_per_cell.distance.svg"), bbox_inches="tight") # For all guides plot distribution inside and out fig, axis = plt.subplots(2, len(set(u["chrom"])), sharex=False, sharey=False, figsize=(16, 8)) axis = iter(axis.flatten()) for inside in [1.0, 0.0]: for chrom in set(u["chrom"]): ax = axis.next() ax.set_title(chrom) ax.set_ylabel("Inside" if inside else "Outside") subset = u[(u["chrom"] == chrom) & (u["inside"] == inside)] if subset.shape[0] > 1: sns.distplot(subset['distance'], kde=False, ax=ax) sns.despine(fig) fig.savefig(os.path.join(output_dir, "barcodes_per_cell.distance.in_out.svg"), bbox_inches="tight") # For all guides plot distribution inside and out fig, axis = plt.subplots(2, len(set(u["chrom"])), sharex=False, sharey=False, figsize=(16, 8)) axis = iter(axis.flatten()) for inside in [1.0, 0.0]: for chrom in set(u["chrom"]): ax 
= axis.next() ax.set_title(chrom) ax.set_ylabel("Inside" if inside else "Outside") subset = u[(u["chrom"] == chrom) & (u["inside"] == inside)] if subset.shape[0] > 1: sns.distplot(subset['overlap'], kde=False, ax=ax) sns.despine(fig) fig.savefig(os.path.join(output_dir, "barcodes_per_cell.overlap.in_out.svg"), bbox_inches="tight") def make_assignment(reads, guide_annotation): # Assign # unique reads per cell # reduce molecules u = reads.groupby( ['cell', 'molecule', 'chrom'])[ 'distance', 'overlap', 'inside', 'mapping_quality', 'strand_agreeement'].max().reset_index() # further reduce molecules by solving chromosome conflicts (assign molecule to chromosome with maximum overlap) uu = u.ix[u.groupby(['cell', 'molecule']).apply(lambda x: x['overlap'].argmax())] # remove marginal overlaps and reads in wrong strand u = uu[(uu['overlap'] > 0) & (uu['strand_agreeement'] == 1)] # Get a score (sum of bp covered) scores = u.groupby(["cell", 'chrom'])['overlap'].sum() scores = scores.reset_index().pivot_table("overlap", "cell", "chrom").fillna(0) # assign (get max) scores["assignment"] = scores.apply(np.argmax, axis=1) scores["score"] = scores[list(set(reads['chrom']))].apply(max, axis=1) # give nan to cells with no overlap (this is because argmax pickus a draw) scores.loc[scores['score'] == 0, 'assignment'] = pd.np.nan scores.loc[scores['score'] == 0, 'score'] = pd.np.nan # Get only assigned cells scores = scores.dropna() # concordance between reads in same cell scores['concordance_ratio'] = scores.drop(["assignment", "score"], axis=1).apply( lambda x: x / sum(x), axis=1).max(axis=1) # Get assigned cells assignment = scores.reset_index()[["cell", "assignment", "score", "concordance_ratio"]] # Convert to coverage in X times (divide by length of gRNA) coverage = scores.drop(['assignment', 'score'], axis=1).apply( lambda x: x / float(len(guide_annotation[guide_annotation["oligo_name"] == x.name]["sequence"].squeeze())), axis=0) coverage["maxscore"] = coverage.apply(max, axis=1) 
coverage["assignment"] = coverage.drop("maxscore", axis=1).apply(np.argmax, axis=1) return scores, assignment, coverage def plot_assignments(scores, assignment, coverage): # If number of gRNAs in libarary is less than 20, plot each in a panel separately, else plot all together if scores.shape[1] < 22: extras = {"col": "assignment", "col_wrap": 4} else: extras = {} # Plot bp covered per cell g = sns.FacetGrid(scores[["assignment", "score"]], sharex=False, sharey=False, **extras) g.map(sns.distplot, 'score', kde=False) g.set(xlim=(0, 1000)) sns.despine(g.fig) g.fig.savefig(os.path.join(output_dir, "barcodes_per_cell.bp_covered.svg"), bbox_inches="tight") # transform covereage to log cov = coverage[["assignment", "maxscore"]] cov.loc[:, 'maxscore'] = np.log2(cov['maxscore']) try: g = sns.FacetGrid(cov, sharex=False, sharey=False, **extras) g.map(sns.distplot, 'maxscore', bins=100, kde=False) for a in g.axes.flatten(): a.set_xlabel("log2 coverage") sns.despine(g.fig) g.fig.savefig(os.path.join(output_dir, "barcodes_per_cell.coverage.svg"), bbox_inches="tight") except: pass g = sns.FacetGrid(scores, sharex=True, sharey=False, **extras) g.map(sns.distplot, 'concordance_ratio', kde=False) sns.despine(g.fig) g.fig.savefig(os.path.join(output_dir, "barcodes_per_cell.score_concordance_ratio.svg"), bbox_inches="tight") # plot assignment stats c = assignment['assignment'].value_counts() # .replace(pd.np.nan, "None") fig, axis = plt.subplots(1) sns.barplot(c.index, c.values, ax=axis) axis.set_xticklabels(axis.get_xticklabels(), rotation=90) sns.despine(fig) fig.savefig(os.path.join(output_dir, "barcodes_per_cell.identified.svg"), bbox_inches="tight") melted_scores = pd.melt(scores.reset_index(), ['cell', 'assignment', 'score', 'concordance_ratio']) g = sns.FacetGrid(melted_scores, sharex=False, sharey=False, **extras) g.map(sns.distplot, 'value', bins=200, kde=False) sns.despine(fig) g.fig.savefig(os.path.join(output_dir, "barcodes_per_cell.scores.distibution.svg"), 
bbox_inches="tight") # calculate abs amount of basepairs overlaping the gRNA of the assigned cell vs all others overlap_per_cell = reads.groupby(["cell"])['overlap'].sum() overlap_per_guide = reads.groupby(["cell", "chrom"])['overlap'].sum() overlap_assignment = scores.apply(lambda x: overlap_per_guide.ix[x.name, x['assignment']] if not pd.isnull(x['assignment']) else pd.np.nan, axis=1) overlap_others = overlap_per_cell - overlap_assignment sns.jointplot(overlap_others, overlap_assignment, alpha=0.1) plt.savefig(os.path.join(output_dir, "duplets_assignment_overlap.svg"), bbox_inches="tight") plt.close("all") sns.jointplot(overlap_others, np.log2(1 + overlap_assignment), alpha=0.1) plt.savefig(os.path.join(output_dir, "duplets_assignment_overlap.ylog.svg"), bbox_inches="tight") plt.close("all") sns.jointplot(np.log2(1 + overlap_others), np.log2(1 + overlap_assignment), alpha=0.1) plt.savefig(os.path.join(output_dir, "duplets_assignment_overlap.bothlog.svg"), bbox_inches="tight") plt.close("all") sns.jointplot(overlap_others, overlap_assignment, xlim=(-100, overlap_assignment.max() + 100), ylim=(-100, overlap_assignment.max() + 100), alpha=0.1) plt.savefig(os.path.join(output_dir, "duplets_assignment_overlap.lims.svg"), bbox_inches="tight") plt.close("all") # Start project, add samples prj = Project(os.path.join("metadata", "config.yaml")) # only used in older versions of looper # prj.add_sample_sheet() # get guide annotation guide_annotation = pd.read_csv(os.path.join("metadata", "guide_annotation.csv")) for sample in [s for s in prj.samples if hasattr(s, "replicate")]: # [s for s in prj.samples if hasattr(s, "replicate")] output_dir = os.path.join(sample.paths.sample_root, "gRNA_assignment") # select gRNAs in respective sample library sel_guide_annotation = guide_annotation[guide_annotation['library'] == sample.grna_library] # read in alignments bam = os.path.join(sample.paths.sample_root, "star_gene_exon_tagged.clean.bam") reads = get_reads_in_construct(bam, 
sel_guide_annotation) reads.to_csv(os.path.join(output_dir, "guide_cell_quantification.csv"), index=False) reads = pd.read_csv(os.path.join(output_dir, "guide_cell_quantification.csv")) # reads in cas9 construct cas9_reads = get_reads_in_Cas9_construct(bam) cas9_reads.to_csv(os.path.join(output_dir, "cas9_quantification.reads.csv"), index=False) cas9_expression = cas9_reads.groupby(['cell'])['molecule'].apply(np.unique).apply(len) cas9_expression.reset_index().to_csv(os.path.join(output_dir, "cas9_quantification.counts.csv"), index=False) # assign scores, assignment, coverage = make_assignment(reads, sel_guide_annotation) scores.to_csv(os.path.join(output_dir, "guide_cell_scores.csv"), index=True) assignment.to_csv(os.path.join(output_dir, "guide_cell_assignment.csv"), index=False) coverage.to_csv(os.path.join(output_dir, "guide_cell_coverage.csv"), index=True) scores = pd.read_csv(os.path.join(output_dir, "guide_cell_scores.csv"), index_col=0) assignment = pd.read_csv(os.path.join(output_dir, "guide_cell_assignment.csv")) coverage = pd.read_csv(os.path.join(output_dir, "guide_cell_coverage.csv"), index_col=0) # Plots # reads along constructs plot_reads_in_constructs(reads) # assignment quality/coverage plot_assignments(scores, assignment, coverage) # # # Figure 1g from itertools import chain colors = sns.color_palette("colorblind") u6 = prj['crop-seq']['u6'] rest = prj['crop-seq']['rest'] for sample in [s for s in prj.samples if s.name == "CROP-seq_HEK293T_1_resequenced"]: # read in read/construct overlap information reads = pd.read_csv(os.path.join(sample.paths.sample_root, "gRNA_assignment", "guide_cell_quantification.csv")) # process u = reads.groupby( ['cell', 'molecule', 'chrom'])[ 'read_start', 'read_end', 'distance', 'overlap', 'inside', 'mapping_quality', 'strand_agreeement'].max().reset_index() # further reduce molecules by solving chromosome conflicts (assign molecule to chromosome with maximum overlap) uu = u.ix[u.groupby(['cell', 
'molecule']).apply(lambda x: x['overlap'].argmax())] # remove no overlaps and reads in wrong strand u = uu[uu['strand_agreeement'] == 1] # select gRNAs in respective sample library sel_guide_annotation = guide_annotation[guide_annotation['library'] == sample.grna_library] reads2 = u.copy() # normalize filler length to match start/end of gRNA filler_length = len(sel_guide_annotation.loc[sel_guide_annotation['oligo_name'] == 'Filler_1', 'sequence'].squeeze()) reads2.loc[ (reads2["chrom"] == "Filler_1") & (reads2["read_start"] > len(u6) + 20), "read_start"] -= filler_length reads2.loc[ (reads2["chrom"] == "Filler_1") & (reads2["read_end"] > len(u6) + 20), "read_end"] -= filler_length # Stacked frequencies of read sequences read_data = list() for chrom in reads2['chrom'].drop_duplicates(): if chrom == "Filler_1": continue read_data.append( list(list(chain.from_iterable( reads2[(reads2["inside"] == 1) & (reads2["chrom"] == chrom)].apply( lambda x: range(int(x["read_start"]), int(x["read_end"])), axis=1).values)))) read_data.append(list(list(chain.from_iterable(reads2[reads2["inside"] != 1].apply(lambda x: range(int(x["read_start"]), int(x["read_end"])), axis=1).values)))) fig, axis = plt.subplots(1, 1, sharex=True) axis.hist( read_data, bins=range(0, len(u6) + 20 + len(rest), 10), histtype='barstacked', normed=False, color=colors[:3] + ["grey"]) for coor, name in [(0, "startU6"), (len(u6), "start gRNA"), (len(u6) + 20, "start backbone"), (len(u6) + 20 + len(rest), "start polyA")]: axis.axvline(coor, 0, 1, linewidth=3, color="black", linestyle="--") axis.text(coor, 0.01, name) axis.set_xlim((0, len(u6) + 20 + len(rest) + 50)) sns.despine(fig) fig.savefig(os.path.join("results", "figures", "fig1g.reads.stacked.svg"), bbox_inches="tight")
epigen/crop-seq
src/assign_gRNA_cells.py
assign_gRNA_cells.py
py
19,998
python
en
code
25
github-code
13
8142501106
"""" This file contains functions used in of preprocess downloaded crypto close data """ #python packages from datetime import datetime import pandas as pd import yfinance as yf import pandas as pd '''Function:datecheck this function is used to check to make sure that the last date is not tomorrows date. Yahoo Finance may pull data from a timezone that is in the future for some crypto close data.''' def datecheck(df): #Get todays date and convert to datetime format ymd today = datetime.today() #Get last date of dataframe last_dt = pd.to_datetime(df['ds'].tail(1)) #test if last date is greater or equal to current datetime test_dt = last_dt <= today #if statement to remove last record from df given the last date is greater than current date if test_dt.bool() == True: return(df) else: df.drop(df.tail(1).index, inplace=True) return(df) #Function to preprocess market data and format to prophet specific format def preprocess(file): #Convert download file to time series of date and close file_ts = file[['Date','Close']] #rename columns 'ds' = datestamp and Y = Y value for prophet file_ts = file_ts.rename(columns={"Date": "ds", "Close": "y"}) #Drop Na values from file df = file_ts.dropna(axis=0) return(df)
MattChinchilla/DATA_SCIENCE
TimeSeries/prophet_funcs_v1.py
prophet_funcs_v1.py
py
1,325
python
en
code
0
github-code
13
44692179396
#_author:
#date:
"""Minimal TCP command server: runs each received line in a shell and
returns the output length followed by the output itself."""
import socket
import subprocess

# Create the server socket and bind it to localhost:8000.
sk = socket.socket()
adress = ("127.0.0.1", 8000)
sk.bind(adress)
# Listen with a backlog of 2 pending connections.
sk.listen(2)

while True:
    print("waiting.....")
    conn, addr = sk.accept()
    print(addr)
    while True:
        try:
            data = conn.recv(1024)
        except Exception:
            # Peer reset or other socket error: drop this client.
            break
        if not data:
            # Orderly shutdown from the peer.
            break
        # SECURITY: this intentionally executes untrusted network input in a
        # shell (the server IS a remote shell); never expose beyond localhost.
        obj = subprocess.Popen(str(data, "utf8"), shell=True, stdout=subprocess.PIPE)
        # Read the command's stdout from the pipe.
        cmd_result = obj.stdout.read()
        # Send the result length first so the client knows how much to read.
        result_len = bytes(str(len(cmd_result)), "utf8")
        conn.sendall(result_len)
        conn.sendall(cmd_result)
    # Fix: release the client socket once the peer disconnects (the original
    # leaked one file descriptor per connection).
    conn.close()
liangliang115715/pythonStudyNote
studyNote/python-2/cmd_serve.py
cmd_serve.py
py
778
python
en
code
0
github-code
13
17038567824
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json

from alipay.aop.api.constant.ParamConstants import *

# The four payload fields this model carries, in serialization order.
_FIELD_NAMES = ('data', 'data_type', 'request_id', 'sync_type')


class AlipayCommerceTransportIntelligentizeDataSyncModel(object):
    """Request model for the transport intelligentize data-sync API."""

    def __init__(self):
        self._data = None
        self._data_type = None
        self._request_id = None
        self._sync_type = None

    @property
    def data(self):
        return self._data

    @data.setter
    def data(self, value):
        self._data = value

    @property
    def data_type(self):
        return self._data_type

    @data_type.setter
    def data_type(self, value):
        self._data_type = value

    @property
    def request_id(self):
        return self._request_id

    @request_id.setter
    def request_id(self, value):
        self._request_id = value

    @property
    def sync_type(self):
        return self._sync_type

    @sync_type.setter
    def sync_type(self, value):
        self._sync_type = value

    def to_alipay_dict(self):
        """Serialize the truthy fields, delegating to nested models."""
        params = dict()
        for name in _FIELD_NAMES:
            value = getattr(self, name)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[name] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a dict, copying only the keys present."""
        if not d:
            return None
        o = AlipayCommerceTransportIntelligentizeDataSyncModel()
        for name in _FIELD_NAMES:
            if name in d:
                setattr(o, name, d[name])
        return o
alipay/alipay-sdk-python-all
alipay/aop/api/domain/AlipayCommerceTransportIntelligentizeDataSyncModel.py
AlipayCommerceTransportIntelligentizeDataSyncModel.py
py
2,334
python
en
code
241
github-code
13
8863765236
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 18 19:46:04 2020

@author: josegrau
"""
# XOR classification with a small neural network.
# No softmax is used (binary classification); the sigmoid activation is used
# in both the hidden and the output layer, trained by gradient ascent on the
# log-likelihood.
import numpy as np
import matplotlib.pyplot as plt


def forward(X, W1, b1, W2, b2):
    """Forward pass: sigmoid hidden layer, sigmoid output.

    Returns (Y, Z) where Y is the predicted probability per sample and Z the
    hidden-layer activations (kept for the gradient computations).
    """
    Z = 1 / (1 + np.exp(-(X.dot(W1) + b1)))
    a = Z.dot(W2) + b2
    Y = 1 / (1 + np.exp(-a))
    return Y, Z


def predict(X, W1, b1, W2, b2):
    """Binary prediction: round the output probability instead of argmax."""
    Y, _ = forward(X, W1, b1, W2, b2)
    return np.round(Y)


def derivative_w2(Z, T, Y):
    """Gradient of the log-likelihood w.r.t. the output weights W2."""
    return (T - Y).dot(Z)


def derivative_b2(T, Y):
    """Gradient of the log-likelihood w.r.t. the output bias b2."""
    return (T - Y).sum()


def derivative_w1(X, Z, T, Y, W2):
    """Gradient of the log-likelihood w.r.t. the hidden weights W1."""
    dZ = np.outer(T - Y, W2) * (1 - Z * Z)
    return X.T.dot(dZ)


def derivative_b1(Z, T, Y, W2):
    """Gradient of the log-likelihood w.r.t. the hidden bias b1."""
    dZ = np.outer(T - Y, W2) * (1 - Z * Z)
    return dZ.sum(axis=0)


def cost(T, Y):
    """Log-likelihood of targets T under predictions Y (to be maximized)."""
    tot = 0
    for n in range(len(T)):
        if T[n] == 1:
            tot += np.log(Y[n])
        else:
            tot += np.log(1 - Y[n])
    return tot


def main():
    """Train the network on the four XOR patterns and plot the cost curve."""
    X = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
    Y = np.array([0, 1, 1, 0])

    W1 = np.random.randn(2, 4)
    b1 = np.random.randn(4)
    W2 = np.random.randn(4)
    b2 = np.random.randn(1)

    costs = []
    learning_rate = 0.0005
    regularization = 0.
    last_error_rate = None
    for i in range(100000):
        pY, Z = forward(X, W1, b1, W2, b2)
        c = cost(Y, pY)
        prediction = predict(X, W1, b1, W2, b2)
        # Fix: use the absolute deviation — the signed mean lets opposite
        # errors cancel out and can report 0 error on a wrong model.
        er = np.abs(prediction - Y).mean()
        if er != last_error_rate:
            last_error_rate = er
            print("Error rate = ", er)
            print("Valor real: ", Y)
            print("Predicción: ", prediction)
        # Early stopping: the log-likelihood should only increase under
        # gradient ascent; a decrease signals divergence.
        if costs and c < costs[-1]:
            print("Early Exit")
            break
        costs.append(c)
        W2 += learning_rate * (derivative_w2(Z, Y, pY) - regularization * W2)
        b2 += learning_rate * (derivative_b2(Y, pY) - regularization * b2)
        W1 += learning_rate * (derivative_w1(X, Z, Y, pY, W2) - regularization * W1)
        b1 += learning_rate * (derivative_b1(Z, Y, pY, W2) - regularization * b1)
        if i % 10000 == 0:
            print(c)

    print("Final classification rate: ", 1 - np.abs(prediction - Y).mean())
    plt.plot(costs)


# Guard the training run so importing this module (e.g. to reuse forward /
# predict) does not kick off 100k training iterations; `python xor.py`
# behaves exactly as before.
if __name__ == "__main__":
    main()
JoseGrau/Curso_Deep_Learning_Python
CursoDeepLearningPython/xor.py
xor.py
py
2,254
python
en
code
0
github-code
13
16027326694
def print_subset(bit, arr, n):
    """Print the 0/1 mask `bit` and the sum of the arr elements it selects."""
    selected_sum = sum(arr[idx] for idx in range(n) if bit[idx])
    print(bit, selected_sum)


# Enumerate every 0/1 assignment of a 4-slot mask with nested loops.
arr = [1, 2, 3, 4]
bit = [0, 0, 0, 0]
for b0 in range(2):
    bit[0] = b0
    for b1 in range(2):
        bit[1] = b1
        for b2 in range(2):
            bit[2] = b2
            for b3 in range(2):
                bit[3] = b3
                # print_subset(bit, arr, 4)

# Same enumeration, driven by the bits of a single integer mask instead:
# bit `pos` of `mask` decides whether arr[pos] belongs to the subset.
arr = [1, 2, 3, 4, 5, 6]
n = len(arr)
count = 0
for mask in range(1 << n):          # one iteration per subset: 2**n total
    print('현재 집합 :', end=' ')
    for pos in range(n):
        if mask & (1 << pos):
            print(arr[pos], end=", ")
    print()
    count += 1                      # count one subset per mask
print()
print('count:', count)
joonann/ProblemSolving
python/202308/03/test/test.py
test.py
py
926
python
ko
code
0
github-code
13
29863016307
"""Tests for OutputBot.export_event: single-key extraction, JSON decoding of
the 'output' field, raw/hierarchical formatting, and the return_type switch."""
from intelmq.lib.bot import OutputBot
from intelmq.lib.test import BotTestCase

from unittest import TestCase
from json import dumps

# Minimal event: only the base64-encoded raw field ("Cg==" decodes to "\n").
RAW = {"__type": "Event", "raw": "Cg=="}
DICT = {"foo": "bar", "foobar": 1}
# Events whose 'output' field holds a JSON-serialized dict / string / int.
OUTPUT_DICT = {"__type": "Event", "output": dumps(DICT, sort_keys=True)}
STRING = "foobar!"
OUTPUT_STRING = {"__type": "Event", "output": dumps(STRING)}
INT = 123
OUTPUT_INT = {"__type": "Event", "output": dumps(INT)}
# Event with a dotted key, plus its expected hierarchical / no-raw renderings.
INPUT = {"__type": "Event", "raw": "Cg==", "source.ip": "127.0.0.1"}
RAW_HIERARCHICAL = {"raw": "Cg==", "source": {"ip": "127.0.0.1"}}
NO_RAW_TYPE = {"__type": "Event", "source.ip": "127.0.0.1"}


class DummyOutputBot(OutputBot):
    # Minimal OutputBot: exports each received event with the configured
    # return_type and stashes the result on self.result for assertions.
    def process(self):
        event = self.receive_message()
        self.result = self.export_event(event, return_type=self.return_type)
        self.acknowledge_message()


class TestDummyOutputBot(BotTestCase, TestCase):
    @classmethod
    def set_bot(cls):
        # Harness configuration consumed by BotTestCase before each run.
        cls.sysconfig = {"return_type": None}
        cls.bot_reference = DummyOutputBot
        cls.default_input_message = RAW
        cls.allowed_error_count = 1

    def test_export_raw(self):
        # single_key="raw": export yields the base64-decoded raw payload.
        self.run_bot(parameters={"single_key": "raw"})
        self.assertEqual(self.bot.result, "\n")

    def test_export_output_dict(self):
        # 'output' holding JSON is decoded back to the original dict.
        self.input_message = OUTPUT_DICT
        self.run_bot(parameters={"single_key": "output"})
        self.assertEqual(self.bot.result, DICT)

    def test_export_output_dict_string(self):
        # With return_type=str the JSON text is passed through verbatim.
        self.input_message = OUTPUT_DICT
        self.run_bot(parameters={"single_key": "output", "return_type": str})
        self.assertEqual(self.bot.result, OUTPUT_DICT['output'])

    def test_export_output_string(self):
        self.input_message = OUTPUT_STRING
        self.run_bot(parameters={"single_key": "output"})
        self.assertEqual(self.bot.result, STRING)

    def test_export_output_string_string(self):
        # NOTE(review): for a JSON string value, str return_type also yields
        # the decoded string (not the quoted JSON) — matches export_event.
        self.input_message = OUTPUT_STRING
        self.run_bot(parameters={"single_key": "output", "return_type": str})
        self.assertEqual(self.bot.result, STRING)

    def test_export_output_int(self):
        self.input_message = OUTPUT_INT
        self.run_bot(parameters={"single_key": "output"})
        self.assertEqual(self.bot.result, INT)

    def test_export_output_int_string(self):
        # Ints are re-serialized when a string is requested.
        self.input_message = OUTPUT_INT
        self.run_bot(parameters={"single_key": "output", "return_type": str})
        self.assertEqual(self.bot.result, OUTPUT_INT['output'])

    def test_export_keep_raw_hierarchical(self):
        # Dotted keys expand into nested dicts; __type is dropped.
        self.input_message = INPUT
        self.run_bot(parameters={"keep_raw_field": True,
                                 "message_hierarchical": True,
                                 "message_with_type": False,
                                 })
        self.assertEqual(self.bot.result, RAW_HIERARCHICAL)

    def test_export_keep_raw_hierarchical_string(self):
        # Same as above but serialized to sorted-key JSON text.
        self.input_message = INPUT
        self.run_bot(parameters={"keep_raw_field": True,
                                 "message_hierarchical": True,
                                 "message_with_type": False,
                                 "return_type": str,
                                 })
        self.assertEqual(self.bot.result, dumps(RAW_HIERARCHICAL, sort_keys=True))

    def test_export_now_raw_type(self):
        # raw dropped, __type kept, flat keys preserved.
        self.input_message = INPUT
        self.run_bot(parameters={"keep_raw_field": False,
                                 "message_with_type": True,
                                 })
        self.assertEqual(self.bot.result, NO_RAW_TYPE)
certtools/intelmq
intelmq/tests/lib/test_bot_output.py
test_bot_output.py
py
3,605
python
en
code
856
github-code
13
21042170146
"""Django views for a small student-management app: login/session gating,
student CRUD, photo upload, a survey form, and static pages."""
from django import forms
from django.core.exceptions import ValidationError
from django.core.validators import RegexValidator
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.utils.safestring import mark_safe
from openpyxl import load_workbook
from pandas import read_csv
from django.shortcuts import render
import hashlib


def setPassword(password):
    """Return the hex MD5 digest of `password`.

    NOTE(review): MD5 is unsuitable for password storage; also this helper
    appears unused in this module (LoginFrom.clean_password re-implements it).
    """
    md5 = hashlib.md5()
    md5.update(password.encode())
    result = md5.hexdigest()
    return result


# Create your views here.
from app import models


def denglu(request):
    # Login page ("denglu" = login).
    return render(request, "denglu.html")


def admin(request):
    # Admin dashboard; any non-empty session counts as "logged in".
    # NOTE(review): truthiness of session.keys() is a weak auth check.
    print(request.session)
    if request.session.keys():
        return render(request, "admin.html")
    else:
        return redirect("/denglu/")


def banji(request):
    # Class roster: list all students for the template.
    if request.session.keys():
        pus = models.Userment.objects.all()
        return render(request, "banji.html", {"data": pus})
    else:
        return redirect("/denglu/")


def student_add(request):
    # GET: show the add form; POST: create a student record.
    if request.session.keys():
        if request.method == "GET":
            return render(request, "student_add.html")
        else:
            id = request.POST.get("id")
            name = request.POST.get("name")
            gender = request.POST.get("gender")
            math = request.POST.get("math")
            python = request.POST.get("python")
            C = request.POST.get("C")
            # NOTE(review): POST values are strings, so this concatenates
            # "90"+"80"+"70" rather than summing the scores — TODO confirm
            # whether `coun` is intended to be a numeric total.
            coun = math + python + C
            models.Userment.objects.create(id=id, name=name, gender=gender, math=math, python=python, C=C, coun=coun)
            return redirect("/admin/")
    else:
        return redirect("/denglu/")


def student_cha(request):
    # GET: show the edit form; POST: update the record matching `id`.
    if request.session.keys():
        if request.method == "GET":
            return render(request, "student_cha.html")
        else:
            id = request.POST.get("id")
            name = request.POST.get("name")
            gender = request.POST.get("gender")
            math = request.POST.get("math")
            python = request.POST.get("python")
            C = request.POST.get("C")
            # NOTE(review): same string-concatenation concern as student_add.
            coun = math + python + C
            models.Userment.objects.filter(id=id).update(name=name, gender=gender, math=math, python=python, C=C, coun=coun)
            return redirect("/admin/")
    else:
        return redirect("/denglu/")


def student_dele(request):
    # GET: show the delete form; POST: delete the record matching `id`.
    if request.session.keys():
        if request.method == "GET":
            return render(request, "student_dele.html")
        else:
            id = request.POST.get('id')
            print(id)
            models.Userment.objects.filter(id=id).delete()
            return redirect("/admin/")
    else:
        return redirect("/denglu/")


def deng(request, str):
    # Render an arbitrary template name, gated by the session check.
    # NOTE(review): the parameter shadows the builtin `str`.
    if request.session.keys():
        return render(request, str)
    else:
        return redirect("/denglu/")


def gushi1(request):
    return deng(request, "gushi1.html")


def gushi2(request):
    return deng(request, "gushi2.html")


def gushi3(request):
    return deng(request, "gushi3.html")


def gushi4(request):
    return deng(request, "gushi4.html")


"""传输照片"""
# Photo upload: stream the uploaded file into app/static/app/<name>.
def phon(request):
    if request.session.keys():
        if request.method == "GET":
            return render(request, 'phone.html')
        print(request.POST)
        file_object = request.FILES.get("tlg")
        print(file_object)
        name = file_object.name
        # NOTE(review): the file handle is never closed and the client-supplied
        # name is used unvalidated — TODO confirm intended behavior.
        f = open("app/static/app/" + name, mode='wb')
        for i in file_object.chunks():
            f.write(i)
        return redirect("/admin/")
    else:
        return redirect("/denglu/")


"""调查表"""
# Survey form: GET shows the questionnaire, POST stores one Survey row.
def diaocha(request):
    if request.session.keys():
        if request.method == "GET":
            return render(request, "调查.html")
        else:
            q1_1 = request.POST.get("q1_1")
            q1_2 = request.POST.get("q1_2")
            q1_3 = request.POST.get("q1_3")
            # question 2
            q2_1 = request.POST.get("q2_1")
            q2_2 = request.POST.get("q2_2")
            q2_3 = request.POST.get("q2_3")
            q2_4 = request.POST.get("q2_4")
            # question 3
            q3_1 = request.POST.get("q3_1")
            q3_2 = request.POST.get("q3_2")
            q3_3 = request.POST.get("q3_3")
            # NOTE(review): "[]"-suffixed fields are multi-selects, but .get()
            # returns only the last value — getlist() may be intended.
            q3_4 = request.POST.get("q3_4[]")
            q3_5 = request.POST.get("q3_5[]")
            # question 4
            q4_1 = request.POST.get("q4_1")
            q4_2 = request.POST.get("q4_2")
            q4_3 = request.POST.get("q4_3[]")
            q4_4 = request.POST.get("q4_4")
            q4_5 = request.POST.get("q4_5[]")
            print(q1_1)
            models.Survey.objects.create(q1_1=q1_1, q1_2=q1_2, q1_3=q1_3,
                                         q2_1=q2_1, q2_2=q2_2, q2_3=q2_3, q2_4=q2_4,
                                         q3_1=q3_1, q3_2=q3_2, q3_3=q3_3, q3_4=q3_4, q3_5=q3_5,
                                         q4_1=q4_1, q4_2=q4_2, q4_3=q4_3, q4_4=q4_4, q4_5=q4_5
                                         )
            return redirect("/admin/")
    else:
        return redirect("/denglu/")


import hashlib
from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login, logout


class LoginFrom(forms.Form):
    # Login form; clean_password replaces the raw password with its MD5 hex
    # digest so it can be compared with the stored hash.
    username=forms.CharField(label="用户名",widget=forms.TextInput,required=True)
    password = forms.CharField(label="密码",widget=forms.PasswordInput(render_value=True),required=True)

    def clean_password(self):
        pwd=self.cleaned_data.get("password")
        m = hashlib.md5()
        m.update(pwd.encode("utf8"))
        mpwd = m.hexdigest()
        return mpwd


def login_view(request):
    # POST: validate the form, look up the user by (username, md5-password),
    # and store a small session dict on success.
    # NOTE(review): when the POSTed form is invalid, no response is returned
    # (implicit None) — TODO confirm and add a render for that path.
    form=LoginFrom()
    if request.method == 'POST':
        form=LoginFrom(data=request.POST)
        if form.is_valid():
            print(form.cleaned_data)
            ter=models.User.objects.filter(username=form.cleaned_data["username"],password=form.cleaned_data["password"]).first()
            if not ter:
                form.add_error("password","用户名或密码错误")
                return render(request, 'denglu.html', {'form': form})
            request.session["info"]= {'id':ter.id,'name':ter.username}
            return redirect("/admin/")
    else:
        return render(request, 'denglu.html',{'form':form})


def xiaren(request):
    # Redirect straight to a static image.
    return redirect("/static/app/6_210702183620_1.jpg")
0passion0/project_html1.0
app/views.py
views.py
py
6,430
python
en
code
0
github-code
13
17795197483
"""Client helpers that load customer rows from a text file and push them,
plus their airports/countries/languages, to a local REST API."""
import requests


# Load Clientes.txt into a 2-D list: one inner list of fields per client.
def read_data():
    dts=[]
    with open("files/Clientes.txt") as data:
        for lines in data:
            character=lines.replace("\n","")  # strip the line break
            dts.append(character.split(", "))  # fields are comma+space separated
    return dts


# POST every client row to the API, then sync airports/countries/languages.
def send_data(data):
    # single dict reused for every request; its values are overwritten each pass
    todo={}
    # endpoint we are working against
    api_url="http://localhost:8080/apiv1/clients/add"
    # each iteration rebinds the dict values to the next client row
    for i in range(len(data)):
        todo['firstname'] = data[i][0]
        todo['surname'] = data[i][1]
        todo['country'] = data[i][2]
        todo['language'] = data[i][3]
        todo['airport'] = data[i][4]
        # send the dict to the API (response body is decoded but discarded)
        response = requests.post(api_url, json=todo)
        response.json()
    # "" excludes nothing, so every stored value is (re)posted below
    airport("")
    country("")
    language("")


def show_data_clients():
    """Fetch and return the full client list from the API (decoded JSON)."""
    api_url="http://localhost:8080/apiv1/clients/listClients"
    response=requests.get(api_url)
    data = response.json()
    return data


def new_data():
    """Prompt for one client on stdin, POST it, then sync the lookup tables."""
    name=input("Name: ")
    surname=input("Surname: ")
    country=input("Country: ")
    language=input("Language: ")
    airport=input("airport: ")
    dts=[name,surname,country,language,airport]
    todo={}
    api_url="http://localhost:8080/apiv1/clients/add"
    todo['firstname'] = dts[0]
    todo['surname'] = dts[1]
    todo['country'] = dts[2]
    todo['language'] = dts[3]
    todo['airport'] = dts[4]
    # send the dict to the API
    response = requests.post(api_url, json=todo)
    response.json()
    airport(dts[4])
    country(dts[2])
    language(dts[3])


def airport(airl):
    # POST every client's airport (except ones equal to `airl`) to /airport/add.
    # NOTE(review): this compares each row individually and never deduplicates,
    # so repeated airports are posted repeatedly — TODO confirm server dedupes.
    data="http://localhost:8080/airport/add"
    todo={}
    dta = show_data_clients()
    print(dta)
    for i in range(len(dta)):
        print(dta[i]['airport'])
        if dta[i]['airport'] != str(airl):
            todo['arName'] = dta[i]['airport']
            response = requests.post(data, json=todo)
            response.json()


def country(country):
    # Same pattern as airport(): push every non-matching country value.
    # NOTE(review): parameter shadows the module-level name used in new_data.
    data="http://localhost:8080/country/add"
    todo={}
    dta = show_data_clients()
    print(dta)
    for i in range(len(dta)):
        print(dta[i]['country'])
        if dta[i]['country'] != str(country):
            todo['cName'] = dta[i]['country']
            response = requests.post(data, json=todo)
            response.json()


def language(language):
    # Same pattern as airport(): push every non-matching language value.
    data="http://localhost:8080/languages/add"
    todo={}
    dta = show_data_clients()
    print(dta)
    for i in range(len(dta)):
        print(dta[i]['language'])
        if dta[i]['language'] != str(language):
            todo['langName'] = dta[i]['language']
            response = requests.post(data, json=todo)
            response.json()
iamoscarb/python_java
files.py
files.py
py
3,074
python
en
code
0
github-code
13
17093739639
"""gRPC client demo for the scowl bootstrap service: requests a generator ID
and a consumer ID over an insecure channel."""
import logging

import grpc
import scowl_pb2
import scowl_pb2_grpc


def getGeneratorID(stub):
    """Request a 32-bit id.

    Today, this is a 32-bit hash given an input of an IPv4 address. For the
    next few decades the number of generators will be relatively low (e.g.,
    hundreds to low-thousands), so 32-bits is plenty.

    For DDOS resistance, the bootstrap server concatenates the received IP
    address with a nonce before hashing.

    In the future, GeneratorID could use an IPv6 address, or a 128-bit hash.
    This would require secondary load-balancing logic at the trackers.
    """
    # Hard-coded address used as the peer context for the demo.
    return stub.GeneratorJoin(scowl_pb2.PeerCtx(addr='192.168.0.1'))


def getConsumerID(stub):
    """Request a 128-bit id.

    Today, this is a 128-bit hash given an input of an IPv4 address.

    For DDOS resistance, the bootstrap server concatenates the received IP
    address with a nonce before hashing.

    In the future, ConsumerID could default to an IPv6 address. This would
    require secondary load-balancing logic at the trackers.
    """
    return stub.ConsumerJoin(scowl_pb2.PeerCtx(addr='192.168.0.2:3000'))


def run(dest_addr='localhost:50051'):
    """Open an insecure channel to the bootstrap server and fetch both IDs."""
    # NOTE(gRPC Python Team): .close() is possible on a channel and should be
    # used in circumstances in which the with statement does not fit the needs
    # of the code.
    with grpc.insecure_channel(dest_addr) as channel:
        stub = scowl_pb2_grpc.BootstrapStub(channel)
        print("-------------- Generator ID --------------")
        gen_id = getGeneratorID(stub)
        print("Received Generator ID: {}".format(gen_id.id))
        print("-------------- Consumer ID --------------")
        consumer_id = getConsumerID(stub)
        print("Received Consumer ID: {}".format(consumer_id.id))
        print("-------------- FIN --------------")


if __name__ == '__main__':
    logging.basicConfig()
    run()
jamesryancoleman/scowl
bootstrap_client.py
bootstrap_client.py
py
1,926
python
en
code
0
github-code
13
43575393614
"""Batch pipeline: read deduplicated track rows from CSV, fetch Spotify audio
features in batches of 50, and periodically append results to CSV files."""
import pandas as pd
from spotify.spotify_client import SpotifyClient
import time
import numpy as np

tracks_df = pd.read_csv("../output/spotify_artists_albums_tracks_output_full.csv")
print(f'{len(tracks_df)} tracks before deduplication')
tracks_df = tracks_df.drop_duplicates(subset=['track uri'])
print(f'{len(tracks_df)} tracks after deduplication')

# Column arrays pulled out once so batches can be sliced cheaply below.
artists_names_nd = tracks_df['artist name'].values
artists_uris_nd = tracks_df['artist uri'].values
albums_names_nd = tracks_df['album name'].values
albums_uris_nd = tracks_df['album uri'].values
tracks_names_nd = tracks_df['track name'].values
tracks_uris_nd = tracks_df['track uri'].values

# 50 is the Spotify audio-features endpoint batch limit — TODO confirm.
track_count_limit = 50
client = SpotifyClient()
result = []
start_batch_index = 0
batch_index = start_batch_index
iterations_count = int(len(tracks_df.index) / track_count_limit)

added_column_names = ['danceability', 'energy', 'loudness', 'speechiness', 'acousticness',
                      'instrumentalness', 'liveness', 'valence', 'tempo', 'duration']
full_column_names = tracks_df.columns.tolist() + added_column_names
# NOTE(review): partial_column_names is computed but never used here.
partial_column_names = [name for name in full_column_names if name not in ['artist uri', 'album uri', 'track uri']]

for lower_index in range(start_batch_index, len(tracks_df.index), track_count_limit):
    batch_index = batch_index + 1
    # Slice the current batch out of every column array.
    artists_names_batch = artists_names_nd[lower_index:lower_index + track_count_limit, ]
    artists_uris_batch = artists_uris_nd[lower_index:lower_index + track_count_limit, ]
    albums_names_batch = albums_names_nd[lower_index:lower_index + track_count_limit, ]
    albums_uris_batch = albums_uris_nd[lower_index:lower_index + track_count_limit, ]
    tracks_names_batch = tracks_names_nd[lower_index:lower_index + track_count_limit, ]
    tracks_uris_batch = tracks_uris_nd[lower_index:lower_index + track_count_limit, ]

    # Retry loop: on success fall through to `break`; on any exception retry.
    # NOTE(review): the bare `except: continue` retries forever on *any*
    # error (including bugs and KeyboardInterrupt-adjacent failures), not just
    # transient API errors — consider catching specific exceptions.
    while True:
        try:
            tracks_features = client.fetch_audio_features(tracks_uris_batch)
            for track_index, track_features in enumerate(tracks_features):
                result.append(
                    [artists_names_batch[track_index],
                     artists_uris_batch[track_index],
                     albums_names_batch[track_index],
                     albums_uris_batch[track_index],
                     tracks_names_batch[track_index],
                     tracks_uris_batch[track_index]] + track_features
                )

            # Every 1000 batches: checkpoint accumulated rows to a CSV and
            # reset the in-memory accumulator.
            if batch_index % 1000 == 0:
                print(f'DONE {batch_index} OUT OF {iterations_count} TRACK BATCHES\t{time.ctime(int(time.time()))}')
                result_nd = np.array(result)
                result_df = pd.DataFrame(result_nd, columns=full_column_names)
                with open(f'../temp/spotify_artists_albums_tracks_features_output_full_{batch_index}.csv', 'a',
                          encoding='utf-8') as file:
                    result_df.to_csv(file, header=False, index=False, encoding='utf-8')
                result = []
        except:
            continue
        break
Haydart/MusicRecommender
spotify/extract_features_pipeline.py
extract_features_pipeline.py
py
2,933
python
en
code
0
github-code
13
24101544716
"""Raspberry Pi script: drive an R-2R DAC via successive approximation to
read a capacitor's charge/discharge curve, then plot and save the samples."""
import RPi.GPIO as GPIO
import matplotlib.pyplot as plt
import time

# BCM pin numbers of the DAC ladder and the LED indicator outputs.
dac = [26, 19, 13, 6, 5, 11, 9, 10]
leds = [21, 20, 16, 12, 7, 8, 25, 24]
value_list = []

def dec2bin(dec):
    # 8-bit big-endian binary representation as a list of 0/1 ints.
    return [int(bit) for bit in bin(dec)[2:].zfill(8)]

def dec2leds(dec):
    # Mirror an 8-bit value onto the LED output pins.
    GPIO.output(leds,dec2bin(dec))

def adc():
    # Successive-approximation ADC: try each bit from MSB to LSB, keep it if
    # the comparator on GPIO4 stays high, otherwise clear it again.
    ans = 0
    for i in range(8):
        ans = ans + 2**(7-i)
        dec2leds(ans)
        time.sleep(0.001)  # let the DAC/comparator settle
        if GPIO.input(4) == 0:
            ans = ans - 2**(7-i)
    return ans

GPIO.setmode(GPIO.BCM)
GPIO.setup(leds, GPIO.OUT, initial = GPIO.LOW)
GPIO.setup(17, GPIO.OUT)  # charge-control pin
GPIO.setup(4, GPIO.IN)    # comparator input

try:
    begin = time.time()
    GPIO.output(17, 1)
    print("Конденсатор заряжается")
    # NOTE(review): adc() is called twice per iteration (once in the loop
    # condition, once for the stored value), so the tested sample and the
    # recorded sample are different conversions — TODO confirm intended.
    while adc() < 250:
        value_list.append(adc())
        dec2leds(value_list[-1])
        print(value_list[-1])
    GPIO.output(17, GPIO.LOW)
    print("Конденсатор разряжается")
    #while adc() > 1:
    # Record a fixed 10 samples of the discharge instead of the full decay.
    for i in range(10):
        value_list.append(adc())
        dec2leds(value_list[-1])
        print(value_list[-1])
    duration = time.time() - begin
    print("Duration = {:.3f} sec".format(duration))
    print("Sampling frequency = {:.1f} Hz".format(len(value_list)/duration))
    print("Period = {:.3f} sec".format(1/(len(value_list)/duration)))
    print("Voltage step = {:.3f} V".format(3.3/255))
    plt.plot(value_list)
    plt.show()
    # Persist the raw samples and the measurement settings.
    value_list_str = [str(item) for item in value_list]
    with open("data.txt", "w") as data:
        data.write("\n".join(value_list_str))
    with open("settings.txt", "w") as settings:
        settings.write("Duration = " + "".join(str(duration)) + " sec" + "\n")
        settings.write("Sampling frequency = " + "".join(str(len(value_list)/duration)) + " Hz" + "\n")
        settings.write("Period = " + "".join(str(1/(len(value_list)/duration))) + " sec" + "\n")
        settings.write("Voltage step = " + "".join(str(3.3/255)) + " V")
finally:
    # Always drop every output low and release the pins.
    GPIO.output(leds + [17], GPIO.LOW)
    GPIO.cleanup()
MrDoodler007/volkov-repo
Zameri/Zamerii.py
Zamerii.py
py
2,026
python
en
code
0
github-code
13
28989683683
""" 线程Event 同步互斥 """ from threading import Event from threading import Thread s = None # 用于通信 e = Event() # 事件对象 def fun01(): print("杨子荣前来拜山头") global s s = '天王盖地虎' e.set() # 操作完成共享 e 设置 # 创建线程对象 t = Thread(target=fun01) t.start() print("说对口令就是自己人") e.wait() # 阻塞等待 if s == '天王盖地虎': print("宝塔镇河妖") print("口令正确") else: print("嫩死他") t.join()
SmileAnage/Thread
thread_event.py
thread_event.py
py
524
python
en
code
0
github-code
13
34738643999
"""Capacitated facility-location solver: greedy initial assignment improved
by repeated local MIP re-optimization (OR-Tools CBC) over neighborhoods of
the 50 facilities nearest a random pivot."""
from __future__ import print_function

import sys
from ortools.linear_solver import pywraplp
from collections import namedtuple
import math
import numpy as np
from timeit import default_timer as timer


def eprint(*args, **kwargs):
    """print() to stderr, keeping stdout clean for the final answer."""
    print(*args, file=sys.stderr, **kwargs)


Point = namedtuple("Point", ['x', 'y'])
Facility = namedtuple(
    "Facility", ['index', 'setup_cost', 'capacity', 'location'])
Customer = namedtuple("Customer", ['index', 'demand', 'location'])


def length(point1, point2):
    """Euclidean distance between two Points."""
    return math.sqrt((point1.x - point2.x) ** 2 + (point1.y - point2.y) ** 2)


class Solution:
    """Holds the facility index assigned to each customer plus total cost."""

    def __init__(self):
        self.attendance: list[int] = []   # attendance[c] = facility serving customer c
        self.value: float = 0.0           # total cost (setup + distances)
        self.optimal: bool = False        # whether the value is proven optimal

    def __repr__(self):
        # Submission format: "<value> <optimal-flag>\n<assignments...>"
        result = f'{self.value} {int(self.optimal)}\n'
        result += ' '.join(map(str, self.attendance))
        return result


class Solver:
    """Greedy construction plus MIP-based large-neighborhood search."""

    def __init__(self, facilities: list[Facility], customers: list[Customer]):
        self.facilities = facilities
        self.customers = customers
        self.n_facilities = len(facilities)
        self.n_customers = len(customers)
        self.__init_distances()

    def __init_distances(self):
        """Precompute distance matrices and per-facility nearest-neighbor lists."""
        self.__DISTANCE_TO_CUSTOMERS = np.zeros(
            (self.n_facilities, self.n_customers))
        self.__DISTANCE_TO_FACILITIES = np.zeros(
            (self.n_facilities, self.n_facilities))
        # Compute distances from facilities to customers
        for f in range(self.n_facilities):
            for c in range(self.n_customers):
                self.__DISTANCE_TO_CUSTOMERS[f][c] = length(
                    self.facilities[f].location, self.customers[c].location)
            for other_f in range(self.n_facilities):
                self.__DISTANCE_TO_FACILITIES[f][other_f] = length(
                    self.facilities[f].location, self.facilities[other_f].location)
        # Create nearest neighbors matrix
        self.__NEAREST_NEIGHBORS = [
            list(range(self.n_facilities)) for _ in range(self.n_facilities)]
        for f in range(self.n_facilities):
            self.__NEAREST_NEIGHBORS[f].sort(
                key=lambda other_f: self.__DISTANCE_TO_FACILITIES[f][other_f])

    def __get_initial_solution(self) -> Solution:
        """Greedy start: give each customer the first facility with capacity."""
        solution = Solution()
        capacities = [f.capacity for f in self.facilities]
        opened = [False] * self.n_facilities
        for i, c in enumerate(self.customers):
            for j, f in enumerate(self.facilities):
                if capacities[j] >= c.demand:
                    capacities[j] -= c.demand
                    solution.attendance.append(f.index)
                    solution.value += length(c.location, f.location)
                    if not opened[j]:
                        # pay the setup cost the first time a facility is used
                        opened[j] = True
                        solution.value += f.setup_cost
                    break
            else:
                # no facility could take this customer: infeasible input
                raise RuntimeError("Data is incorrect")
        return solution

    def __find_optimal_solution(self, f, c, max_seconds=60):
        """Solve the sub-problem restricted to facilities `f` / customers `c`.

        `f` and `c` are lists of global indices.  Returns a Solution whose
        `attendance` is only meaningful at positions in `c` (others are 0),
        or None if CBC found no feasible solution within the time limit.
        """
        n_facilities = len(f)
        n_customers = len(c)
        # Init MIP solver
        solver = pywraplp.Solver('SolveIntegerProblem',
                                 pywraplp.Solver.CBC_MIXED_INTEGER_PROGRAMMING)
        # Init variables: attendance[i][j] = 1 iff local facility i serves
        # local customer j; opened[i] = 1 iff local facility i is open.
        attendance = [[solver.IntVar(0, 1, f'y_{i}_{j}') for j in range(
            n_customers)] for i in range(n_facilities)]
        opened = [solver.IntVar(0, 1, f'opened_{i}')
                  for i in range(n_facilities)]
        # Add condition: all customers must be served by exactly 1 facility
        for j in range(n_customers):
            solver.Add(sum([attendance[i][j]
                            for i in range(n_facilities)]) == 1)
        # Add condition: a customer must be assigned to an open facility.
        for i in range(n_facilities):
            for j in range(n_customers):
                solver.Add(attendance[i][j] <= opened[i])
        # Add condition: sum of demands <= capacity
        for i in range(n_facilities):
            solver.Add(sum([self.customers[c[j]].demand * attendance[i][j]
                            for j in range(n_customers)]) <= self.facilities[f[i]].capacity * opened[i])
        # Minimizing function
        objective = solver.Objective()
        # Objective: sum all the distance.
        for i in range(n_facilities):
            for j in range(n_customers):
                objective.SetCoefficient(attendance[i][j],
                                         length(self.facilities[f[i]].location,
                                                self.customers[c[j]].location))
        # Objective: sum all the setup cost.
        for j in range(n_facilities):
            objective.SetCoefficient(
                opened[j], self.facilities[f[j]].setup_cost)
        objective.SetMinimization()
        solver.SetTimeLimit(int(max_seconds * 1000))  # milliseconds
        status = solver.Solve()
        if status not in [pywraplp.Solver.FEASIBLE, pywraplp.Solver.OPTIMAL]:
            return None
        solution = Solution()
        solution.value = solver.Objective().Value()
        solution.attendance = [0] * self.n_customers
        # Translate the local (i, j) assignment back to global indices.
        for i in range(n_customers):
            for j in range(n_facilities):
                if int(attendance[j][i].solution_value()) == 1:
                    solution.attendance[c[i]] = f[j]
                    break
        return solution

    def solve(self, max_seconds: int = 60 * 60 * 2) -> Solution:
        """Iteratively improve the greedy solution until the time budget ends.

        NOTE(review): the subproblem's value counts setup costs only for the
        picked facilities, matching how old_value is computed — the exchange
        assumes customers outside the neighborhood keep their facilities.
        """
        start_time = timer()
        solution = self.__get_initial_solution()
        eprint(f'Initial value: {solution.value}')
        while True:
            # Pick one facility and choose k nearest to it
            pivot = np.random.choice(self.n_facilities)
            K_NEAREST = 50
            picked_facilities = self.__NEAREST_NEIGHBORS[pivot][:K_NEAREST]
            set_picked = set(picked_facilities)
            # Pick customers that are served by picked facilities
            picked_customers = [c for c in range(
                self.n_customers) if solution.attendance[c] in set_picked]
            # Compute old value
            old_value = 0
            opened = set(solution.attendance)
            for f in picked_facilities:
                if f in opened:
                    old_value += self.facilities[f].setup_cost
            for c in picked_customers:
                old_value += self.__DISTANCE_TO_CUSTOMERS[solution.attendance[c]][c]
            # Run MIP solver
            update = self.__find_optimal_solution(picked_facilities,
                                                  picked_customers)
            if update is None or update.value > old_value:
                continue
            # Splice the improved sub-assignment into the global solution.
            for c in picked_customers:
                solution.attendance[c] = update.attendance[c]
            solution.value -= old_value - update.value
            eprint(f'Found new value: {solution.value:.02f}', end='\r')
            end_time = timer()
            if end_time - start_time > max_seconds:
                eprint('Time limit exceeded.')
                break
        return solution


def parse_data(input_data):
    """Parse the contest input format into (facilities, customers)."""
    lines = input_data.split('\n')
    parts = lines[0].split()
    facility_count = int(parts[0])
    customer_count = int(parts[1])
    facilities = []
    for i in range(1, facility_count + 1):
        parts = lines[i].split()
        facilities.append(Facility(
            i - 1, float(parts[0]), int(parts[1]),
            Point(float(parts[2]), float(parts[3]))))
    customers = []
    for i in range(facility_count + 1, facility_count + 1 + customer_count):
        parts = lines[i].split()
        customers.append(Customer(
            i - 1 - facility_count, int(parts[0]),
            Point(float(parts[1]), float(parts[2]))))
    return facilities, customers


def solve_it(input_data) -> str:
    """Entry point expected by the grader: raw input in, formatted answer out."""
    solver = Solver(*parse_data(input_data))
    return str(solver.solve())


if __name__ == '__main__':
    if len(sys.argv) == 2:
        file_location = sys.argv[1].strip()
        with open(file_location, 'r') as input_data_file:
            input_data = input_data_file.read()
        # Answer files are named after the "<n_facilities> <n_customers>" header.
        answer_file = './answers/' + \
            '_'.join(input_data.split('\n')[0].split())
        with open(answer_file, 'w') as f:
            f.write(solve_it(input_data))
    else:
        print('This test requires an input file.  Please select one from the data directory. '
              '(i.e. python solver.py ./data/fl_16_2)')
s0mth1ng/Discrete_Optimization
week6/facility/mip.py
mip.py
py
8,368
python
en
code
0
github-code
13
43061162451
import matplotlib.pyplot as plt
import torch
import torch.nn
import torchvision
import numpy as np

__all__ = ['show_prob', 'show_image']


def show_prob(y_tup, y_true):
    """Plot class-probability bar charts side by side for several prediction sources.

    :param y_tup: tuple/list of numpy arrays, each of shape (num_inst, num_class);
        one entry per model/source to compare. Arrays, not tensors.
    :param y_true: array-like of ground-truth labels (one per instance), used as
        the subplot title. Array, not tensor.
    """
    num_comp = len(y_tup)
    # assert num_comp<=4 and num_comp>1, "Error! the number of comparison should be between 1 and 4"
    num_inst, num_class = y_tup[0].shape
    for i in range(num_inst):
        # One figure per instance, with one subplot per compared source.
        for j in range(num_comp):
            plt.subplot(1, num_comp, j + 1)
            plt.bar(np.arange(num_class), y_tup[j][i], align='center', width=0.8)
            plt.title(y_true[i])
        plt.show()


def show_image(X_tup, tup_name=None, channel_first=True):
    """Display image batches from several sources side by side.

    :param X_tup: tuple/list of numpy arrays of shape (batch, channel, H, W)
        when ``channel_first`` is True, otherwise (batch, H, W, channel).
    :param tup_name: optional sequence of titles, one per entry of ``X_tup``.
    :param channel_first: if True, inputs are transposed to channel-last
        before being handed to ``plt.imshow``.
    """
    num_comp = len(X_tup)
    num_inst = X_tup[0].shape[0]
    if channel_first:
        # Build a NEW list instead of assigning back into X_tup: the argument
        # may be a tuple (item assignment would raise TypeError) and mutating
        # the caller's container in place is an unwanted side effect.
        X_tup = [X.transpose([0, 2, 3, 1]) for X in X_tup]
    for i in range(num_inst):
        # One figure per instance, with one subplot per compared source.
        for j in range(num_comp):
            plt.subplot(1, num_comp, j + 1)
            plt.imshow(X_tup[j][i])
            if tup_name is not None:
                plt.title(tup_name[j])
            plt.axis('off')
        plt.show()
psr6275/adv_kl
utils/visualize.py
visualize.py
py
1,324
python
en
code
1
github-code
13
41490266819
#!/usr/bin/env python # coding: utf-8 # Explain the assumptions required to use ANOVA and provide examples of violations that could impact # the validity of the results. # # ANOVA (Analysis of Variance) is a statistical technique used to test for differences in means between two or more groups. ANOVA is based on several assumptions that need to be met in order for the results to be valid. These assumptions are: # # Independence: The observations in each group must be independent of each other. This means that the value of one observation should not affect the value of another observation. # # Normality: The data should be normally distributed within each group. This means that the distribution of scores within each group should be roughly bell-shaped. # # Homogeneity of Variance: The variances of the groups should be equal. This means that the spread of scores within each group should be roughly the same. # # Random Sampling: The groups should be randomly selected from the population. # What are the three types of ANOVA, and in what situations would each be used? # One-Way ANOVA: This type of ANOVA is used when there is one independent variable with three or more levels or groups. For example, if a researcher wants to compare the mean scores of three different groups on a single dependent variable, one-way ANOVA would be appropriate. # # Two-Way ANOVA: This type of ANOVA is used when there are two independent variables, and the interaction between these variables is of interest. For example, if a researcher wants to investigate the effects of two different treatments (independent variables) on a dependent variable, two-way ANOVA would be appropriate. # # Three-Way ANOVA: This type of ANOVA is used when there are three independent variables, and the interaction between these variables is of interest. 
For example, if a researcher wants to investigate the effects of different factors such as gender, age, and education level on a dependent variable, three-way ANOVA would be appropriate. # What is the partitioning of variance in ANOVA, and why is it important to understand this concept? # The partitioning of variance in ANOVA refers to the division of the total variance of a dependent variable into separate components that can be attributed to different sources or factors. This partitioning is essential for understanding the relative importance of these sources of variation and determining their contributions to the variation in the dependent variable. # # Understanding the partitioning of variance is important because it helps researchers to identify which factors are most important in explaining the variation in the dependent variable. This information can be used to develop theories about the underlying causes of the observed differences between groups and to guide future research. Moreover, partitioning the variance helps researchers to identify potential confounding variables that may affect the results of the analysis and control for them in subsequent analyses. # How would you calculate the total sum of squares (SST), explained sum of squares (SSE), and residual # sum of squares (SSR) in a one-way ANOVA using Python? 
# In[69]:


import numpy as np
from scipy.stats import f_oneway

group1 = np.array([1, 2, 3, 4, 5])
group2 = np.array([2, 4, 6, 8, 10])
group3 = np.array([3, 6, 9, 12, 15])

data = np.concatenate([group1, group2, group3])

f_statistic, p_value = f_oneway(group1, group2, group3)

# Total sum of squares: squared deviations of every observation from the grand mean.
sst = np.sum((data - np.mean(data)) ** 2)

# Explained (between-group) sum of squares: each group's squared deviation from
# the grand mean must be WEIGHTED by the group's size; without the weight the
# partition SST = SSE + SSR does not hold.
grand_mean = np.mean(data)
sse = sum(len(g) * (np.mean(g) - grand_mean) ** 2 for g in (group1, group2, group3))

# Residual (within-group) sum of squares, by the partition SST = SSE + SSR.
ssr = sst - sse

print("SST:", sst)
print("SSE:", sse)
print("SSR:", ssr)
print("F statistic:", f_statistic)
print("p-value:", p_value)


# In a two-way ANOVA, how would you calculate the main effects and interaction effects using Python?

# In[68]:


import pandas as pd
import statsmodels.api as sm
from statsmodels.formula.api import ols

data = pd.DataFrame({'dependent_variable': [5, 8, 9, 12, 7, 6, 10, 11, 15, 16, 14, 18],
                     'factor_A': ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'C', 'C', 'C', 'C'],
                     'factor_B': ['X', 'X', 'Y', 'Y', 'X', 'X', 'Y', 'Y', 'X', 'X', 'Y', 'Y']})

# Fit a model with both main effects and their interaction; type-2 ANOVA then
# attributes a sum of squares to each term.
model = ols('dependent_variable ~ factor_A + factor_B + factor_A:factor_B', data=data).fit()

main_effect_A = sm.stats.anova_lm(model, typ=2)['sum_sq']['factor_A']
main_effect_B = sm.stats.anova_lm(model, typ=2)['sum_sq']['factor_B']
interaction_effect = sm.stats.anova_lm(model, typ=2)['sum_sq']['factor_A:factor_B']

print("Main effect of factor A: ", main_effect_A)
print("Main effect of factor B: ", main_effect_B)
print("Interaction effect: ", interaction_effect)


# Suppose you conducted a one-way ANOVA and obtained an F-statistic of 5.23 and a p-value of 0.02.
# What can you conclude about the differences between the groups, and how would you interpret these
# results?
#
# If you conducted a one-way ANOVA and obtained an F-statistic of 5.23 and a p-value of 0.02, this indicates that there is a significant difference between the groups.
Specifically, it means that the variability between the group means is larger than would be expected by chance, and that the difference between at least two of the groups is statistically significant. # # The F-statistic is a ratio of the variability between the groups to the variability within the groups. A high F-statistic and a low p-value indicate that the variability between the groups is significantly greater than the variability within the groups. # # In this case, a p-value of 0.02 indicates that the probability of observing an F-statistic at least as large as 5.23 under the null hypothesis (i.e., that there is no difference between the group means) is only 0.02. Typically, a p-value threshold of 0.05 is used to determine statistical significance, which means that the probability of observing such a large F-statistic by chance is less than 5%. Therefore, we can reject the null hypothesis and conclude that there is a statistically significant difference between the groups. # In a repeated measures ANOVA, how would you handle missing data, and what are the potential # consequences of using different methods to handle missing data? # Handling missing data in a repeated measures ANOVA can be challenging since the repeated nature of the data means that missing values may be correlated with the outcome variable or with other predictor variables. There are several methods for handling missing data, each with its own advantages and disadvantages: # # Complete case analysis: This involves analyzing only the cases that have complete data for all variables of interest. The advantage of this method is that it is simple to implement, and it can produce unbiased estimates if the missing data are missing completely at random (MCAR). However, this method can result in a loss of statistical power and precision if the amount of missing data is large or if the missing data are not MCAR. 
# # Pairwise deletion: This involves analyzing all available data for each variable separately, ignoring missing data in the other variables. The advantage of this method is that it maximizes the use of available data, but it can also result in a loss of statistical power and precision if the amount of missing data is large or if the missing data are not MCAR. # # Imputation: This involves replacing missing values with estimated values based on the observed data. There are several methods of imputation, including mean imputation, regression imputation, and multiple imputation. The advantage of this method is that it can produce unbiased estimates and increase statistical power and precision. However, the validity of the imputed values depends on the imputation model, and imputation can introduce additional variability into the data. # What are some common post-hoc tests used after ANOVA, and when would you use each one? Provide # an example of a situation where a post-hoc test might be necessary. # Post-hoc tests are used after conducting an ANOVA to determine which specific group means differ significantly from each other. Some common post-hoc tests include: # # Tukey's HSD: This test is used to compare all possible pairs of means and control the family-wise error rate. It is a conservative test that is suitable when the sample sizes are equal and the variances are homogenous. # # Bonferroni correction: This test is used to control the family-wise error rate by adjusting the alpha level for multiple comparisons. It is a conservative test that is suitable when the sample sizes are small and the variances are heterogeneous. # # Scheffe's test: This test is used to control the family-wise error rate by adjusting the alpha level for multiple comparisons. It is a more liberal test that is suitable when the sample sizes are unequal or the variances are heterogeneous. 
# # Games-Howell test: This test is used when the assumptions of equal variances and normality are violated. It is a more liberal test that does not assume equal variances or sample sizes. # # Dunnett's test: This test is used to compare multiple treatments to a control group. It is a more powerful test than Bonferroni correction and is suitable when there is a clear control group. # # Post-hoc tests may be necessary when a significant F-test is obtained in an ANOVA, indicating that there is a significant difference among the groups, but we do not know which specific groups differ from each other. For example, suppose we conduct an ANOVA on a dataset comparing the effectiveness of three different treatments for reducing blood pressure. If the ANOVA reveals a significant difference among the treatments, we would need to use a post-hoc test to determine which specific treatments differ from each other. # # # A researcher wants to compare the mean weight loss of three diets: A, B, and C. They collect data from # 50 participants who were randomly assigned to one of the diets. Conduct a one-way ANOVA using Python # to determine if there are any significant differences between the mean weight loss of the three diets. # Report the F-statistic and p-value, and interpret the results. 
# In[70]: import scipy.stats as stats weight_loss_A = [3.2, 4.5, 1.2, 2.3, 2.8, 3.9, 4.1, 3.3, 1.8, 2.1, 2.5, 3.7, 4.4, 2.6, 1.7, 2.9, 3.1, 4.8, 3.5, 2.7, 3.8, 2.2, 2.9, 1.5, 2.6] weight_loss_B = [2.5, 1.8, 3.9, 3.3, 2.7, 1.5, 2.2, 2.9, 1.7, 3.5, 3.1, 4.5, 2.8, 3.2, 2.6, 2.1, 1.2, 2.3, 2.6, 1.4, 2.4, 3.8, 4.1, 3.7, 4.4] weight_loss_C = [1.3, 3.5, 2.7, 2.9, 1.2, 2.5, 3.8, 4.5, 2.8, 1.5, 2.1, 1.7, 3.3, 3.1, 4.4, 2.6, 2.2, 3.9, 3.7, 2.3, 1.8, 3.2, 2.9, 1.7, 2.6] f_statistic, p_value = stats.f_oneway(weight_loss_A, weight_loss_B, weight_loss_C) print("F-Statistic: ", f_statistic) print("P-Value: ", p_value) # A company wants to know if there are any significant differences in the average time it takes to # complete a task using three different software programs: Program A, Program B, and Program C. They # randomly assign 30 employees to one of the programs and record the time it takes each employee to # complete the task. Conduct a two-way ANOVA using Python to determine if there are any main effects or # interaction effects between the software programs and employee experience level (novice vs. # experienced). Report the F-statistics and p-values, and interpret the results. # In[81]: import numpy as np import pandas as pd import statsmodels.api as sm from statsmodels.formula.api import ols np.random.seed(1234) n = 30 programs = ['A', 'B', 'C'] exp_levels = ['novice', 'experienced'] data = pd.DataFrame({'program': np.random.choice(programs, n), 'exp_level': np.random.choice(exp_levels, n), 'time': np.random.normal(10, 2, n)}) model = ols('time ~ C(program) + C(exp_level) + C(program):C(exp_level)', data=data).fit() print(model.summary()) # An educational researcher is interested in whether a new teaching method improves student test # scores. They randomly assign 100 students to either the control group (traditional teaching method) or the # experimental group (new teaching method) and administer a test at the end of the semester. 
Conduct a # two-sample t-test using Python to determine if there are any significant differences in test scores # between the two groups. If the results are significant, follow up with a post-hoc test to determine which # group(s) differ significantly from each other. # In[83]: import numpy as np from scipy import stats np.random.seed(1234) n = 100 control_scores = np.random.normal(70, 10, n) exp_scores = np.random.normal(75, 10, n) t_stat, p_value = stats.ttest_ind(control_scores, exp_scores) if p_value < 0.05: print("There is a significant difference between the control and experimental groups (p = {:.3f}).".format(p_value)) else: print("There is no significant difference between the control and experimental groups (p = {:.3f}).".format(p_value)) from statsmodels.stats.multicomp import pairwise_tukeyhsd tukey_results = pairwise_tukeyhsd(np.concatenate([control_scores, exp_scores]), np.concatenate([np.repeat('control', n), np.repeat('experimental', n)]), alpha=0.05) print(tukey_results) # A researcher wants to know if there are any significant differences in the average daily sales of three # retail stores: Store A, Store B, and Store C. They randomly select 30 days and record the sales for each store # on those days. Conduct a repeated measures ANOVA using Python to determine if there are any # # significant differences in sales between the three stores. If the results are significant, follow up with a post- # hoc test to determine which store(s) differ significantly from each other. 
# In[85]: import numpy as np import pandas as pd import statsmodels.api as sm from statsmodels.formula.api import ols np.random.seed(1234) n_days = 30 sales_a = np.random.normal(1000, 100, n_days) sales_b = np.random.normal(1200, 150, n_days) sales_c = np.random.normal(800, 80, n_days) df = pd.DataFrame({ 'store': np.repeat(['A', 'B', 'C'], n_days), 'sales': np.concatenate([sales_a, sales_b, sales_c]) }) model = ols('sales ~ store', data=df).fit() anova_table = sm.stats.anova_lm(model, typ=2) if anova_table['PR(>F)']['store'] < 0.05: print("There is a significant difference in sales between the three stores (p = {:.3f}).".format(anova_table['PR(>F)']['store'])) else: print("There is no significant difference in sales between the three stores (p = {:.3f}).".format(anova_table['PR(>F)']['store'])) tukey_results = pairwise_tukeyhsd(df['sales'], df['store'], alpha=0.05) print(tukey_results) # In[ ]:
Rach2312/Python
13 Mar_AssQ.py
13 Mar_AssQ.py
py
14,924
python
en
code
0
github-code
13
65862183
import numpy as np
import scipy as sp
from scipy.optimize import leastsq
import matplotlib.pyplot as plt


def real_fun(x):
    """Ground-truth target function: sin(2*pi*x)."""
    return np.sin(2 * np.pi * x)


def fit_fun(p, x):
    """Evaluate the polynomial with coefficient vector ``p`` at points ``x``."""
    return np.poly1d(p)(x)


def res_fun(p, x, y_real):
    """Plain residuals between the polynomial fit and the observations."""
    return fit_fun(p, x) - y_real


def res_fun_addregular(p, x, y_real):
    """Residuals augmented with an L2 penalty on the coefficients."""
    regularization = 0.0001
    residuals = fit_fun(p, x) - y_real
    penalty = np.sqrt(0.5 * regularization * np.square(p))
    return np.append(residuals, penalty)


def fitting(M, x, y, mode):
    """Least-squares fit of a degree-M polynomial to (x, y).

    ``mode == "regular"`` adds the L2 regularization term to the residuals.
    Returns the fitted coefficient vector (length M+1).
    """
    residual = res_fun_addregular if mode == "regular" else res_fun
    p_init = np.random.rand(M + 1)  # random start for the M+1 coefficients
    p_lsq = leastsq(residual, p_init, args=(x, y))
    print("多项式的参数", p_lsq[0])
    return p_lsq[0]


def inference(testpoints, trained_parameters, x, y):
    """Plot the true curve, the fitted polynomial and the noisy training samples."""
    plt.plot(testpoints, real_fun(testpoints), label="real")
    plt.plot(testpoints, fit_fun(trained_parameters, testpoints), label="fitted")
    plt.plot(x, y, 'bo', label='noise')
    plt.show()


def main():
    x = np.linspace(0, 1, 10)
    testpoints = np.linspace(0, 1, 1000)
    # Noisy observations of the true function.
    y = [np.random.normal(0, 0.1) + value for value in real_fun(x)]
    parameters = fitting(3, x, y, mode="regular")
    inference(testpoints, parameters, x, y)


if __name__ == '__main__':
    main()
HitAgain/Machine-Learning-practice
Least_Suqare_Method/Lsq_no_regular.py
Lsq_no_regular.py
py
1,466
python
en
code
2
github-code
13
13335225805
from fastapi.testclient import TestClient
from main import app


def test_ping():
    """GET /ping must answer 200 with the pong payload."""
    with TestClient(app) as client:
        response = client.get("/ping")
        # The route is healthy iff it returns exactly {"ping": "pong"}.
        assert response.status_code == 200
        assert response.json() == {"ping": "pong"}
swapnam77/PGCSEDS-IIITH-hackathon-3
test_app.py
test_app.py
py
362
python
en
code
0
github-code
13
72839134418
from .model_pomm import PommNet
from .model_generic import CNNBase, MLPBase
from .policy import Policy


def create_policy(obs_space, nn_kwargs=None, train=True):
    """Build a Policy wrapping a PommNet for the given observation space.

    :param obs_space: gym-style space; only its ``.shape`` is used.
    :param nn_kwargs: optional dict of extra keyword arguments forwarded to
        ``PommNet``; ``None`` (the default) means no extras.
    :param train: put the network in train mode (True) or eval mode (False).
    :return: a ``Policy`` wrapping the constructed network.
    """
    # None sentinel instead of a mutable {} default argument.
    if nn_kwargs is None:
        nn_kwargs = {}
    nn = PommNet(obs_shape=obs_space.shape, **nn_kwargs)
    if train:
        nn.train()
    else:
        nn.eval()
    return Policy(nn)
JacobPjetursson/Pommerman_Project
src/models/factory.py
factory.py
py
350
python
en
code
1
github-code
13
27733012523
from vsc.utils import fancylogger from easybuild.tools.build_log import EasyBuildError _log = fancylogger.getLogger('easyconfig.default', fname=False) # we use a tuple here so we can sort them based on the numbers ALL_CATEGORIES = { 'HIDDEN': (-1, 'hidden'), 'MANDATORY': (0, 'mandatory'), 'CUSTOM': (1, 'easyblock-specific'), 'TOOLCHAIN': (2, 'toolchain'), 'BUILD': (3, 'build'), 'FILEMANAGEMENT': (4, 'file-management'), 'DEPENDENCIES': (5, 'dependencies'), 'LICENSE': (6, 'license'), 'EXTENSIONS': (7, 'extensions'), 'MODULES': (8, 'modules'), 'OTHER': (9, 'other'), } # define constants so they can be used below # avoid that pylint complains about unknown variables in this file # pylint: disable=E0602 globals().update(ALL_CATEGORIES) # List of tuples. Each tuple has the following format (key, [default, help text, category]) DEFAULT_CONFIG = { # MANDATORY easyconfig parameters 'description': [None, 'A short description of the software', MANDATORY], 'homepage': [None, 'The homepage of the software', MANDATORY], 'name': [None, "Name of software", MANDATORY], 'toolchain': [None, 'Name and version of toolchain', MANDATORY], 'version': [None, "Version of software", MANDATORY], # TODO not yet in MANDATORY_PARAMS, so not enforced (only enforced in v2) 'software_license': [None, 'Software license', MANDATORY], 'software_license_urls': [None, 'List of software license locations', MANDATORY], # TODO not yet in MANDATORY_PARAMS, so not enforced (only enforced in v2) 'docurls': [None, 'List of urls with documentation of the software (not necessarily on homepage)', MANDATORY], # TOOLCHAIN easyconfig parameters 'onlytcmod': [False, ('Boolean/string to indicate if the toolchain should only load ' 'the environment with module (True) or also set all other ' 'variables (False) like compiler CC etc (if string: comma ' 'separated list of variables that will be ignored).'), TOOLCHAIN], 'toolchainopts': [None, 'Extra options for compilers', TOOLCHAIN], # BUILD easyconfig parameters 
'buildopts': ['', 'Extra options passed to make step (default already has -j X)', BUILD], 'checksums': [[], "Checksums for sources and patches", BUILD], 'configopts': ['', 'Extra options passed to configure (default already has --prefix)', BUILD], 'easyblock': [None, "EasyBlock to use for building; if set to None, an easyblock is selected " "based on the software name", BUILD], 'easybuild_version': [None, "EasyBuild-version this spec-file was written for", BUILD], 'installopts': ['', 'Extra options for installation', BUILD], 'maxparallel': [None, 'Max degree of parallelism', BUILD], 'parallel': [None, ('Degree of parallelism for e.g. make (default: based on the number of ' 'cores, active cpuset and restrictions in ulimit)'), BUILD], 'patches': [[], "List of patches to apply", BUILD], 'prebuildopts': ['', 'Extra options pre-passed to build command.', BUILD], 'preconfigopts': ['', 'Extra options pre-passed to configure.', BUILD], 'preinstallopts': ['', 'Extra prefix options for installation.', BUILD], 'postinstallcmds': [[], 'Commands to run after the install step.', BUILD], 'runtest': [None, ('Indicates if a test should be run after make; should specify argument ' 'after make (for e.g.,"test" for make test)'), BUILD], 'sanity_check_commands': [[], ("format: [(name, options)] e.g. [('gzip','-h')]. " "Using a non-tuple is equivalent to (name, '-h')"), BUILD], 'sanity_check_paths': [{}, ("List of files and directories to check " "(format: {'files':<list>, 'dirs':<list>})"), BUILD], 'skip': [False, "Skip existing software", BUILD], 'skipsteps': [[], "Skip these steps", BUILD], 'source_urls': [[], "List of URLs for source files", BUILD], 'sources': [[], "List of source files", BUILD], 'stop': [None, 'Keyword to halt the build process after a certain step.', BUILD], 'tests': [[], ("List of test-scripts to run after install. 
A test script should return a " "non-zero exit status to fail"), BUILD], 'unpack_options': ['', "Extra options for unpacking source", BUILD], 'unwanted_env_vars': [[], "List of environment variables that shouldn't be set during build", BUILD], 'versionprefix': ['', ('Additional prefix for software version ' '(placed before version and toolchain name)'), BUILD], 'versionsuffix': ['', 'Additional suffix for software version (placed after toolchain name)', BUILD], # FILEMANAGEMENT easyconfig parameters 'buildininstalldir': [False, ('Boolean to build (True) or not build (False) in the installation directory'), FILEMANAGEMENT], 'cleanupoldbuild': [True, ('Boolean to remove (True) or backup (False) the previous build ' 'directory with identical name or not.'), FILEMANAGEMENT], 'cleanupoldinstall': [True, ('Boolean to remove (True) or backup (False) the previous install ' 'directory with identical name or not.'), FILEMANAGEMENT], 'dontcreateinstalldir': [False, ('Boolean to create (False) or not create (True) the install directory'), FILEMANAGEMENT], 'keeppreviousinstall': [False, ('Boolean to keep the previous installation with identical ' 'name. Experts only!'), FILEMANAGEMENT], 'keepsymlinks': [False, ('Boolean to determine whether symlinks are to be kept during copying ' 'or if the content of the files pointed to should be copied'), FILEMANAGEMENT], 'start_dir': [None, ('Path to start the make in. If the path is absolute, use that path. 
' 'If not, this is added to the guessed path.'), FILEMANAGEMENT], # DEPENDENCIES easyconfig parameters 'allow_system_deps': [[], "Allow listed system dependencies (format: (<name>, <version>))", DEPENDENCIES], 'builddependencies': [[], "List of build dependencies", DEPENDENCIES], 'dependencies': [[], "List of dependencies", DEPENDENCIES], 'hiddendependencies': [[], "List of dependencies available as hidden modules", DEPENDENCIES], 'osdependencies': [[], "OS dependencies that should be present on the system", DEPENDENCIES], # LICENSE easyconfig parameters 'group': [None, "Name of the user group for which the software should be available", LICENSE], 'key': [None, 'Key for installing software', LICENSE], 'license_file': [None, 'License file for software', LICENSE], 'license_server': [None, 'License server for software', LICENSE], 'license_server_port': [None, 'Port for license server', LICENSE], # EXTENSIONS easyconfig parameters 'exts_classmap': [{}, "Map of extension name to class for handling build and installation.", EXTENSIONS], 'exts_defaultclass': [None, "List of module for and name of the default extension class", EXTENSIONS], 'exts_filter': [None, ("Extension filter details: template for cmd and input to cmd " "(templates for name, version and src)."), EXTENSIONS], 'exts_list': [[], 'List with extensions added to the base installation', EXTENSIONS], # MODULES easyconfig parameters 'modextrapaths': [{}, "Extra paths to be prepended in module file", MODULES], 'modextravars': [{}, "Extra environment variables to be added to module file", MODULES], 'modloadmsg': [{}, "Message that should be printed when generated module is loaded", MODULES], 'modluafooter': ["", "Footer to include in generated module file (Lua syntax)", MODULES], 'modtclfooter': ["", "Footer to include in generated module file (Tcl syntax)", MODULES], 'modaliases': [{}, "Aliases to be defined in module file", MODULES], 'moduleclass': ['base', 'Module class to be used for this software', MODULES], 
'moduleforceunload': [False, 'Force unload of all modules when loading the extension', MODULES], 'moduleloadnoconflict': [False, "Don't check for conflicts, unload other versions instead ", MODULES], 'include_modpath_extensions': [True, "Include $MODULEPATH extensions specified by module naming scheme.", MODULES], # OTHER easyconfig parameters 'buildstats': [None, "A list of dicts with build statistics", OTHER], } def sorted_categories(): """ returns the categories in the correct order """ categories = ALL_CATEGORIES.values() categories.sort(key=lambda c: c[0]) return categories def get_easyconfig_parameter_default(param): """Get default value for given easyconfig parameter.""" if param not in DEFAULT_CONFIG: raise EasyBuildError("Unkown easyconfig parameter: %s (known: %s)", param, sorted(DEFAULT_CONFIG.keys())) else: _log.debug("Returning default value for easyconfig parameter %s: %s" % (param, DEFAULT_CONFIG[param][0])) return DEFAULT_CONFIG[param][0]
ULHPC/modules
easybuild/easybuild-framework/easybuild/framework/easyconfig/default.py
default.py
py
9,225
python
en
code
2
github-code
13
28568754981
import sqlalchemy from pibble.database.orm import ( ORMObjectBase, ORMBuilder, ORMEncryptedStringType, ORMVariadicType, ORMEncryptedVariadicType, ORM, ) from pibble.util.log import DebugUnifiedLoggingContext from pibble.util.helpers import Assertion, expect_exception from pibble.api.exceptions import BadRequestError, PermissionError class ORMTestBase(ORMObjectBase): pass class Page(ORMTestBase): __tablename__ = "page" id = sqlalchemy.Column( sqlalchemy.Integer, sqlalchemy.Sequence("page_id_sequence"), primary_key=True ) text = sqlalchemy.Column(sqlalchemy.Text) password = sqlalchemy.Column(sqlalchemy.String, nullable=True) # Hidden column encrypted_field = sqlalchemy.Column(ORMEncryptedStringType) # Two-way encrypt variadic_field = sqlalchemy.Column(ORMVariadicType) variadic_encrypted = sqlalchemy.Column(ORMEncryptedVariadicType) Page.Hide(columns=["password"]) class Keyword(ORMTestBase): __tablename__ = "keyword" id = sqlalchemy.Column( sqlalchemy.Integer, sqlalchemy.Sequence("keyword_id_sequence"), primary_key=True ) name = sqlalchemy.Column(sqlalchemy.String) class KeywordRelationship(ORMTestBase): keyword_id = sqlalchemy.Column( Keyword.ForeignKey("id", ondelete="CASCADE", onupdate="CASCADE"), primary_key=True, ) id = sqlalchemy.Column( sqlalchemy.Integer, sqlalchemy.Sequence("keyword_id_sequence"), primary_key=True ) keyword = Keyword.Relationship(backref="hidden_relationships") Keyword.Hide(relationships=["hidden_relationships"]) class PageKeywords(ORMTestBase): __tablename__ = "page_keywords" page_id = sqlalchemy.Column(Page.ForeignKey("id"), primary_key=True) keyword_id = sqlalchemy.Column(Keyword.ForeignKey("id"), primary_key=True) page = Page.Relationship(backref="PageKeywords") keyword = Keyword.Relationship(backref="PageKeywords") def test_oop(orm: ORM) -> None: with orm.session() as session: page_1 = session.add( orm.Page( text="text1", encrypted_field="encrypted1", variadic_field=True, variadic_encrypted=4, ) ) page_2 = session.add( 
orm.models["Page"]( text="text2", encrypted_field="encrypted2", variadic_field=10.0, variadic_encrypted=[None, "null", {"key": []}], ) ) # Other syntax session.commit() keyword_1 = session.add(orm.Keyword(name="keyword1")) keyword_2 = session.add(orm.Keyword(name="keyword2")) keyword_3 = session.add(orm.Keyword(name="keyword3")) session.commit() pk_1 = session.add(orm.PageKeywords(page_id=page_1.id, keyword_id=keyword_1.id)) pk_2 = session.add(orm.PageKeywords(page_id=page_1.id, keyword_id=keyword_2.id)) pk_3 = session.add(orm.PageKeywords(page_id=page_2.id, keyword_id=keyword_1.id)) pk_4 = session.add(orm.PageKeywords(page_id=page_2.id, keyword_id=keyword_3.id)) session.commit() Assertion(Assertion.EQ)( ["keyword1", "keyword2"], [pk.keyword.name for pk in page_1.PageKeywords] ) Assertion(Assertion.EQ)( ["keyword1", "keyword3"], [pk.keyword.name for pk in page_2.PageKeywords] ) expected_format_response = { "type": "Page", "attributes": { "text": "text1", "id": 1, "encrypted_field": "encrypted1", "variadic_field": True, "variadic_encrypted": 4, }, "include": { "PageKeywords": [ { "type": "PageKeywords", "attributes": {"page_id": 1, "keyword_id": 1}, "include": { "keyword": [ { "type": "Keyword", "attributes": {"id": 1, "name": "keyword1"}, } ] }, }, { "type": "PageKeywords", "attributes": {"page_id": 1, "keyword_id": 2}, "include": { "keyword": [ { "type": "Keyword", "attributes": {"id": 2, "name": "keyword2"}, } ] }, }, ] }, } Assertion(Assertion.EQ, diff_split_on=",")( page_1.format(include=["PageKeywords", "PageKeywords.keyword"]), expected_format_response, ) expected_format_response_2 = { "type": "Page", "attributes": { "text": "text2", "id": 2, "encrypted_field": "encrypted2", "variadic_field": 10.0, "variadic_encrypted": [None, None, {"key": []}], }, } Assertion(Assertion.EQ, diff_split_on=",")( page_2.format(), expected_format_response_2 ) # Make sure encryption worked should_be_encrypted = orm.engine.execute( "SELECT encrypted_field FROM page WHERE id = 
{0}".format(page_1.id) ).fetchone()[0] Assertion(Assertion.NEQ)(page_1.encrypted_field, should_be_encrypted) if getattr(orm, "cipher", None) is None: raise ValueError("ORM cipher not instantiated.") Assertion(Assertion.EQ)( page_1.encrypted_field, orm.cipher.decrypt(should_be_encrypted), # type: ignore ) expect_exception(PermissionError)( lambda: page_1.format( include=[ "PageKeywords", "PageKeywords.keyword", "PageKeywords.keyword.hidden_relationships", ] ) ) expect_exception(BadRequestError)( lambda: page_1.format(include=["a_bad_relationship"]) ) expected_format_response["see"] = [ # type: ignore dict([(key, item[key]) for key in item if key != "include"]) # type: ignore for item in expected_format_response["include"]["PageKeywords"] # type: ignore ] del expected_format_response["include"] page_1.see(pk_1, pk_2) Assertion(Assertion.EQ, diff_split_on=",")( page_1.format(), expected_format_response ) def main() -> None: with DebugUnifiedLoggingContext(): orm = ORMBuilder("sqlite", base=ORMTestBase) orm.migrate() test_oop(orm) if __name__ == "__main__": main()
painebenjamin/pibble
test/2_orm.py
2_orm.py
py
7,027
python
en
code
1
github-code
13
1732091140
""" Genius Thin wrapper around the Genius API """ from __future__ import print_function from functools import wraps import requests def textformat(func): "Add text_format value to kwargs if not supplied" @wraps(func) def inner(*args, **kwargs): "Add text_format to kwargs" try: tformat = kwargs['text_format'] except KeyError: tformat = 'dom' if tformat.lower() not in ['dom', 'html', 'plain']: raise TypeError("Optional arg 'text_format' can only be one of 'dom', 'html' or 'plain'") kwargs['text_format'] = tformat.lower() return func(*args, **kwargs) return inner class Genius(object): """ Thin wrapper around Genius API If all goes well, each method returns a dictionary of JSON data from the Genius REST API """ def __init__(self, access_token): _auth = {'Authorization': "Bearer {}".format(access_token)} self._session = requests.Session() self._session.headers.update(_auth) self.prefix = "https://api.genius.com" def __internal_call(self, method, url, params=None): response = self._session.request(method, url, params=params) if response.status_code == 200: return response.json() else: response.raise_for_status() def search(self, query): "Search documents hosted on Genius" url = "{}/search".format(self.prefix) payload = {"q": query} return self.__internal_call('GET', url, params=payload) @textformat def get_annotations(self, _id, text_format=None): "Data for a specific annotation." url = "{}/annotations/{}".format(self.prefix, _id) payload = {"text_format": text_format} return self.__internal_call('GET', url, params=payload) @textformat def get_referents(self, created_by_id, song_id=None, web_page_id=None, text_format=None, per_page=None, page=None): "Referents by content item or user responsible for an included annotation." 
# locals needs to be called first to limit its contents args = locals() if song_id and web_page_id: raise TypeError("You may pass only one of 'song_id' and 'web_page_id', not both.") # reduce dictionary of kwarg names and values to only those with values payload = { key : value for key, value in args.items() if value and key not in [ 'self', 'created_by_id' ]} print(payload) url = "{}/referents/{}".format(self.prefix, created_by_id) return self.__internal_call('GET', url, params=payload) @textformat def get_song(self, _id, text_format=None): "Data for a specific song." url = "{}/songs/{}".format(self.prefix, _id) payload = {"text_format": text_format} return self.__internal_call('GET', url, params=payload) @textformat def get_artist(self, _id, text_format=None): "Data for a specific artist." url = "{}/artists/{}".format(self.prefix, _id) payload = {"text_format": text_format} return self.__internal_call('GET', url, params=payload) def get_artist_songs(self, _id, sort=None, page=None, per_page=None): """Documents (songs) for the artist specified. 
By default, 20 items are returned for each request.""" # locals needs to be called first to limit its contents args = locals() # reduce dictionary of kwarg names and values to only those with values payload = { key : value for key, value in args.items() if value and key not in [ 'self', '_id' ]} print(payload) url = "{}/artists/{}/songs".format(self.prefix, _id) return self.__internal_call('GET', url, params=payload) def get_web_pages(self, raw_annotatable_url=None, canonical_url=None, og_url=None): # locals needs to be called first to limit its contents args = locals() if not(raw_annotatable_url or canonical_url or og_url): raise TypeError("Provide as many of the following variants of the URL as possible:\n 'raw_annotatable_url', 'canonical_url', 'og_url'") url = "{}/web_pages/lookup".format(self.prefix) # reduce dictionary of kwarg names and values to only those with values payload = { key : value for key, value in args.items() if value and key != 'self' } print(payload) return self.__internal_call('GET', url, params=payload) """ Methods requiring scopes. Not available when using a client_access_token thus not implemented. @textformat def get_account(self, text_format=None): #Account information for the currently authenticated user. #Requires scope: me url = "{}/account".format(self.prefix) return self.__internal_call('GET', url) def post_annotation(self): pass def update_annotation(self, _id): pass def delete_annotation(self, _id): pass def upvote_annotation(self, _id): pass def downvote_annotation(self, _id): pass def unvote_annotation(self, _id): pass """
emilkloeden/py-genius
py_genius/py_genius.py
py_genius.py
py
5,449
python
en
code
1
github-code
13
72775411537
from tgtg import TgtgClient from json import load, dump import requests import schedule import time import os # For remote deployment, the credentials are stored as environment variables in Heroku # Try to load the credentials remotely first. If this false, look for a local file # Try to first load credentials from environment credentials_remote_loaded = False try: # Credential handling heroku credentials = dict() credentials['email'] = os.environ['TGTG_EMAIL'] print(f"tgtg_email: {credentials['email']}") credentials['password'] = os.environ['TGTG_PW'] print(f"tgtg_pw: {credentials['password']}") telegram = dict() telegram['bot_chatID1'] = os.environ['TELEGRAM_BOT_CHATID1'] print(f"TELEGRAM_BOT_CHATID1: {telegram['bot_chatID1']}") telegram['bot_chatID2'] = os.environ['TELEGRAM_BOT_CHATID2'] print(f"TELEGRAM_BOT_CHATID2: {telegram['bot_chatID2']}") telegram['bot_token'] = os.environ['TELEGRAM_BOT_TOKEN'] print(f"TELEGRAM_BOT_TOKEN: {telegram['bot_token']}") credentials_remote_loaded = True except: print("No credentials found in Heroku environment") if credentials_remote_loaded == False: try: # Credential handling local version # Load tgtg account credentials from a hidden file f = open('telegram.json',) telegram = load(f) f.close() # Load tgtg account credentials from a hidden file f = open('credentials.json',) credentials = load(f) f.close() except: print("No files found for local credentials.") # Create the tgtg client with my credentials client = TgtgClient(email=credentials['email'], password=credentials['password']) # Init the favourites in stock list as a global variable favourites_in_stock = list() def telegram_bot_sendtext(bot_message, only_to_admin=False): """ Helper function: Send a message with the specified telegram bot. 
It can be specified if both users or only the admin receives the message Follow this article to figure out a specific chatID: https://medium.com/@ManHay_Hong/how-to-create-a-telegram-bot-and-send-messages-with-python-4cf314d9fa3e """ if only_to_admin: # ChadID1 is the admin chatIDlist = [telegram["bot_chatID1"]] else: chatIDlist = [telegram["bot_chatID1"], telegram["bot_chatID2"]] for id in chatIDlist: bot_token = telegram["bot_token"] send_text = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + id + '&parse_mode=Markdown&text=' + bot_message response = requests.get(send_text) return response.json() def telegram_bot_sendimage(image_url, image_caption=None): """ For sending an image in Telegram, that can also be accompanied by an image caption """ # Send the message to both users chatIDlist = [telegram["bot_chatID1"], telegram["bot_chatID2"]] for id in chatIDlist: bot_token = telegram["bot_token"] # Prepare the url for an telegram API call to send a photo send_text = 'https://api.telegram.org/bot' + bot_token + '/sendPhoto?chat_id=' + id + '&photo=' + image_url # If the argument gets passed, at a caption to the image if image_caption != None: send_text += '&caption=' + image_caption response = requests.get(send_text) return response.json() def fetch_stock_from_api(api_result): """ For fideling out the few important information out of the api response """ new_api_result = list() # Go through all favorites linked to the account,that are returned with the api for i in range(len(api_result)): current_fav = dict() current_fav['item_id'] = api_result[i]['item']['item_id'] current_fav['store_name'] = api_result[i]['store']['store_name'] current_fav['items_available'] = api_result[i]['items_available'] current_fav['category_picture'] = api_result[i]['store']['cover_picture']['current_url'] new_api_result.append(current_fav) return new_api_result def routine_check(): """ Function that gets called via schedule every 3 minutes. 
Retrieves the data from TGTG API and selects the message to send. """ # Get the global variable of items in stock global favourites_in_stock # Get all favorite items api_response = client.get_items() new_api_result = fetch_stock_from_api(api_response) # Go through all favourite items and compare the stock list_of_item_ids = [fav['item_id'] for fav in new_api_result] for item_id in list_of_item_ids: try: old_stock = [item['items_available'] for item in favourites_in_stock if item['item_id'] == item_id][0] except: old_stock = 0 print("An exception occurred: The item_id was not known as a favorite before") new_stock = [item['items_available'] for item in new_api_result if item['item_id'] == item_id][0] # Check, if the stock has changed. Send a message if so. if new_stock != old_stock: # Check if the stock was replenished, send an encouraging image message if old_stock == 0 and new_stock > 0: message = f"There are {new_stock} new goodie bags at {[item['store_name'] for item in new_api_result if item['item_id'] == item_id][0]}" image = [item['category_picture'] for item in new_api_result if item['item_id'] == item_id][0] telegram_bot_sendimage(image, message) elif old_stock > new_stock and new_stock != 0: # customer feedback: This message is not needed pass ## Prepare a generic string, but with the important info # message = f" 📉 Decrease from {old_stock} to {new_stock} available goodie bags at {[item['store_name'] for item in new_api_result if item['item_id'] == item_id][0]}." # telegram_bot_sendtext(message) elif old_stock > new_stock and new_stock == 0: message = f" ⭕ Sold out! There are no more goodie bags available at {[item['store_name'] for item in new_api_result if item['item_id'] == item_id][0]}." 
telegram_bot_sendtext(message) else: # Prepare a generic string, but with the important info message = f"There was a change of number of goodie bags in stock from {old_stock} to {new_stock} at {[item['store_name'] for item in new_api_result if item['item_id'] == item_id][0] }." telegram_bot_sendtext(message) # Reset the global information with the newest fetch favourites_in_stock = new_api_result # Print out some maintenance info in the terminal print(f"API run at {time.ctime(time.time())} successful. Current stock:") for item_id in list_of_item_ids: print(f"{[item['store_name'] for item in new_api_result if item['item_id'] == item_id][0]}:\ {[item['items_available'] for item in new_api_result if item['item_id'] == item_id][0]}") def still_alive(): """ This function gets called every 24 hours and sends a 'still alive' message to the admin. """ message = f"Current time: {time.ctime(time.time())}. The bot is still running. " global favourites_in_stock list_of_item_ids = [fav['item_id'] for fav in favourites_in_stock] for item_id in list_of_item_ids: message += (f"{[item['store_name'] for item in favourites_in_stock if item['item_id'] == item_id][0]}: {[item['items_available'] for item in favourites_in_stock if item['item_id'] == item_id][0]} items available") telegram_bot_sendtext(message, only_to_admin = True) # Use schedule to set up a recurrent checking schedule.every(3).minutes.do(routine_check) schedule.every(24).hours.do(still_alive) # Description of the sercive, that gets send once telegram_bot_sendtext("The bot script has started successfully. The bot checks every 3 minutes, if there is something new at TooGoodToGo. Every 24 hours, the bots sends a 'still alive'-message.", only_to_admin=True) while True: # run_pending schedule.run_pending() time.sleep(1)
AukiJuanDiaz/TGTG_Watchbot
watch_script.py
watch_script.py
py
8,171
python
en
code
6
github-code
13
47725754174
import torch import soundfile as sf import torch.nn as nn import torch.nn.functional as F from peft import LoraConfig, TaskType, get_peft_model from transformers import ( WhisperFeatureExtractor, WhisperModel, LlamaForCausalLM, LlamaTokenizer ) import librosa from beats.BEATs import BEATsConfig, BEATs from qformer.Qformer import BertConfig, BertLMHeadModel class SALMONN(nn.Module): def __init__( self, ckpt, whisper_path, beats_path, vicuna_path, speech_qformer_token_num=1, speech_qformer_layer=2, lora=True, lora_alpha=32, lora_rank=8, lora_dropout=0.1, second_per_frame=0.333333, second_stride=0.333333, low_resource=False ): super().__init__() # feature_extractor self.feature_extractor = WhisperFeatureExtractor.from_pretrained(whisper_path) # whisper self.speech_encoder = WhisperModel.from_pretrained(whisper_path).encoder self.ln_speech = nn.LayerNorm(self.speech_encoder.config.d_model) # beats self.beats_ckpt = beats_path beats_checkpoint = torch.load(self.beats_ckpt, map_location='cpu') beats_cfg = BEATsConfig(beats_checkpoint['cfg']) beats = BEATs(beats_cfg) beats.load_state_dict(beats_checkpoint['model']) self.beats = beats self.ln_audio = nn.LayerNorm(self.beats.cfg.encoder_embed_dim) for name, param in self.beats.named_parameters(): param.requires_grad = False self.beats.eval() # init speech Qformer self.speech_Qformer, self.speech_query_tokens = self.init_speech_Qformer( speech_qformer_token_num, self.speech_encoder.config.d_model + self.beats.cfg.encoder_embed_dim, speech_qformer_layer, ) self.second_per_frame = second_per_frame self.second_stride = second_stride # vicuna if not low_resource: self.llama_model = LlamaForCausalLM.from_pretrained( vicuna_path, torch_dtype=torch.float16, ) else: self.llama_model = LlamaForCausalLM.from_pretrained( vicuna_path, torch_dtype=torch.float16, load_in_8bit=True, device_map={'': 0} ) # lora self.lora = lora if lora: target_modules = None self.peft_config = LoraConfig( task_type=TaskType.CAUSAL_LM, inference_mode=True, 
r=lora_rank, lora_alpha=lora_alpha, lora_dropout=lora_dropout, target_modules=target_modules, ) self.llama_model = get_peft_model(self.llama_model, self.peft_config) # tokenizer self.llama_tokenizer = LlamaTokenizer.from_pretrained(vicuna_path, use_fast=False) self.llama_tokenizer.add_special_tokens({'pad_token': '[PAD]'}) self.llama_tokenizer.padding_side = "right" # proj self.speech_llama_proj = nn.Linear( self.speech_Qformer.config.hidden_size, self.llama_model.config.hidden_size) # load ckpt ckpt_dict = torch.load(ckpt)['model'] self.load_state_dict(ckpt_dict, strict=False) def generate( self, wav_path, prompt, prompt_pattern="USER: <Speech><SpeechHere></Speech> {}\nASSISTANT:", device='cuda:0', max_length=200, num_beams=4, do_sample=True, min_length=1, top_p=0.9, repetition_penalty=1.0, length_penalty=1.0, temperature=1.0, ): # read wav wav, sr = sf.read(wav_path) if len(wav.shape) == 2: wav = wav[:, 0] if len(wav) > 30 * sr: wav = wav[: 30 * sr] if sr != 16000: wav = librosa.resample(wav, orig_sr=sr, target_sr=16000, res_type="fft") # whisper spectrogram = self.feature_extractor(wav, return_tensors="pt", sampling_rate=16000).input_features.to(device) # [1, 80, 3000] speech_embeds = self.speech_encoder(spectrogram, return_dict=True).last_hidden_state # beats raw_wav = torch.from_numpy(wav).to(device).unsqueeze(0) audio_padding_mask = torch.zeros(raw_wav.shape, device=device).bool() audio_embeds, _ = self.beats.extract_features(raw_wav, padding_mask=audio_padding_mask, feature_only=True) # auditory embeds speech_embeds = self.ln_speech(speech_embeds) audio_embeds = self.ln_audio(audio_embeds) audio_embeds = F.pad(audio_embeds, (0, 0, 0, speech_embeds.size(1) - audio_embeds.size(1))) speech_embeds = torch.cat([speech_embeds, audio_embeds], dim=-1) # split frames B, T, C = speech_embeds.shape kernel = round(T * self.second_per_frame / 30.0) stride = round(T * self.second_stride / 30.0) kernel = (1, kernel) stride = (1, stride) speech_embeds_tr = 
speech_embeds.transpose(1, 2).unsqueeze(2) speech_embeds_overlap = F.unfold(speech_embeds_tr, kernel_size=kernel, dilation=1, padding=0, stride=stride) _, _, L = speech_embeds_overlap.shape speech_embeds_overlap = speech_embeds_overlap.view(B, -1, kernel[1], L) speech_embeds_overlap = torch.permute(speech_embeds_overlap, [0, 3, 2, 1]) speech_embeds = speech_embeds_overlap.reshape(-1, kernel[1], C) speech_atts = torch.ones(speech_embeds.size()[:-1], dtype=torch.long, device=speech_embeds.device) # Qformer query_tokens = self.speech_query_tokens.expand(speech_embeds.shape[0], -1, -1) query_output = self.speech_Qformer.bert( query_embeds=query_tokens, encoder_hidden_states=speech_embeds, encoder_attention_mask=speech_atts, return_dict=True, ) speech_embeds = self.speech_llama_proj(query_output.last_hidden_state) speech_embeds = speech_embeds.view(B, -1, speech_embeds.size(2)).contiguous() speech_atts = torch.ones(speech_embeds.size()[:-1], dtype=torch.long).to(speech_embeds.device) # USER: <Speech>speech_embeds<Speech> prompt\nASSISTANT: embed_tokens = self.llama_model.model.model.embed_tokens if self.lora else self.llama_model.model.embed_tokens prompt_left, prompts_right = prompt_pattern.format(prompt).split('<SpeechHere>') prompt_left_ids = self.llama_tokenizer( prompt_left, return_tensors="pt", add_special_tokens=False ).to(speech_embeds.device).input_ids prompt_left_embeds = embed_tokens(prompt_left_ids) prompt_right_ids = self.llama_tokenizer( prompts_right, return_tensors="pt", add_special_tokens=False ).to(speech_embeds.device).input_ids prompt_right_embeds = embed_tokens(prompt_right_ids) bos_embeds = self.llama_model.model.embed_tokens( torch.ones( [1, 1], dtype=torch.long, device=device, ) * self.llama_tokenizer.bos_token_id ) if not self.lora else self.llama_model.model.model.embed_tokens( torch.ones( [1, 1], dtype=torch.long, device=device, ) * self.llama_tokenizer.bos_token_id ) embeds = torch.cat([bos_embeds, prompt_left_embeds, speech_embeds, 
prompt_right_embeds], dim=1) atts = torch.ones(embeds.size()[:-1], dtype=torch.long).to(embeds.device) # generate output = self.llama_model.generate( inputs_embeds=embeds, max_length=max_length, num_beams=num_beams, do_sample=do_sample, min_length=min_length, top_p=top_p, repetition_penalty=repetition_penalty, length_penalty=length_penalty, temperature=temperature, attention_mask=atts, bos_token_id=self.llama_tokenizer.bos_token_id, eos_token_id=self.llama_tokenizer.eos_token_id, pad_token_id=self.llama_tokenizer.pad_token_id ) output_text = self.llama_tokenizer.batch_decode(output, add_special_tokens=False, skip_special_tokens=True) return output_text def init_speech_Qformer(self, num_query_token, speech_width, num_hidden_layers=2): encoder_config = BertConfig() encoder_config.num_hidden_layers = num_hidden_layers encoder_config.encoder_width = speech_width encoder_config.add_cross_attention = True encoder_config.cross_attention_freq = 1 encoder_config.query_length = num_query_token Qformer = BertLMHeadModel(config=encoder_config) query_tokens = nn.Parameter( torch.zeros(1, num_query_token, encoder_config.hidden_size) ) query_tokens.data.normal_(mean=0.0, std=encoder_config.initializer_range) return Qformer, query_tokens
bytedance/SALMONN
model.py
model.py
py
9,164
python
en
code
623
github-code
13
26412983707
'''Valid Hexadecimal Representation of Number''' s=input() i=0 for i in range(len(s)): if (s[i]<'0' or s[i]>'9') and (s[i]<'A' or s[i]>'F'): print("no") print("yes")
PREMSAI2K1/code1
hexadecimal.py
hexadecimal.py
py
222
python
en
code
0
github-code
13
40963002804
from tkinter import CENTER from turtle import Turtle ALIGNMENT = 'center' FONT = ('Courier New', 18, 'normal') class Scoreboard(Turtle): def __init__(self) -> None: super().__init__() self.current_score = 0 self.clear() self.hideturtle() self.penup() self.color('white') self.goto(0, 270) self.write_score() def write_score(self): self.write(f"Score: {self.current_score}", align=ALIGNMENT, font=FONT) def update_score(self): self.current_score += 1 self.clear() self.write_score() def game_over(self): self.goto(0, 0) self.write(f"GAME OVER", align=ALIGNMENT, font=FONT)
jjbondoc/learning-python
hundred-days-of-code/day_020_snake/scoreboard.py
scoreboard.py
py
729
python
en
code
0
github-code
13
38036774878
def getText(node): s = '' for n in node.childNodes: if n.nodeType == node.TEXT_NODE: s += n.data return s def getSingleNodeText(node, tag): nodes = node.getElementsByTagName(tag) snode = nodes[0] return getText(snode)
rushioda/PIXELVALID_athena
athena/Tools/RunTimeTester/testsuite/src/parseHelpers.py
parseHelpers.py
py
290
python
en
code
1
github-code
13
42304085971
# -*- coding: utf-8 -*- """ Created on Sun Mar 12 21:59:20 2017 @author: Mateusz """ def variable(i, j): '''Funkcja tworzaca zmienna znakowa o zadanych subskryptach i, j.''' result = "x" result += str(i) result += "." result += str(j) return result def hetmani(n): '''Metaprogram tworzacy rozwiazanie problemu hetmanow na szachownicy o wymiarach n x n.''' result = "Maximize \nobj: " for i in range(1, n + 1): for j in range(1, n + 1): result += variable(i, j) if(not(i == n and j == n)): result += " + " else: result += "\n" #Warunki w wiersze result += "Subject to \n" for i in range(1, n + 1): for j in range(1, n + 1): result += variable(i, j) if(not(j == n)): result += " + " else: result += " <= 1\n" #Warunki w kolumny for i in range(1, n + 1): for j in range(1, n + 1): result += variable(j, i) if(not(j == n)): result += " + " else: result += " <= 1\n" #Warunki na przekatne k = 0 for i in range(-n + 1, n): k += 1 if (i <= 0): for j in range(1, k + 1): result += variable(j, j - i) if (j != k): result += " + " else: for j in range(i + 1, n + 1): result += variable(j, j - i) if (j != n): result += " + " result += " <= 1\n" #Warunki na antyprzekatne for i in range(2, 2 * n + 1): if (i <= n + 1): for j in range(1, i): result += variable(j, i - j) if (j != i - 1): result += " + " else: for j in range(i - n, n + 1): if (i - j >= 1): result += variable(j, i - j) if (i - j != i - n): result += " + " result += " <= 1\n" #Warunki na wartosci result += "Bounds \n" for i in range(1, n + 1): for j in range(1, n + 1): result += "0 <= " result += variable(i, j) result += " <= 1\n" #nazwy zmiennych result += "Generals \n" for i in range(1, n + 1): for j in range(1, n + 1): result += variable(i, j) result += "\n" result += "End" return result n = int(input("Podaj rozmiar szachownicy: "), 10) print(hetmani(n))
RioJack01/-optymalizacja-
lab2/hetmani.py
hetmani.py
py
2,597
python
en
code
0
github-code
13
22151854134
import copy import torch import torch.nn as nn from .layers import SublayerWrapper, PositionalEncoding, MultiHeadAttention, FeedForwardLayer from ..datasets.utils import Vocab class TransformerDecoderLayer(nn.Module): def __init__(self, dim, self_attn, src_attn, ffn, dropout): super().__init__() self.self_attn = SublayerWrapper(dim, self_attn, dropout) self.src_attn = SublayerWrapper(dim, src_attn, dropout) self.ffn = SublayerWrapper(dim, ffn, dropout) def forward(self, x, m, mem_mask, tgt_mask): """ Args: x: target input (bs, tgt_len, model_dim) m: source memory bank (bs, src_len, model_dim) mem_mask: (bs, tgt_len, src_len) tgt_mask: (bs, tgt_len, tgt_len) """ x = self.self_attn(x, x, x, tgt_mask) if m is not None: x = self.src_attn(x, m, m, mem_mask) x = self.ffn(x) return x class TransformerDecoder(nn.Module): def __init__(self, layers, heads, vocab_size, model_dim, ffn_dim, dropout=0.1): super().__init__() self.embed = nn.Embedding(vocab_size + len(Vocab.extra), model_dim) self.pe = PositionalEncoding(model_dim) c = copy.deepcopy mha = MultiHeadAttention(heads, model_dim) ffn = FeedForwardLayer(model_dim, ffn_dim) layer = TransformerDecoderLayer( model_dim, c(mha), c(mha), c(ffn), dropout) self.layers = nn.ModuleList([c(layer) for _ in range(layers)]) self.fc = nn.Linear(model_dim, vocab_size + len(Vocab.extra)) def forward(self, x, m, mem_mask, tgt_mask=None): """ Args: x: target input (bs, tgt_len, tgt_dim) m: source memory bank (bs, src_len, model_dim) mem_mask: (bs, tgt_len, src_len) tgt_mask: (bs, tgt_len, tgt_len) """ x = self.embed(x) x = self.pe(x) for layer in self.layers: x = layer(x, m, mem_mask, tgt_mask) x = self.fc(x) return x
enhuiz/transformer-pytorch
torchnmt/networks/decoders.py
decoders.py
py
2,052
python
en
code
1
github-code
13
86458979000
#%% # langugae list, character dictionary set and other helper functions LanguageList = [ 'HEBREW', 'ARABIC', 'PORTUGUESE', 'ITALIAN', 'FRENCH', 'SPANISH', 'GERMAN', 'ENGLISH', 'RUSSIAN', 'FINNISH', 'VIETNAMESE', 'KOREAN', 'CHINESE', 'JAPANESE' ] g1 = ['HEBREW','ARABIC'] g2 = ['PORTUGUESE','ITALIAN','FRENCH','SPANISH','GERMAN','ENGLISH','FINNISH'] g3 = ['RUSSIAN', 'KOREAN'] g4 = ['CHINESE','JAPANESE'] g5 = ['VIETNAMESE'] GroupList = [g1,g2,g3,g4,g5] GroupNameList = ['group%s'%str(i) for i in range(1,6)] def prediction(tag_seq): return [ix_to_tag[int(o)] for o in tag_seq] def prepare_sequence(seq, to_ix): idxs = [to_ix[w] for w in seq] return torch.tensor(idxs, dtype=torch.long) def prediction_str(tag_seq): out_list = [ix_to_tag[int(o)] for o in tag_seq] out_str = '' for o in out_list: out_str += o return out_str # create token list from BIESX tag def find_token(sentence_str): token = []; word = '' for i,tag in enumerate(sentence_str[1]): if tag == 'S': token.append(sentence_str[0][i]) continue if tag == 'X': continue if (tag == 'B') | (tag == 'I'): word += sentence_str[0][i] continue if tag == 'E': word+=sentence_str[0][i] token.append(word) word='' return token from torch.nn.utils.rnn import pad_sequence def prepare_batch(batch, to_ix): tensor_list = [] for seq in batch: idxs = [to_ix[w] for w in seq] tensor = torch.tensor(idxs, dtype=torch.long) tensor_list.append(tensor) return pad_sequence(tensor_list,batch_first=False) # with batch_first=False, the dimension come as (len(seq)#length of longest sequence,len(batch)#batch_size) def prepare_cse(sentence,batch_size=1): lm_f: LanguageModel = FlairEmbeddings('multi-forward').lm lm_b: LanguageModel = FlairEmbeddings('multi-backward').lm if batch_size == 1: embeds_f = lm_f.get_representation([sentence],'\n','\n')[1:-1,:,:] embeds_b = lm_b.get_representation([sentence],'\n','\n')[1:-1,:,:] elif batch_size >1: embeds_f = lm_f.get_representation(list(sentence),'\n','\n')[1:-1,:,:] embeds_b = 
lm_b.get_representation(list(sentence),'\n','\n')[1:-1,:,:] return torch.cat((embeds_f,embeds_b),dim=2) def argmax(vec): # return the argmax as a python int _, idx = torch.max(vec, 1) return idx.item() # Compute log sum exp in a numerically stable way for the forward algorithm def log_sum_exp(vec): max_score = vec[0, argmax(vec)] max_score_broadcast = max_score.view(1, -1).expand(1, vec.size()[1]) return max_score + \ torch.log(torch.sum(torch.exp(vec - max_score_broadcast))) def save_checkpoint(state, filename): print("=> Saving checkpoint") torch.save(state, filename) def load_checkpoint(checkpoint, model, optimizer): print("=> Loading checkpoint") model.load_state_dict(checkpoint['state_dict']) optimizer.load_state_dict(checkpoint['optimizer']) import pickle data_train,data_test,data_dev=[],[],[] for language in LanguageList: with open('./data/%s_Train.pickle'%language, 'rb') as f1: train = pickle.load(f1) with open('./data/%s_Test.pickle'%language, 'rb') as f2: test = pickle.load(f2) with open('./data/%s_Dev.pickle'%language, 'rb') as f3: dev = pickle.load(f3) data_train += train; data_test += test; data_dev += dev import numpy as np letter_to_ix = {} letter_to_ix[''] = 0 # need this for padding for sent, tags in data_train+data_test+data_dev: for letter in sent: if letter not in letter_to_ix: letter_to_ix[letter] = len(letter_to_ix) print('functions.py : Nr. 
of distinguish character: ',len(letter_to_ix.keys())) #%% # define class BiLSTM_CRF import torch # import torchvision import torch.nn as nn import torch.optim as optim # import torch.nn.functional as F from torch.utils.data import DataLoader from flair.embeddings import FlairEmbeddings from flair.models import LanguageModel class BiLSTM_CRF(nn.Module): def __init__(self, character_size, tag_to_ix, embedding_dim, hidden_dim, batch_size, START_TAG = "<START>", STOP_TAG = "<STOP>" ): super(BiLSTM_CRF, self).__init__() self.embedding_dim = embedding_dim self.hidden_dim = hidden_dim self.character_size = character_size self.tag_to_ix = tag_to_ix self.tagset_size = len(tag_to_ix) self.batch_size = batch_size self.character_embeds = nn.Embedding(character_size, embedding_dim) self.lstm = nn.LSTM(embedding_dim, hidden_dim , num_layers=1, bidirectional=True) # Maps the output of the LSTM into tag space. self.hidden2tag = nn.Linear(hidden_dim * 2, self.tagset_size) # Matrix of transition parameters. Entry i,j is the score of # transitioning *to* i *from* j. self.transitions = nn.Parameter( torch.randn(self.tagset_size, self.tagset_size)) # These two statements enforce the constraint that we never transfer # to the start tag and we never transfer from the stop tag self.transitions.data[tag_to_ix[START_TAG], :] = -10000 self.transitions.data[:, tag_to_ix[STOP_TAG]] = -10000 self.hidden = self.init_hidden() def init_hidden(self): return (torch.randn(2, 1, self.hidden_dim ), torch.randn(2, 1, self.hidden_dim )) def _forward_alg(self, feats): # Do the forward algorithm to compute the partition function init_alphas = torch.full((1, self.tagset_size), -10000.) # START_TAG has all of the score. init_alphas[0][self.tag_to_ix[START_TAG]] = 0. 
# Wrap in a variable so that we will get automatic backprop forward_var = init_alphas # Iterate through the sentence for feat in feats: alphas_t = [] # The forward tensors at this timestep for next_tag in range(self.tagset_size): # broadcast the emission score: it is the same regardless of # the previous tag emit_score = feat[next_tag].view( 1, -1).expand(1, self.tagset_size) # the ith entry of trans_score is the score of transitioning to # next_tag from i trans_score = self.transitions[next_tag].view(1, -1) # The ith entry of next_tag_var is the value for the # edge (i -> next_tag) before we do log-sum-exp next_tag_var = forward_var + trans_score + emit_score # The forward variable for this tag is log-sum-exp of all the # scores. alphas_t.append(log_sum_exp(next_tag_var).view(1)) forward_var = torch.cat(alphas_t).view(1, -1) terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]] alpha = log_sum_exp(terminal_var) return alpha def _get_lstm_features(self, sentence): self.hidden = self.init_hidden() embeds = self.character_embeds(sentence).view(len(sentence), 1, -1) lstm_out, self.hidden = self.lstm(embeds, self.hidden) lstm_out = lstm_out.view(len(sentence), self.hidden_dim *2) lstm_feats = self.hidden2tag(lstm_out) return lstm_feats def _score_sentence(self, feats, tags): # Gives the score of a provided tag sequence score = torch.zeros(1) tags = torch.cat([torch.tensor([self.tag_to_ix[START_TAG]], dtype=torch.long), tags]) for i, feat in enumerate(feats): score = score + \ self.transitions[tags[i + 1], tags[i]] + feat[tags[i + 1]] score = score + self.transitions[self.tag_to_ix[STOP_TAG], tags[-1]] return score def _viterbi_decode(self, feats): backpointers = [] # Initialize the viterbi variables in log space init_vvars = torch.full((1, self.tagset_size), -10000.) 
init_vvars[0][self.tag_to_ix[START_TAG]] = 0 # forward_var at step i holds the viterbi variables for step i-1 forward_var = init_vvars for feat in feats: bptrs_t = [] # holds the backpointers for this step viterbivars_t = [] # holds the viterbi variables for this step for next_tag in range(self.tagset_size): # next_tag_var[i] holds the viterbi variable for tag i at the # previous step, plus the score of transitioning # from tag i to next_tag. # We don't include the emission scores here because the max # does not depend on them (we add them in below) next_tag_var = forward_var + self.transitions[next_tag] best_tag_id = argmax(next_tag_var) bptrs_t.append(best_tag_id) viterbivars_t.append(next_tag_var[0][best_tag_id].view(1)) # Now add in the emission scores, and assign forward_var to the set # of viterbi variables we just computed forward_var = (torch.cat(viterbivars_t) + feat).view(1, -1) backpointers.append(bptrs_t) # Transition to STOP_TAG terminal_var = forward_var + self.transitions[self.tag_to_ix[STOP_TAG]] best_tag_id = argmax(terminal_var) path_score = terminal_var[0][best_tag_id] # Follow the back pointers to decode the best path. best_path = [best_tag_id] for bptrs_t in reversed(backpointers): best_tag_id = bptrs_t[best_tag_id] best_path.append(best_tag_id) # Pop off the start tag (we dont want to return that to the caller) start = best_path.pop() assert start == self.tag_to_ix[START_TAG] # Sanity check best_path.reverse() return path_score, best_path def neg_log_likelihood(self, sentence, tags): feats = self._get_lstm_features(sentence) forward_score = self._forward_alg(feats) gold_score = self._score_sentence(feats, tags) return forward_score - gold_score def forward(self, sentence): # dont confuse this with _forward_alg above. # Get the emission scores from the BiLSTM lstm_feats = self._get_lstm_features(sentence) # Find the best path, given the features. 
score, tag_seq = self._viterbi_decode(lstm_feats) return tag_seq print('successfully imported...') #%% # Initialize network BiLSTM_CRF # # define hyper parameter embedding_dim = 256 hidden_dim = 256 # learning_rate = 0.01 # num_layers = 1 batch_size = 1 # use_CSE = False # MAX_EPOCH = 10 # shuffle = True # batch_first = False START_TAG = "<START>"; STOP_TAG = "<STOP>" tag_to_ix = {"B": 0, "I": 1, "E": 2,'S':3, 'X':4, START_TAG: 5, STOP_TAG: 6} ix_to_tag = {y:x for x,y in tag_to_ix.items()} device = torch.device("cuda" if torch.cuda.is_available() else "cpu") torch.manual_seed(1) def initialize_model(character_size = len(letter_to_ix), tag_to_ix = tag_to_ix, embedding_dim= embedding_dim, hidden_dim= hidden_dim, batch_size=batch_size): model = BiLSTM_CRF(character_size, tag_to_ix, embedding_dim, hidden_dim,batch_size) model = model.to(device); model.train() optimizer = optim.SGD(model.parameters(), lr=0.01, weight_decay=1e-4) checkpoint = {'state_dict' : model.state_dict(), 'optimizer': optimizer.state_dict()} return model, optimizer,checkpoint # why is the lr so low here? compare to bilstm?
wuqi0704/MasterThesis_Tokenization
bilstm_crf.py
bilstm_crf.py
py
11,910
python
en
code
0
github-code
13
60312629
import logging import os from treescript.gecko import mercurial as vcs from treescript.gecko.android_l10n import android_l10n_import, android_l10n_sync from treescript.gecko.l10n import l10n_bump from treescript.gecko.merges import do_merge from treescript.gecko.versionmanip import bump_version from treescript.exceptions import TreeScriptError from treescript.util.task import get_source_repo, should_push, task_action_types log = logging.getLogger(__name__) async def perform_merge_actions(config, task, actions, repo_path): """Perform merge day related actions. This has different behaviour to other treescript actions: * Reporting on outgoing changesets has less meaning * Logging outgoing changesets can easily break with the volume and content of the diffs * We need to do more than just |hg push -r .| since we have two branches to update Args: config (dict): the running config task (dict): the running task actions (list): the actions to perform repo_path (str): the source directory to use. """ log.info("Starting merge day operations") push_activity = await do_merge(config, task, repo_path) if should_push(task, actions) and push_activity: log.info("%d branches to push", len(push_activity)) for target_repo, revision in push_activity: log.info("pushing %s to %s", revision, target_repo) await vcs.push(config, task, repo_path, target_repo=target_repo, revision=revision) async def do_actions(config, task): """Perform the set of actions that treescript can perform. 
The actions happen in order, tagging, ver bump, then push Args: config (dict): the running config task (dict): the running task """ work_dir = config["work_dir"] repo_path = os.path.join(work_dir, "src") actions = task_action_types(config, task) await vcs.log_mercurial_version(config) if not await vcs.validate_robustcheckout_works(config): raise TreeScriptError("Robustcheckout can't run on our version of hg, aborting") await vcs.checkout_repo(config, task, get_source_repo(task), repo_path) # Split the action selection up due to complexity in do_actions # caused by different push behaviour, and action return values. if "merge_day" in actions: await perform_merge_actions(config, task, actions, repo_path) return num_changes = 0 if "tag" in actions: num_changes += await vcs.do_tagging(config, task, repo_path) if "version_bump" in actions: num_changes += await bump_version(config, task, repo_path) if "l10n_bump" in actions: num_changes += await l10n_bump(config, task, repo_path) if "android_l10n_import" in actions: num_changes += await android_l10n_import(config, task, repo_path) if "android_l10n_sync" in actions: num_changes += await android_l10n_sync(config, task, repo_path) num_outgoing = await vcs.log_outgoing(config, task, repo_path) if num_outgoing != num_changes: raise TreeScriptError("Outgoing changesets don't match number of expected changesets!" " {} vs {}".format(num_outgoing, num_changes)) if should_push(task, actions): if num_changes: await vcs.push(config, task, repo_path, target_repo=get_source_repo(task)) else: log.info("No changes; skipping push.") await vcs.strip_outgoing(config, task, repo_path)
mozilla-releng/scriptworker-scripts
treescript/src/treescript/gecko/__init__.py
__init__.py
py
3,464
python
en
code
13
github-code
13
31625595769
""" I want to see what the distance between the ends of the peptide are for a bunch of pMHC complexes. I want to know if the ends should be treated as fixed. Given: A list of PDB entries, presumably pMHC complexes. I assume the smallest chain in each is the peptide. Print distance between first and last alpha atom of each peptide. """ import sys import os from Bio.PDB import * if len(sys.argv) > 1: pdb_ids = sys.argv[1::] pdb_l = PDBList() parser = PDBParser() i = 1 to_print = list() for pdb_id in pdb_ids: #I'm going to silence the output of the download sys.stdout = open(os.devnull, 'w') pdb_file_name = pdb_l.retrieve_pdb_file(pdb_id) structure = parser.get_structure('A' + str(i), pdb_file_name) ppb = PPBuilder() #store a list of tuples like (sequence, polypeptide). polypeptides = list() for polypeptide in ppb.build_peptides(structure): sequence = polypeptide.get_sequence() polypeptides.append((sequence, polypeptide)) sys.stdout = sys.__stdout__ #peptides are between 6 and 12 residues long. I'm being really generous here possible_peptides = list(filter(lambda x: len(x[0]) >= 6 and len(x[0]) <= 12, polypeptides)) #just print out the peptide we're gonna use. to_print.append('Will use peptide: ' + str(possible_peptides[0][0]) + ' for PDB entry: ' + pdb_id) c_alpha_atoms = possible_peptides[0][1].get_ca_list() to_print.append('distance: ' + str(c_alpha_atoms[-1] - c_alpha_atoms[0])) i += 1 for x in to_print: print(x)
mrForce/honorsThesis
measureDistance.py
measureDistance.py
py
1,663
python
en
code
0
github-code
13
38584009841
# -*- coding: utf-8 -*- """ Spyder Editor This is a temporary script file. """ import torch import torchvision.transforms as transforms import torchvision.datasets as datasets import torchvision.models as models import torch.nn as nn import torch.optim as optim import numpy as np from PIL import Image import matplotlib.pyplot as plt import torch.nn.functional as F import os transformations = transforms.Compose([ transforms.Resize(255), transforms.CenterCrop(224), transforms.RandomHorizontalFlip(), transforms.RandomRotation(20,resample= Image.BILINEAR), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]) ]) train_set = datasets.ImageFolder(r"C:\Users\nmahe\Documents\Senior Year Fall\BE 495\OtoscopeImages\Training", transform = transformations) test_set = datasets.ImageFolder(r"C:\Users\nmahe\Documents\Senior Year Fall\BE 495\OtoscopeImages\Testing", transform = transformations) train_loader = torch.utils.data.DataLoader(train_set, batch_size=300, shuffle=True) test_loader = torch.utils.data.DataLoader(test_set, batch_size=20, shuffle=True) trainimages, trainlabels = next(iter(train_loader)) testimages, testlabels = next(iter(test_loader)) device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') num_epochs = 35 num_classes = 4 batch_size = 100 learning_rate = 0.001 class CNN(nn.Module): def __init__(self): super(CNN, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=10, kernel_size=3) self.conv2 = nn.Conv2d(10, 20, kernel_size=3) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(58320, 512) self.fc2 = nn.Linear(512, 16) self.fc3 = nn.Linear(16,2) self.soft = nn.Softmax(dim = 1) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(x.shape[0],-1) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) x = self.fc3(F.relu(x)) x = self.soft(x) return x model = CNN().to(device) 
criterion = nn.CrossEntropyLoss() optimizer = torch.optim.Adam(model.parameters(),lr = learning_rate) # keeping-track-of-losses train_losses = [] valid_losses = [] for epoch in range(1, num_epochs + 1): # keep-track-of-training-and-validation-loss train_loss = 0.0 valid_loss = 0.0 # training-the-model model.train() for data, target in train_loader: # move-tensors-to-GPU data = data.to(device) target = target.to(device) # clear-the-gradients-of-all-optimized-variables optimizer.zero_grad() # forward-pass: compute-predicted-outputs-by-passing-inputs-to-the-model output = model(data) # calculate-the-batch-loss loss = criterion(output, target) # backward-pass: compute-gradient-of-the-loss-wrt-model-parameters loss.backward() # perform-a-ingle-optimization-step (parameter-update) optimizer.step() # update-training-loss train_loss += loss.item() * data.size(0) # validate-the-model model.eval() for data, target in test_loader: data = data.to(device) target = target.to(device) output = model(data) loss = criterion(output, target) # update-average-validation-loss valid_loss += loss.item() * data.size(0) # calculate-average-losses train_loss = train_loss/len(train_loader.sampler) valid_loss = valid_loss/len(test_loader.sampler) train_losses.append(train_loss) valid_losses.append(valid_loss) # print-training/validation-statistics print('Epoch: {} \tTraining Loss: {:.6f} \tValidation Loss: {:.6f}'.format(epoch, train_loss, valid_loss)) #print('Epoch: {} \tTraining Loss: {:.6f}'.format( # epoch, train_loss))
JonathanMairena/Otoscope-Automation-and-Enhancement
classifier.py
classifier.py
py
4,153
python
en
code
0
github-code
13
29827290315
from numpy import asarray def add(A, B): n = len(A) result = [[0 for i in range(0, n)] for j in range(0, n)] for i in range(0, n): for j in range(0, n): result[i][j] = A[i][j] + B[i][j] return result def subtract(A, B): n = len(A) result = [[0 for i in range(0, n)] for j in range(0, n)] for i in range(0, n): for j in range(0, n): result[i][j] = A[i][j] - B[i][j] return result def square_matrix_multiply_strassens(A, B): n = len(A) if n == 1: C = [[0 for j in range(0, n)] for i in range(0, n)] for i in range(0, n): for j in range(0, n): C[i][j] = A[i][j] * B[i][j] return C else: # dividing the input matrices A and B new_length = int(n / 2) a11 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] a12 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] a21 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] a22 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] b11 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] b12 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] b21 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] b22 = [[0 for i in range(0, new_length)] for j in range(0, new_length)] aTemp = [[0 for i in range(0, new_length)] for j in range(0, new_length)] bTemp = [[0 for i in range(0, new_length)] for j in range(0, new_length)] for i in range(0, new_length): for j in range(0, new_length): a11[i][j] = A[i][j] a12[i][j] = A[i][j + new_length] a21[i][j] = A[i + new_length][j] a22[i][j] = A[i + new_length][j + new_length] b11[i][j] = B[i][j] b12[i][j] = B[i][j + new_length] b21[i][j] = B[i + new_length][j] b22[i][j] = B[i + new_length][j + new_length] aTemp = add(a11, a22) bTemp = add(b11, b22) p1 = square_matrix_multiply_strassens(aTemp, bTemp) aTemp = add(a21, a22) p2 = square_matrix_multiply_strassens(aTemp, b11) bTemp = subtract(b12, b22) p3 = square_matrix_multiply_strassens(a11, bTemp) bTemp = subtract(b21, b11) p4 = 
square_matrix_multiply_strassens(a22, bTemp) aTemp = add(a11, a12) p5 = square_matrix_multiply_strassens(aTemp, b22) aTemp = subtract(a21, a11) bTemp = add(b11, b12) p6 = square_matrix_multiply_strassens(aTemp, bTemp) aTemp = subtract(a12, a22) bTemp = add(b21, b22) p7 = square_matrix_multiply_strassens(aTemp, bTemp) aTemp = add(p1, p4) bTemp = add(aTemp, p7) c11 = subtract(bTemp, p5) c12 = add(p3, p5) c21 = add(p2, p4) aTemp = add(p1, p3) bTemp = add(aTemp, p6) c22 = subtract(bTemp, p2) C = [[0 for i in range(0, n)] for j in range(0, n)] for i in range(0, new_length): for j in range(0, new_length): C[i][j] = c11[i][j] C[i][j + new_length] = c12[i][j] C[i + new_length][j] = c21[i][j] C[i + new_length][j + new_length] = c22[i][j] return C def test(): A = asarray([[0, 0], [0, 0]]) B = asarray([[0, 0], [0, 0]]) A = asarray(A) B = asarray(B) assert A.shape == B.shape assert A.shape == A.T.shape assert (len(A) & (len(A) - 1)) == 0, "A is not a power of 2" print(square_matrix_multiply_strassens(A, B)) pass if __name__ == '__main__': test()
VaishakVellore/Python-Simple-Examples
Strassen.py
Strassen.py
py
3,555
python
en
code
0
github-code
13
33697568262
import logging import os import time import configparser import sqlalchemy from sqlalchemy import INT, TIMESTAMP, Boolean, Column, String, Table, func from sqlalchemy.ext.compiler import compiles from sqlalchemy.schema import MetaData from sqlalchemy.sql.expression import delete, insert, text, update logger = logging.getLogger() CONFIG_PATH = os.path.expanduser('~/.secret/credentials') class BaseModel(object): def __init__(self, engine, metadata, table, role='reader'): self.engine = engine self.metadata = metadata self.table = table self.role = role def execute(self, stmt): return self.engine.execute(stmt) def new_engine_and_metadata(db_conf=None): if os.path.isfile(CONFIG_PATH): config = configparser.ConfigParser() config.read(CONFIG_PATH) db_conf = dict(config['db']) else: raise FileNotFoundError settings = { 'max_overflow': -1, 'pool_size': 8, 'pool_recycle': 1024, 'pool_timeout': 800, } db_connection_str = 'mysql://{username}:{password}@{host}:{port}/{db_name}'.format_map(db_conf) engine = sqlalchemy.create_engine(db_connection_str, **settings) metadata = MetaData(bind=engine) return engine, metadata class HK01Progress(BaseModel): def __init__(self, engine, metadata, role='reader'): table = Table( 'hk01_progress', metadata, Column('id', INT, primary_key=True, autoincrement=True), Column('article_id', INT), Column('crawl_ts', INT), Column('path', String), Column('updated_at', TIMESTAMP, default=func.now()), ) super().__init__(engine, metadata, table, role) def get_article_path(self, article_id): stmt = text('SELECT crawl_ts, path FROM hk01_progress WHERE article_id = {}'.format(article_id)) row = self.execute(stmt).fetchone() logger.info("DB Record of article_id = {article_id} : {row}".format_map({'article_id': article_id, 'row': row})) if row: return {'crawl_ts': row[0], 'path': row[1]} else: return None def insert_article_progress(self, article_id, path): row = {'article_id': int(article_id), 'crawl_ts': int(time.time()), 'path': str(path)} stmt = text( 
'INSERT INTO hk01_progress (article_id, crawl_ts, path) VALUES ({article_id}, {crawl_ts}, "{path}") ON DUPLICATE KEY UPDATE article_id = {article_id}'.format_map(row)) self.execute(stmt) def get_all_article_ids(self): stmt = text('SELECT distinct(article_id) FROM hk01_progress ORDER BY article_id ASC') cursor = self.execute(stmt) row = cursor.fetchone() while row: yield row[0] row = cursor.fetchone() def get_last_crawled_article_id(self): stmt = text('SELECT distinct(article_id) FROM hk01_progress ORDER BY article_id DESC LIMIT 1') cursor = self.execute(stmt) return cursor.fetchone()[0]
payt0nc/news_crawler
HK01/HK01/database.py
database.py
py
3,054
python
en
code
0
github-code
13
6609553409
import Parser from os import listdir import time import re import json class DocumentProcessing: def __init__(self, directory_path, indexfile): # directory containint files, files # containing many documents self.directory_path = directory_path # store dict, posting_list as # indexfile.dict, indexfile.idx self.indexfile = indexfile; # list of file names in directory_path # cold function self.files = self.getFiles() # stores unique words, unique documents self.parser = Parser.Parser() # call self.parser.feed() for all documents # It will collect all unique words in parser ###### HOT FUNCTION self.collectWords() # dictionary of unique words # seen in all documents self.dict = dict(zip(self.parser.words, range(len(self.parser.words)))) # stores the posting list self.posting_list_parser = Parser.PostingListParser(self.parser.words) # given dictionary, and list of all file names # posting_list: posting list for each word in dictionary # type: dict(word : list(document_ids)) # Note: duplicate document ids in a posting list is # allowed and helps calculate freq of that # word in that duplicate document. ###### HOT FUNCTION self.updatePostingList() # write dictionary and posting list to disk # in binary form(smallest possible size). self.dump() def getFiles(self): """ given self.directory_path, returns list of file names without prefix path """ return listdir(self.directory_path) def collectWords(self): """ input: implicite. self.parser() object, self.files output: implicite. writes to parser.words semantics: calls parser.feed() method for all documents to collect unique words. 
###### HOT FUNCTION """ for f in self.files: with open(self.directory_path + '/' + f, 'r') as file: # reading the whole file and removing '\n' and '`' noise lots_of_docs = file.read() lots_of_docs = re.sub('\n', ' ', lots_of_docs) lots_of_docs = re.sub('`', '', lots_of_docs) self.parser.appendToWords(lots_of_docs) # sorting all words for *query self.parser.sort() del(lots_of_docs) def updatePostingList(self): """ input: implicite. self.dict, self.files output: updates self.posting_list_parser.posting_list semantics: for all words in dictionary, store corresponding posting list in self.posting_list_parser ###### HOT FUNCTION """ for f in self.files: with open(self.directory_path + '/' + f, 'r') as file: lots_of_docs = file.read() lots_of_docs = re.sub('\n', ' ', lots_of_docs) lots_of_docs = re.sub('`', '', lots_of_docs) self.posting_list_parser.appendToPostingList(lots_of_docs) del(lots_of_docs) def dump(self): """ input: implicite. self.dict, self.posting_list output: implicite. Write dict in indexfile.dict and write posting_list in indexfile.idx semantics: write self.dict and self.posting_list in binary(s.t. it occupies smallest size) """ json.dump(self.dict, open(self.indexfile+'.dict', 'w')) json.dump(self.posting_list_parser.posting_list, open(self.indexfile+'.idx', 'w')) # data = json.load( open( "file_name.json" ) )
Dwijesh522/Information_Retrieval_Assns
assn1/DocumentProcessing.py
DocumentProcessing.py
py
4,006
python
en
code
0
github-code
13
944330105
#!/usr/bin/env python # coding: utf-8 # In[ ]: import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns #get_ipython().run_line_magic('matplotlib', 'inline') import streamlit as st from collections import defaultdict # ### Check out the Data # https://www.youtube.com/watch?v=JwSS70SZdyM&t=7099s # In[256]: st.write("# Housing Prediction App") USAhousing = pd.read_csv('USA_Housing.csv') st.subheader("Preview the data") st.write(USAhousing.head()) # In[257]: USAhousing.head() # In[258]: USAhousing.info() # In[259]: USAhousing.describe() st.sidebar.header("User Housing info") # In[260]: #USAhousing.columns # # EDA # # Let's create some simple plots to check out the data! # In[261]: st.header("Get a pairplot of the variables") pairplot = sns.pairplot(USAhousing) st.pyplot(pairplot) # In[262]: st.header("Get a distribution plot of the variables") #streamlit displays matplotlib figure object so create a figure and axis and create your plot on there distplot_fig, ax = plt.subplots() ax = sns.distplot(USAhousing['Price']) st.pyplot(distplot_fig) # In[263]: st.header("Get the heatmap of the variables") htmap_fig, ax = plt.subplots() htmap= sns.heatmap(USAhousing.corr()) st.pyplot(htmap_fig) # ## Training a Linear Regression Model # # Let's now begin to train out regression model! We will need to first split up our data into an X array that contains the features to train on, and a y array with the target variable, in this case the Price column. We will toss out the Address column because it only has text info that the linear regression model can't use. # # ### X and y arrays # In[264]: X = USAhousing[['Avg. Area Income', 'Avg. Area House Age', 'Avg. Area Number of Rooms', 'Avg. 
Area Number of Bedrooms', 'Area Population']] y = USAhousing['Price'] def get_user_housing_data(): income = st.sidebar.number_input('Income', min_value =1000, max_value =100000) age = st.sidebar.number_input("Age", min_value=18, max_value=67) rooms = st.sidebar.number_input("No. of Rooms", min_value=1, max_value=6) bedrooms = st.sidebar.number_input("No. of BedromsRooms", min_value=1, max_value=6) population = st.sidebar.number_input("Population", min_value=1000000, max_value=5000000) values = defaultdict(int) values['income']=income values['age'] =age values["rooms"] = rooms values["bedrooms"] = bedrooms values["population"] = population return pd.DataFrame(data=values, index=[0]) df = get_user_housing_data() st.subheader("User submitted Housing Data") st.write(df) # ## Train Test Split # # Now let's split the data into a training set and a testing set. We will train out model on the training set and then use the test set to evaluate the model. # In[265]: from sklearn.model_selection import train_test_split # In[266]: X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.4, random_state=101) # ## Creating and Training the Model # In[267]: from sklearn.linear_model import LinearRegression # In[268]: lm = LinearRegression() # In[269]: lm.fit(X_train,y_train) # ## Model Evaluation # # Let's evaluate the model by checking out it's coefficients and how we can interpret them. # In[270]: # print the intercept st.subheader("Intercept") st.write(lm.intercept_) # In[277]: st.subheader("Co-efficients") coeff_df = pd.DataFrame(lm.coef_,X.columns,columns=['Coefficient']) st.write(coeff_df) # Interpreting the coefficients: # # - Holding all other features fixed, a 1 unit increase in **Avg. Area Income** is associated with an **increase of \$21.52 **. # - Holding all other features fixed, a 1 unit increase in **Avg. Area House Age** is associated with an **increase of \$164883.28 **. # - Holding all other features fixed, a 1 unit increase in **Avg. 
Area Number of Rooms** is associated with an **increase of \$122368.67 **. # - Holding all other features fixed, a 1 unit increase in **Avg. Area Number of Bedrooms** is associated with an **increase of \$2233.80 **. # - Holding all other features fixed, a 1 unit increase in **Area Population** is associated with an **increase of \$15.15 **. # # Does this make sense? Probably not because I made up this data. If you want real data to repeat this sort of analysis, check out the [boston dataset](http://scikit-learn.org/stable/modules/generated/sklearn.datasets.load_boston.html): # # # from sklearn.datasets import load_boston # boston = load_boston() # print(boston.DESCR) # boston_df = boston.data # ## Predictions from our Model # # Let's grab predictions off our test set and see how well it did! # In[279]: # predictions = lm.predict(X_test.head(1)) predictions = lm.predict(df) st.subheader("X-test") X_test # In[282]: st.subheader("Predictions") st.write(predictions) st.write(type(predictions)) if len(predictions==1): pass else: st.subheader("Predictions Plot") prediction_fig , ax = plt.subplots() ax = plt.scatter(y_test,predictions) st.pyplot(prediction_fig) # **Residual Histogram** # In[281]: st.subheader("Residuals Histogram") residual_fig, ax = plt.subplots() ax = sns.distplot((y_test-predictions),bins=50); st.pyplot(residual_fig) # ## Regression Evaluation Metrics # # # Here are three common evaluation metrics for regression problems: # # **Mean Absolute Error** (MAE) is the mean of the absolute value of the errors: # # $$\frac 1n\sum_{i=1}^n|y_i-\hat{y}_i|$$ # # **Mean Squared Error** (MSE) is the mean of the squared errors: # # $$\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2$$ # # **Root Mean Squared Error** (RMSE) is the square root of the mean of the squared errors: # # $$\sqrt{\frac 1n\sum_{i=1}^n(y_i-\hat{y}_i)^2}$$ # # Comparing these metrics: # # - **MAE** is the easiest to understand, because it's the average error. 
# - **MSE** is more popular than MAE, because MSE "punishes" larger errors, which tends to be useful in the real world. # - **RMSE** is even more popular than MSE, because RMSE is interpretable in the "y" units. # # All of these are **loss functions**, because we want to minimize them. # In[275]: from sklearn import metrics # In[276]: if len(predictions==1): pass else: print('MAE:', metrics.mean_absolute_error(y_test, predictions)) print('MSE:', metrics.mean_squared_error(y_test, predictions)) print('RMSE:', np.sqrt(metrics.mean_squared_error(y_test, predictions))) #saving the model # Saving the model import pickle pickle.dump(lm, open('housing_clf.pkl', 'wb')) # This was your first real Machine Learning Project! Congrats on helping your neighbor out! We'll let this end here for now, but go ahead and explore the Boston Dataset mentioned earlier if this particular data set was interesting to you! # # Up next is your own Machine Learning Project! # # ## Great Job!
benadaba/LR
LinearRegressionHousingPrediction.py
LinearRegressionHousingPrediction.py
py
6,886
python
en
code
0
github-code
13
31564013684
# File with a collection of functions from larda import numpy as np import matplotlib import matplotlib.pyplot as plt from scipy import stats import datetime from copy import copy def test_function(file): print(file) def argnearest(array, value): """find the index of the nearest value in a sorted array for example time or range axis Args: array (np.array): sorted array with values, list will be converted to 1D array value: value to find Returns: index """ if type(array) == list: array = np.array(array) i = np.searchsorted(array, value) - 1 if not i == array.shape[0] - 1: if np.abs(array[i] - value) > np.abs(array[i + 1] - value): i = i + 1 return i def ident(x): return x def raw2Z(array, **kwargs): """raw signal units (MRR-Pro) to reflectivity Z""" return array * kwargs['wl']**4 / (np.pi**5) / 0.93 * 10**6 def divide_by(val): return lambda var: var / val def lin2z(array): """linear values to dB (for np.array or single number)""" return 10 * np.ma.log10(array) def z2lin(array): """dB to linear values (for np.array or single number)""" return 10 ** (array / 10.) 
def plot_scatter(data_container1, data_container2, identity_line=True, **kwargs):
    """Scatter plot for variable comparison between two devices or variables.

    Args:
        data_container1 (dict): container 1st device (needs 'var' and 'mask')
        data_container2 (dict): container 2nd device
        identity_line (bool): plot a 1:1 line if True
        **x_lim (list): limits of var used for x axis
        **y_lim (list): limits of var used for y axis
        **c_lim (list): limits of var used for color axis
        **z_converter (string): convert var before plotting, e.g. 'lin2z'
        **var_converter (string): alternate name for z_converter
        **custom_offset_lines (float): plot 4 extra lines at that distance
        **info (bool): print slope, intercept and R^2 on the axes
        **fig_size (list): size of the figure in inches
        **fontsize (int): default 15
        **fontweight (str): default 'semibold'
        **colorbar (bool): if True, add a colorbar
        **color_by (dict): data container of a 3rd variable; pixels are
            colored by its median instead of frequency of occurrence
        **scale (string): 'lin' or 'log' color scale (log fails for
            negative values)
        **cmap (string): colormap name
        **nbins (int): number of bins per axis for the 2-D histogram

    Returns:
        ``fig, ax``
    """
    var1_tmp = data_container1
    var2_tmp = data_container2

    # only keep samples valid in both containers
    combined_mask = np.logical_or(var1_tmp['mask'], var2_tmp['mask'])
    colormap = kwargs['cmap'] if 'cmap' in kwargs else 'viridis'

    if 'var_converter' in kwargs:
        kwargs['z_converter'] = kwargs['var_converter']
    # convert var from linear unit with any converter given in helpers
    # ('log' is handled below as an axis scale, not a value conversion)
    if 'z_converter' in kwargs and kwargs['z_converter'] != 'log':
        var1 = get_converter_array(kwargs['z_converter'])[0](var1_tmp['var'][~combined_mask].ravel())
        var2 = get_converter_array(kwargs['z_converter'])[0](var2_tmp['var'][~combined_mask].ravel())
    else:
        var1 = var1_tmp['var'][~combined_mask].ravel()
        var2 = var2_tmp['var'][~combined_mask].ravel()

    x_lim = kwargs['x_lim'] if 'x_lim' in kwargs else [np.nanmin(var1), np.nanmax(var1)]
    y_lim = kwargs['y_lim'] if 'y_lim' in kwargs else [np.nanmin(var2), np.nanmax(var2)]
    # BUG FIX: copy the fig_size list before widening it for the colorbar;
    # the original mutated the caller's kwargs list in place.
    fig_size = list(kwargs['fig_size']) if 'fig_size' in kwargs else [6, 6]
    if 'colorbar' in kwargs and kwargs['colorbar']:
        fig_size[0] = fig_size[0] + 2
    fontweight = kwargs['fontweight'] if 'fontweight' in kwargs else 'semibold'
    fontsize = kwargs['fontsize'] if 'fontsize' in kwargs else 15
    nbins = 120 if not 'nbins' in kwargs else kwargs['nbins']

    # linear regression statistics and the 2-D occurrence histogram
    s, i, r, p, std_err = stats.linregress(var1, var2)
    H, xedges, yedges = np.histogram2d(var1, var2, bins=nbins, range=[x_lim, y_lim])

    if 'color_by' in kwargs:
        print("Coloring scatter plot by {}...\n".format(kwargs['color_by']['name']))
        # overwrite H with the per-pixel median of the third variable
        H = np.zeros(H.shape)
        var3 = kwargs['color_by']['var'][~combined_mask].ravel()
        # get the bins of the 2d histogram using digitize
        x_coords = np.digitize(var1, xedges)
        y_coords = np.digitize(var2, yedges)
        # sort by y then x so each histogram pixel is a contiguous slice
        newer_order = np.lexsort((x_coords, y_coords))
        x_coords = x_coords[newer_order]
        y_coords = y_coords[newer_order]
        var3 = var3[newer_order]
        first_hit_y = np.searchsorted(y_coords, np.arange(1, nbins + 2))
        first_hit_y.sort()
        first_hit_x = [np.searchsorted(x_coords[first_hit_y[j]:first_hit_y[j + 1]], np.arange(1, nbins + 2))
                       + first_hit_y[j] for j in np.arange(nbins)]
        for x in range(nbins):
            for y in range(nbins):
                H[y, x] = np.nanmedian(var3[first_hit_x[x][y]: first_hit_x[x][y + 1]])

    X, Y = np.meshgrid(xedges, yedges)
    fig, ax = plt.subplots(1, figsize=fig_size)

    # color scale: logarithmic by default, linear on request
    # NOTE(review): an unknown 'scale' value leaves formstring/pcol unset and
    # fails later -- confirm only 'lin'/'log' are ever passed.
    if not 'scale' in kwargs or kwargs['scale'] == 'log':
        formstring = "%.2E"
        if not 'c_lim' in kwargs:
            pcol = ax.pcolormesh(X, Y, np.transpose(H), norm=matplotlib.colors.LogNorm(), cmap=colormap)
        else:
            pcol = ax.pcolormesh(X, Y, np.transpose(H),
                                 norm=matplotlib.colors.LogNorm(vmin=kwargs['c_lim'][0],
                                                                vmax=kwargs['c_lim'][1]),
                                 cmap=colormap)
    elif kwargs['scale'] == 'lin':
        formstring = "%.2f"
        if not 'c_lim' in kwargs:
            kwargs['c_lim'] = [np.nanmin(H), np.nanmax(H)]
        pcol = ax.pcolormesh(X, Y, np.transpose(H),
                             vmin=kwargs['c_lim'][0], vmax=kwargs['c_lim'][1], cmap=colormap)

    if 'info' in kwargs and kwargs['info']:
        # BUG FIX: matplotlib Text has no 'labelsize' property (the original
        # raised here); the intended keyword is 'fontsize'.
        ax.text(0.01, 0.93, 'slope = {:5.3f}\nintercept = {:5.3f}\nR^2 = {:5.3f}'.format(s, i, r ** 2),
                horizontalalignment='left', verticalalignment='center',
                transform=ax.transAxes, fontweight=fontweight, fontsize=fontsize)

    # helper lines (1:1), ...
    if identity_line: add_identity(ax, color='salmon', ls='-')

    if 'custom_offset_lines' in kwargs:
        offset = np.array([kwargs['custom_offset_lines'], kwargs['custom_offset_lines']])
        # loop variable renamed so it no longer clobbers the intercept `i`
        for k in [-2, -1, 1, 2]:
            ax.plot(x_lim, x_lim + k * offset, color='salmon', linewidth=0.7, linestyle='--')

    ax.set_xlim(x_lim)
    ax.set_ylim(y_lim)
    if 'z_converter' in kwargs and kwargs['z_converter'] == 'log':
        ax.set_yscale('log')
    ax.set_xlabel('{} {} [{}]'.format(var1_tmp['system'], var1_tmp['name'], var1_tmp['var_unit']),
                  fontweight=fontweight, fontsize=fontsize)
    ax.set_ylabel('{} {} [{}]'.format(var2_tmp['system'], var2_tmp['name'], var2_tmp['var_unit']),
                  fontweight=fontweight, fontsize=fontsize)
    ax.xaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())

    if 'colorbar' in kwargs and kwargs['colorbar']:
        c_lim = kwargs['c_lim'] if 'c_lim' in kwargs else \
            [1, round(H.max(), int(np.log10(max(np.nanmax(H), 10.))))]
        cmap = copy(plt.get_cmap(colormap))
        cmap.set_under('white', 1.0)
        cbar = fig.colorbar(pcol, use_gridspec=True, extend='min', extendrect=True,
                            extendfrac=0.01, shrink=0.8, format=formstring)
        if not 'color_by' in kwargs:
            cbar.set_label(label="frequency of occurrence", fontweight=fontweight, fontsize=fontsize)
        else:
            cbar.set_label(label="median {} [{}]".format(kwargs['color_by']['name'],
                                                         kwargs['color_by']['var_unit']),
                           fontweight=fontweight, fontsize=fontsize)
        # NOTE(review): Colorbar.set_clim was removed in recent matplotlib --
        # confirm the pinned matplotlib version still provides it.
        cbar.set_clim(c_lim)
        cbar.aspect = 50

    if 'title' in kwargs:
        if kwargs['title'] == True:
            # default title: location plus the covered time span
            ax.set_title(data_container1['paraminfo']['location'] +
                         ts_to_dt(data_container1['ts'][0]).strftime(" %Y-%m-%d %H:%M - ") +
                         ts_to_dt(data_container1['ts'][-1]).strftime("%Y-%m-%d %H:%M"),
                         fontweight=fontweight, fontsize=fontsize)
        else:
            ax.set_title(kwargs['title'], fontweight=fontweight, fontsize=fontsize)

    plt.grid(b=True, which='major', color='black', linestyle='--', linewidth=0.5, alpha=0.5)
    ax.yaxis.set_minor_locator(matplotlib.ticker.AutoMinorLocator())
    ax.tick_params(axis='both', which='both', right=True, top=True)
    ax.tick_params(axis='both', which='major', labelsize=fontsize, width=3, length=5.5)
    ax.tick_params(axis='both', which='minor', width=2, length=3)
    if 'colorbar' in kwargs and kwargs['colorbar']:
        cbar.ax.tick_params(axis='both', which='major', labelsize=fontsize - 2, width=2, length=4)

    return fig, ax


def add_identity(axes, *line_args, **line_kwargs):
    """Add a 1:1 line to *axes* that tracks subsequent xlim/ylim changes."""
    identity, = axes.plot([], [], *line_args, **line_kwargs)

    def callback(axes):
        low_x, high_x = axes.get_xlim()
        low_y, high_y = axes.get_ylim()
        low = max(low_x, low_y)
        high = min(high_x, high_y)
        identity.set_data([low, high], [low, high])

    callback(axes)
    axes.callbacks.connect('xlim_changed', callback)
    axes.callbacks.connect('ylim_changed', callback)
    return axes


def dt_to_ts(dt):
    """Naive datetime (interpreted as UTC) to unix timestamp."""
    return (dt - datetime.datetime(1970, 1, 1)).total_seconds()


def ts_to_dt(ts):
    """Unix timestamp to naive UTC datetime."""
    return datetime.datetime.utcfromtimestamp(ts)


def seconds_since_2001_to_unix(times):
    """Shift an iterable of 'seconds since 2001-01-01' to unix timestamps."""
    return np.array([x + dt_to_ts(datetime.datetime(2001, 1, 1)) for x in list(times)])


def get_converter_array(string, **kwargs):
    """Collection of converters that work on arrays.

    Combines time, range and variable converters; the mask converter matters
    if the dimension order is not (time, range, ...) as in mira spec.

    Returns:
        (varconverter, maskconverter) -- both functions
    """
    if string == 'since20010101':
        return lambda x: x + dt_to_ts(datetime.datetime(2001, 1, 1)), ident
    elif string == 'unix':
        return lambda x: x, ident
    elif string == 'since19691231':
        return lambda x: x + dt_to_ts(datetime.datetime(1969, 12, 31, 23)), ident
    elif string == 'since19700101':
        return lambda x: x + dt_to_ts(datetime.datetime(1970, 1, 1)), ident
    elif string == 'beginofday':
        # NOTE(review): returns None if 'ncD' is missing from kwargs --
        # confirm callers always supply the netCDF date.
        if 'ncD' in kwargs.keys():
            return (lambda h: (h.astype(np.float64) * 3600. +
                               float(dt_to_ts(datetime.datetime(kwargs['ncD'].year,
                                                                kwargs['ncD'].month,
                                                                kwargs['ncD'].day)))),
                    ident)
    elif string == "hours_since_year0":
        return (lambda x: x * 24 * 60 * 60 - 62167305599.99999, ident)
    elif string == "pollytime":
        return (lambda x: np.array([x[i, 1] + dt_to_ts(datetime.datetime.strptime(str(int(x[i, 0])), "%Y%m%d"))
                                    for i in range(x.shape[0])]),
                ident)
    elif string == "km2m":
        return lambda x: x * 1000., ident
    elif string == "sealevel2range":
        return lambda x: x - kwargs['altitude'], ident
    elif string == 'z2lin':
        return z2lin, ident
    elif string == 'lin2z':
        return lin2z, ident
    elif string == 'switchsign':
        return lambda x: -x, ident
    elif string == "mira_azi_offset":
        return lambda x: (x + kwargs['mira_azi_zero']) % 360, ident
    elif string == 'transposedim':
        return np.transpose, np.transpose
    elif string == 'transposedim+invert3rd':
        return transpose_and_invert, transpose_and_invert
    elif string == 'divideby2':
        return divide_by(2.), ident
    elif string == 'keepNyquist':
        return ident, ident
    elif string == 'raw2Z':
        # BUG FIX: the original returned raw2Z(**kwargs), i.e. it *called*
        # raw2Z without the data array (TypeError).  Return a converter
        # function instead, as every other branch does.
        return lambda x: raw2Z(x, **kwargs), ident
    elif string == "extract_level0":
        return lambda x: x[:, 0], ident
    elif string == "extract_level1":
        return lambda x: x[:, 1], ident
    elif string == "extract_level2":
        return lambda x: x[:, 2], ident
    elif string == 'extract_1st':
        return lambda x: np.array(x[0])[np.newaxis,], ident
    elif string == "none":
        return ident, ident
    else:
        raise ValueError("converter {} not defined".format(string))


def transpose_and_invert(var):
    """Transpose and reverse the (now) last axis, for spectra stored reversed."""
    return np.transpose(var)[:, :, ::-1]
ti-vo/calibrate_Wband
function_library.py
function_library.py
py
13,311
python
en
code
0
github-code
13
8868690619
# -*- coding: utf-8 -*-
"""
Created on Sun Dec  6 02:07:37 2020

@author: apmle
"""
"""Compute the weighted average of two grades entered interactively."""

name = input("What is the student name?\n")

grades = []
weights = []
# ask for exactly two (grade, weight) pairs
for idx in (1, 2):
    grades.append(float(input(f"What is the student grade #{idx}?\n")))
    weights.append(float(input(f"What is the grade weight for the grade #{idx}?\n")))

weighted_sum = sum(g * w for g, w in zip(grades, weights))
media = round(weighted_sum / sum(weights), 2)

print(f"The weighted average grade of the student {name} was {media}.")
FabioRochaPoeta/Python-v1-Ana
Weighted average of grades.py
Weighted average of grades.py
py
712
python
en
code
1
github-code
13
38013008730
import os


class Color:
    """ANSI escape codes for colored, bold terminal output."""

    # ANSI color codes
    BLACK = "\033[30m"
    RED = "\033[31m"
    GREEN = "\033[32m"
    YELLOW = "\033[33m"
    BLUE = "\033[34m"
    MAGENTA = "\033[35m"
    CYAN = "\033[36m"
    WHITE = "\033[37m"
    RESET = "\033[0m"

    # lookup from human-readable name to escape code
    NAMES = {
        "black": BLACK,
        "red": RED,
        "green": GREEN,
        "yellow": YELLOW,
        "blue": BLUE,
        "magenta": MAGENTA,
        "cyan": CYAN,
        "white": WHITE,
        "reset": RESET,
    }

    @staticmethod
    def cprint(text, color="reset"):
        """Print *text* in bold using *color* (a name from NAMES or a raw
        ANSI code)."""
        code = Color.NAMES.get(color, color)
        print(f"\033[1m{code}{text}{Color.RESET}")


def example_path(file):
    """Path of *file* inside the ``data`` directory next to this module."""
    base = os.path.dirname(__file__)
    return os.path.join(base, "data", file)


def ppause(data, color=Color.RESET, print_newline=True):
    """Print *data* in color, wait for Enter, then erase the prompt line."""
    Color.cprint(data, color)
    input("Press Enter to continue...")
    # move the cursor up one line and clear it
    print("\033[F\033[K", end="")
    if print_newline:
        print("\n")
AbdulWahab321/HyperDS
examples/__init__.py
__init__.py
py
1,230
python
en
code
2
github-code
13
42675693334
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVR
from sklearn.metrics import roc_curve
from sklearn.ensemble import RandomForestRegressor
import lightgbm as lgb
import xgboost as xgb
import numpy as np


def extract_base_feat():
    """Build the basic feature table for the rong360 credit-overdue task.

    Reads the raw train / test_A / test_B CSVs, aggregates each source per
    user, left-joins everything on userid and writes the result to
    ``../cache/feature_basic``.
    """
    # --- user profile: one row per user (sex, occupation, education, ...) ---
    user_info_train = pd.read_csv('../data/train/user_info_train_80.csv', header=None)
    user_info_test_A = pd.read_csv('../data/test/user_info_test_A.csv', header=None)
    user_info_test_B = pd.read_csv('../data/test/user_info_test_B.csv', header=None)
    col_names = ['userid', 'sex', 'occupation', 'education', 'marriage', 'household']
    user_info_train.columns = col_names
    user_info_test_A.columns = col_names
    user_info_test_B.columns = col_names
    user_info = pd.concat([user_info_train, user_info_test_A, user_info_test_B])
    user_info.index = user_info['userid']
    user_info.drop('userid', axis=1, inplace=True)

    # --- bank transactions: mean amount/time per (user, trade_type) ---
    bank_detail_train = pd.read_csv('../data/train/bank_detail_train_80.csv', header=None)
    bank_detail_test_A = pd.read_csv('../data/test/bank_detail_test_A.csv', header=None)
    bank_detail_test_B = pd.read_csv('../data/test/bank_detail_test_B.csv', header=None)
    col_names = ['userid', 'tm_encode', 'trade_type', 'trade_amount', 'salary_tag']
    bank_detail_train.columns = col_names
    bank_detail_test_A.columns = col_names
    bank_detail_test_B.columns = col_names
    bank_detail = pd.concat([bank_detail_train, bank_detail_test_A, bank_detail_test_B])
    # presumably encoded seconds -> days (86400 s/day); TODO confirm encoding
    bank_detail['tm_encode'] = bank_detail['tm_encode'] // 86400
    bank_detail_n = (bank_detail.loc[:, ['userid', 'trade_type', 'trade_amount', 'tm_encode']]).groupby(
        ['userid', 'trade_type']).mean()
    # unstack trade_type (0 = income, 1 = outcome) into separate columns
    bank_detail_n = bank_detail_n.unstack()
    bank_detail_n.columns = ['income', 'outcome', 'income_tm', 'outcome_tm']

    # --- browse history: number of browse records per user ---
    browse_history_train = pd.read_csv('../data/train/browse_history_train_80.csv', header=None)
    browse_history_test_A = pd.read_csv('../data/test/browse_history_test_A.csv', header=None)
    browse_history_test_B = pd.read_csv('../data/test/browse_history_test_B.csv', header=None)
    col_names = ['userid', 'tm_encode_2', 'browse_data', 'browse_tag']
    browse_history_train.columns = col_names
    browse_history_test_A.columns = col_names
    browse_history_test_B.columns = col_names
    browse_history = pd.concat([browse_history_train, browse_history_test_A, browse_history_test_B])
    browse_history_count = browse_history.loc[:, ['userid', 'browse_data']].groupby(['userid']).count()

    # --- credit-card bills: per-user mean of all numeric bill columns ---
    bill_detail_train = pd.read_csv('../data/train/bill_detail_train_80.csv', header=None)
    bill_detail_test_A = pd.read_csv('../data/test/bill_detail_test_A.csv', header=None)
    bill_detail_test_B = pd.read_csv('../data/test/bill_detail_test_B.csv', header=None)
    col_names = ['userid', 'tm_encode_3', 'bank_id', 'prior_account', 'prior_repay',
                 'credit_limit', 'account_balance', 'minimun_repay', 'consume_count',
                 'account', 'adjust_account', 'circulated_interest', 'avaliable_balance',
                 'cash_limit', 'repay_state']
    bill_detail_train.columns = col_names
    bill_detail_test_A.columns = col_names
    bill_detail_test_B.columns = col_names
    bill_detail = pd.concat([bill_detail_train, bill_detail_test_A, bill_detail_test_B])
    bill_detail['tm_encode_3'] = bill_detail['tm_encode_3'] // 86400
    bill_detail_mean = bill_detail.groupby(['userid']).mean()
    # bank_id is a categorical code; its mean is meaningless, so drop it
    bill_detail_mean.drop('bank_id', axis=1, inplace=True)

    # --- loan time: one timestamp per user, also converted to days ---
    loan_time_train = pd.read_csv('../data/train/loan_time_train_80.csv', header=None)
    loan_time_test_A = pd.read_csv('../data/test/loan_time_test_A.csv', header=None)
    loan_time_test_B = pd.read_csv('../data/test/loan_time_test_B.csv', header=None)
    loan_time = pd.concat([loan_time_train, loan_time_test_A, loan_time_test_B])
    loan_time.columns = ['userid', 'loan_time']
    loan_time['loan_time'] = loan_time['loan_time'] // 86400
    loan_time.index = loan_time['userid']
    loan_time.drop('userid', axis=1, inplace=True)

    # --- target table: used only to fix the set/order of userids ---
    target_train = pd.read_csv('../data/train/overdue_train_80.csv', header=None)
    target_test_A = pd.read_csv('../data/test/overdue_test_A.csv', header=None)
    target_test_B = pd.read_csv('../data/test/overdue_test_B.csv', header=None)
    target = pd.concat([target_train, target_test_A, target_test_B])
    target.columns = ['userid', 'label']
    target.index = target['userid']
    # NOTE(review): this drops BOTH columns, leaving an empty frame whose
    # userid index anchors the merges below; the label itself is NOT kept in
    # the feature file -- confirm that is intended.
    target.drop(['userid', 'label'], axis=1, inplace=True)

    # left-join every aggregated source onto the userid index
    feature = pd.merge(target, user_info, how='left', left_index=True, right_index=True)
    feature = pd.merge(feature, bank_detail_n, how='left', left_index=True, right_index=True)
    feature = pd.merge(feature, bill_detail_mean, how='left', left_index=True, right_index=True)
    feature = pd.merge(feature, browse_history_count, how='left', left_index=True, right_index=True)
    feature = pd.merge(feature, loan_time, how='left', left_index=True, right_index=True)
    # days between the mean bill time and the loan time, plus a binary flag
    feature['time'] = feature['loan_time'] - feature['tm_encode_3']
    feature['time1'] = (feature['loan_time'] > feature['tm_encode_3']).astype('int')
    print(feature.shape)
    feature.to_csv("../cache/feature_basic")


if __name__ == '__main__':
    extract_base_feat()
squirrelmaster/rong360-8
src/base/extract_base.py
extract_base.py
py
5,230
python
en
code
0
github-code
13
3647024217
#!/usr/bin/env python
# encoding: utf-8
"""
==============================================
objectName: InsterAutoTest_w
fileName: log_pane
Author: Hang
Date: 2020/4/13/013
description: log pane window backed by the Qt-Designer-generated Ui_Form
==============================================
"""
import sys
from time import sleep
from PyQt5 import QtGui
from PyQt5.QtCore import QObject, Qt
from PyQt5.QtGui import QIcon, QTextCursor, QTextCharFormat, QTextImageFormat, QTextDocumentFragment, QTextListFormat, \
    QTextTableFormat, QTextFrameFormat
from PyQt5.QtWidgets import QApplication, QWidget, QLabel, qApp, QPushButton, QLineEdit, QAction, QCompleter, QTextEdit

from ui_log_Pane import Ui_Form


class Window(QWidget, Ui_Form):
    """Log pane window; the widget layout comes from the generated Ui_Form."""

    def __init__(self):
        super().__init__()
        self.num = 0
        # FIX: the original called setWindowTitle("") twice in a row.
        self.setWindowTitle("")
        self.resize(500, 500)
        self.setupUi(self)   # build the Designer-generated widgets
        self.setup_ui()      # hook for additional manual setup

    def setup_ui(self):
        """Placeholder for manual UI tweaks beyond the generated layout."""
        pass

    # ================================== start
    def hang(self, num):
        """Append *num* to the log text edit.

        NOTE(review): QTextEdit.append expects a string -- confirm callers
        pass text rather than an int.
        """
        self.log_textEdit.append(num)
    # ================================== end


if __name__ == '__main__':
    # Manual test: only runs when the module is executed directly,
    # not when imported.
    app = QApplication(sys.argv)
    window = Window()
    window.show()
    sys.exit(app.exec_())
HangAndy/InsterAutoTest_w
log_pane.py
log_pane.py
py
1,368
python
en
code
0
github-code
13
37000097300
from flask import redirect, render_template, request, flash, url_for, abort, jsonify

from . import bp
from belka.models import db, Api, Field, Data


@bp.get('/<api_name>')
def items(api_name):
    """List records of an API as JSON ``{'total': ..., 'results': [...]}``.

    Query parameters:
        search.{field} -- case-insensitive substring filter on a string field
        page           -- zero-based page number (default 0)
        page_size      -- records per page (default 20)

    The total (pre-pagination) count is also sent in the ``X-Total`` header.
    """
    q = db.select(Api).filter_by(active=True, name=api_name)
    api = db.one_or_404(q, description='API не найден. Удалили, может?')

    q = db.select(Data).filter_by(api_id=api.id).order_by(Data.sort)
    data = db.session.execute(q).scalars().all()

    result = []
    for obj in data:
        item = {'id': obj.id}
        ok = True
        for field in api.fields:
            val = obj.content.get(field.name)
            search_param = f'search.{field.name}'
            if search_param in request.args:
                if field.type != 'string':
                    abort(400, 'Нельзя искать по не-текстовым полям.')
                # BUG FIX: a record missing the searched field had val=None and
                # the original crashed on val.lower(); treat it as a non-match.
                if val is None or request.args[search_param].lower() not in val.lower():
                    ok = False
                    break
            item[field.name] = val
        if ok:
            result.append(item)

    page = request.args.get('page', 0, type=int)
    pagesize = request.args.get('page_size', 20, type=int)
    total = len(result)
    result = result[page * pagesize: (page + 1) * pagesize]

    resp = jsonify({'total': total, 'results': result})
    resp.headers['X-Total'] = str(total)
    return resp


@bp.get('/<api_name>/<int:obj_id>')
def item(api_name, obj_id):
    """Return one record of an API by its id; 404 if the API or id is unknown."""
    q = db.select(Api).filter_by(active=True, name=api_name)
    api = db.one_or_404(q, description='API не найден. Удалили, может?')

    q = db.select(Data).filter_by(api_id=api.id, id=obj_id).order_by(Data.sort)
    obj = db.session.execute(q).scalar_one_or_none()
    if not obj:
        abort(404, 'Записи с таким ID не существует.')

    item = {'id': obj.id}
    for field in api.fields:
        val = obj.content.get(field.name)
        item[field.name] = val

    return jsonify(item)
uisky/belka
belka/api/views.py
views.py
py
2,156
python
en
code
0
github-code
13
21794102963
class Calculator:
    """Deferred calculator: operations queue up and run only on calc()."""

    def __init__(self, num):
        self.num = num
        self.buffer = []  # queued (operation, value) pairs

    def plus(self, value):
        """Queue an addition; returns self for chaining."""
        self.buffer.append(('plus', value))
        return self

    def minus(self, value):
        """Queue a subtraction; returns self for chaining."""
        self.buffer.append(('minus', value))
        return self

    def calc(self):
        """Apply the queued operations in order and return the result."""
        for name, amount in self.buffer:
            # unknown operation names are silently ignored, as before
            delta = amount if name == 'plus' else (-amount if name == 'minus' else 0)
            self.num += delta
        return self.num


print(Calculator(10).plus(5).minus(7).calc())
lokosuns/working_list
test.py
test.py
py
590
python
en
code
0
github-code
13
327661051
from pathlib import Path

from filesystem import FileSystem

if __name__ == "__main__":
    # Rebuild the directory tree from the recorded terminal session.
    session = Path("input.txt").read_text()
    tree = FileSystem(session)

    total_needed = 30000000  # free space the update requires
    must_free = total_needed - tree.free_space

    # Among directories big enough that deleting one frees sufficient
    # space, report the size of the smallest.
    big_enough = tree.find(lambda node: node.size >= must_free)
    print(min(node.size for node in big_enough))
grey-area/advent-of-code-2022-copilot
day07/part2.py
part2.py
py
516
python
en
code
1
github-code
13
12740620233
from torch import nn
from torchvision.models import ResNet
from torchvision.models.resnet import BasicBlock
import torch.utils.model_zoo as model_zoo
import torch

# Official torchvision download URLs for pretrained ResNet weights.
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
    'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
    'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}


class ResNet18FeatureExtractor(ResNet):
    '''
    Feature extractor from ResNet-18: runs the standard trunk but stops
    after global average pooling, returning the flattened feature vector
    instead of class logits (the inherited fc layer is never applied).
    '''
    def __init__(self, num_classes, pretrained=True):
        # num_classes is only stored for reference; it does not size any
        # layer because the classification head is bypassed in forward().
        self.num_classes = num_classes
        self.pretrained = pretrained
        super(ResNet18FeatureExtractor, self).__init__(block=BasicBlock, layers=[2, 2, 2, 2],)
        if pretrained:
            # load the full pretrained ResNet-18 state (including its fc,
            # which is unused here)
            self.load_state_dict(model_zoo.load_url(model_urls['resnet18']))

    def forward(self, x):
        # standard ResNet trunk up to global average pooling
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # flatten to (batch, features); no classification head applied
        x = x.view(x.size(0), -1)
        return x


class ResNet18FeatureExtractorBernoulli(ResNet):
    '''
    ResNet-18 with a replaced final linear head of ``num_classes`` outputs
    (default 1), e.g. for a Bernoulli/binary output.
    '''
    def __init__(self, num_classes=1, pretrained=True):
        self.num_classes = num_classes
        self.pretrained = pretrained
        super(ResNet18FeatureExtractorBernoulli, self).__init__(block=BasicBlock,
                                                                layers=[2, 2, 2, 2],
                                                                num_classes=num_classes)
        # new head sized for num_classes outputs
        self.fc = nn.Linear(512 * BasicBlock.expansion, num_classes)

        if pretrained:
            state_dict = model_zoo.load_url(model_urls['resnet18'])
            # NOTE(review): the pretrained fc weights are replaced with
            # torch.rand so only the trunk is pretrained and the head starts
            # untrained (and not even with a standard init) -- confirm this
            # is intended.
            state_dict['fc.bias'] = torch.rand(num_classes)
            state_dict['fc.weight'] = torch.rand((num_classes, 512))
            self.load_state_dict(state_dict)

    def forward(self, x):
        # standard ResNet trunk ...
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        # ... followed by the replaced linear head
        x = self.fc(x)
        return x
AlbertoCastelo/bayesian-dl-medical-diagnosis
deep_gp/models/resnet18.py
resnet18.py
py
2,415
python
en
code
0
github-code
13
6773161788
#!/home/dh_nfjxcr/opt/python-3.8.2/bin/python3
"""Passenger WSGI entry point: re-exec under the venv interpreter, extend
sys.path for the application, and expose ``application`` for Passenger."""
import sys, os

# Re-exec this script with the virtualenv's interpreter if we are not
# already running under it.
INTERP = os.path.join(os.environ['HOME'], 'artishan.io', 'venv', 'bin', 'python3')
if sys.executable != INTERP:
    print("Adding Path")
    os.execl(INTERP, INTERP, *sys.argv)

# Make the application package importable.
sys.path.append(os.getcwd())
sys.path.append(os.getcwd() + '/myapp')
sys.path.append(os.path.join(os.environ['HOME'], 'artishan.io', 'myapp'))
# BUG FIX: the original inserted literal '$HOME/...' strings -- shell
# variables are not expanded inside Python strings, so those path entries
# pointed at non-existent directories.  Build the real paths instead.
sys.path.insert(0, os.path.join(os.environ['HOME'], 'opt', 'python-3.8.2', 'bin', 'python3'))
sys.path.insert(0, os.path.join(os.environ['HOME'], 'artishan.io', 'myapp'))
sys.path.insert(0, os.path.join(os.environ['HOME'], 'artishan.io', 'venv', 'bin'))

from myapp import app as application

# Debug breadcrumb: record the argv this process was started with
# (context manager closes the handle; the original left it to GC).
with open(os.path.expanduser('~/artishan.io/log.log'), 'w') as log_file:
    log_file.write(repr(sys.argv))
jordanbene/artishan
passenger_wsgi.py
passenger_wsgi.py
py
802
python
en
code
0
github-code
13
25499341242
# Raw email bodies used by the exercises below (files must sit next to
# this script).
email_one = open("email_one.txt", "r").read()
email_two = open("email_two.txt", "r").read()
email_three = open("email_three.txt", "r").read()
email_four = open("email_four.txt", "r").read()

# Words/phrases that must never appear in released text.
proprietary_terms = ["Helena", "she", "personality matrix", "sense of self",
                     "self-preservation", "learning algorithm", "her", "herself"]

# Sentiment words censored only after their second occurrence (Question 4).
negative_words = ["concerned", "behind", "danger", "dangerous", "alarming", "alarmed",
                  "out of control", "help", "unhappy", "bad", "upset", "awful", "broken",
                  "damage", "damaging", "dismal", "distressed", "distressed", "concerning",
                  "horrible", "horribly", "questionable"]


#Answer for Question 2. This function will censor any word or phrase from a string taking into account capitalizations, punctuations and word length.
def word_censor(word, string):
    # Split a multi-word phrase into its words; a single word stays alone.
    word_lst = []
    if word.isalpha():
        word_lst.append(word)
    else:
        word_lst += word.split()
    sliced_string = string
    # count the words in the phrase
    count = 0
    for wrd in word_lst:
        count += 1
    # Build the replacement: each word becomes a same-length run of "X",
    # words separated by single spaces.
    number = -count
    censored_phrase = ""
    while number != 0:
        censored_phrase += " " + len(word_lst[number])*"X"
        number += 1
    censored_phrase = censored_phrase[1:len(censored_phrase)]
    # Replace every occurrence that starts at a word boundary and is not a
    # prefix of a longer word (a trailing plural "s" is also accepted).
    for i in range(len(string)):
        if sliced_string[i:i+len(word)].lower() == word.lower():
            if i == 0 or sliced_string[i-1] == " " or sliced_string[i-1] == "\n":
                if len(sliced_string[i:i+len(word)]) == (len(word)+1) or not sliced_string[i+len(word)].isalpha() or sliced_string[i+len(word)] == "s" and not sliced_string[i+len(word)+1].isalpha():
                    # NOTE(review): the slice is sized by the (space-padded)
                    # replacement, not the original word -- confirm lengths
                    # always match for multi-word phrases.
                    sliced_string = sliced_string[:i] + censored_phrase + sliced_string[i+len(censored_phrase):]
    return sliced_string

print(word_censor("learning algorithms", email_one))


#Answer for Question 3. This function runs a list of words or phrases through the word_censor function.
def word_lst_censor(word_lst, string):
    # Apply word_censor once per term that actually occurs (case-insensitive).
    new_string = string
    for word in word_lst:
        if word.lower() in new_string.lower():
            new_string = word_censor(word, new_string)
    return new_string

#print(word_lst_censor(proprietary_terms, email_two))


#Answer for Question 4. This function censors all words/phrases from the prop_words_lst argument as well as all words/phrases from the neg_words_lst argument after their second occurance.
def neg_prop_word_censor(neg_word_lst, prop_word_lst, string):
    # Proprietary terms are always censored first.
    string = word_lst_censor(prop_word_lst, string)
    # Track the positions of the first three negative-word hits; everything
    # from the third hit onward gets censored.
    first_bad_word = len(string)
    second_bad_word = len(string)
    third_bad_word = len(string)
    for i in range(len(string)):
        for word in neg_word_lst:
            index = string.lower().find(word.lower(), i, i+len(word))
            if index == -1:
                continue
            elif index < first_bad_word:
                first_bad_word = index
            elif index < second_bad_word:
                second_bad_word = index
            elif index < third_bad_word:
                third_bad_word = index
    return string[:third_bad_word] + word_lst_censor(neg_word_lst, string[third_bad_word:])

#print(neg_prop_word_censor(negative_words, proprietary_terms, email_three))


#Answer for Question 5:
def one_before_through_one_after(neg_words, prop_words, string):
    # Censor every listed term plus the word immediately before and the word
    # immediately after each occurrence.
    censored_words_lst = neg_words + prop_words
    new_string = string
    for word in censored_words_lst:
        start_index = 0
        end_index = len(word)
        if word.lower() in string.lower():
            for i in range(len(new_string)):
                count = 0
                # match at a word boundary in the ORIGINAL string; offsets in
                # new_string stay valid because every replacement preserves
                # length (characters become "X")
                if string[i:i+len(word)].lower() == word.lower():
                    if i == 0 or new_string[i-1] == " " or new_string[i-1] == "\n":
                        if len(string[i:i+len(word)]) == (len(word)) or not string[i+len(word)].isalpha():
                            start_index = i
                            end_index = i + len(word) - 1
                            # include a trailing apostrophe-suffix ("'s")
                            if string[i+len(word)] == "'":
                                end_index = i + len(word) + 1
                        else:
                            continue
                        # default: no neighbouring words to censor
                        word_before_start = start_index
                        word_before_end = start_index
                        word_after_end = end_index
                        word_after_start = end_index
                        # find the span of the preceding word, unless it was
                        # already censored (ends in "X")
                        if start_index != 0:
                            a = 1
                            b = 1
                            while not new_string[start_index - a].isalpha():
                                a += 1
                            if new_string[start_index - a] != "X":
                                word_before_end = start_index - (a-1)
                                while new_string[word_before_end - b].isalpha():
                                    b += 1
                                word_before_start = word_before_end - (b-1)
                        # find the span of the following word
                        # NOTE(review): this `or` condition is always True --
                        # probably `and` was intended for the end-of-string
                        # guard; confirm before changing.
                        if end_index + 1 != len(new_string) or end_index + 2 != len(new_string):
                            a = 1
                            b = 0
                            while not string[end_index + a].isalpha():
                                a += 1
                            word_after_start = end_index + a
                            while string[word_after_start + b].isalpha() or string[word_after_start + b] == "'":
                                b += 1
                            word_after_end = word_after_start + b
                        # splice in "X" runs for before-word, the match
                        # itself, and after-word; all spans keep their length
                        new_string = new_string[:word_before_start] + (word_before_end - word_before_start)*"X" + new_string[word_before_end:start_index] + ((end_index+1) - start_index)*"X" + new_string[(end_index+1):word_after_start] + (word_after_end - word_after_start)*"X" + new_string[word_after_end:]
    return new_string

#print(one_before_through_one_after(negative_words, proprietary_terms, email_four))
randy-python/Censor_Dispenser
censor_dispenser_final.py
censor_dispenser_final.py
py
5,666
python
en
code
0
github-code
13
34799559042
import os
import glob
import cv2
import matplotlib.pyplot as plt
import shutil

# Root of the per-sequence training data; each sequence directory holds an
# 'img1' sub-directory with numbered frames (MOT-style layout).
images_path = '/home/zx/博士VOC/train/'
num = 0  # total number of frames processed
seq_names = os.listdir(images_path)   # renamed: original shadowed builtin `list`
sub_dir = 'img1'                      # renamed: original shadowed builtin `dir`
namelist = []


def text_save(content, filename, mode='a'):
    """Write every element of *content* to *filename*, one per line."""
    with open(filename, mode) as file:
        for entry in content:
            file.write(str(entry) + '\n')


for seq in seq_names:
    path = os.path.join(images_path, seq)
    path_c = os.path.join(path, sub_dir)
    for image_file in os.listdir(path_c):
        image_file = os.path.join(path_c, image_file)
        num += 1
        name = image_file.split('/')
        # e.g. '000123.jpg' -> 123; name[-3] is the sequence name.
        # NOTE(review): name_new carries no file extension (the original did
        # the same) -- confirm extension-less frame names are intended.
        name_last = int(name[-1].split('.')[0])
        name_new = name[-3] + '_' + str(name_last)
        name_txt = name_new.split('.')[0]
        namelist.append(name_txt)
        # FIX: move the frame directly to its final destination.  The
        # original ran two `os.system('mv ...')` calls (rename inside the
        # sequence dir, then move up to train/); a single shutil.move reaches
        # the same final path, checks errors, and does not break on paths
        # containing spaces.
        shutil.move(image_file, os.path.join('/home/zx/博士VOC/train/', name_new))

print(num)
text_save(namelist, '/home/zx/博士VOC/MOT17VOC/Annotations/Annotations.txt')
Xiehuaiqi/python_script
gt2xml/change_name.py
change_name.py
py
1,240
python
en
code
0
github-code
13
24268082516
import tensorflow as tf import matplotlib.pyplot as plt import numpy as np import pydicom from PIL import Image #load image from disk and convert to array def image2array(full_path,shape=(224,224,3)): h,w,d=shape grayscale=d==1 # raise Exception("Stopped for no reason") if type(full_path)==bytes: full_path=full_path.decode('utf-8') # print(full_path) if '.dcm' in str(full_path): im = pydicom.read_file(full_path) image_array = im.pixel_array image_array = image_array / image_array.max() image_array = (255 * image_array).clip(0, 255) # .astype(np.int32) image_array = Image.fromarray(image_array) else: image_array = Image.open(str(full_path)) #image_array = image_array.convert("RGB") if grayscale: image_array=image_array.convert("L") else: image_array=image_array.convert("RGB") image_array = image_array.resize((h,w)) image_array = np.asarray(image_array)/255.0 if grayscale: image_array=image_array.reshape((h,w,1)) return image_array def preprocess(image,shape,scale=True): image = tf.image.decode_jpeg(image, channels=shape[2]) image = tf.image.resize(image, shape[:2]) if scale: image = image /255.0 return image def preprocess2(file,shape,scale=True): image = tf.io.read_file(file) image=tf.cond( tf.strings.regex_full_match(file,".+png$"), lambda: tf.image.decode_png(image,channels=shape[2]), lambda: tf.image.decode_jpeg(image,channels=shape[2]) ) image = tf.image.resize(image, shape[:2]) if scale: image /= 255.0 # normalize to [0,1] range return image def load_and_preprocess_image(file,shape,scale=True): image = tf.io.read_file(file) image2=preprocess(image,shape,scale=scale) return image2 def load_and_preprocess_image_ensemble(im,rev,shape,scale=True): im2=load_and_preprocess_image(im,shape=shape,scale=scale) return im2,rev def load_and_preprocess_multitask(path_perch,lab_perch,path_chestray,lab_chestray,shape,scale=True): im_perch = tf.io.read_file(path_perch) im_perch2=preprocess(im_perch,shape,scale=scale) im_chestray = tf.io.read_file(path_chestray) im_chestray2 = 
preprocess(im_chestray, shape,scale=scale) return im_perch2,lab_perch,im_chestray2,lab_chestray def load_and_preprocess_multitask_ensemble(path_perch,lab_perch,rev,path_chestray,lab_chestray,shape,scale=True): im_perch2, lab_perch, im_chestray2, lab_chestray=\ load_and_preprocess_multitask(path_perch,lab_perch,path_chestray,lab_chestray,shape=shape,scale=scale) return im_perch2, lab_perch,rev, im_chestray2, lab_chestray def binarize(image,threshold=0.5): if threshold is None: threshold=tf.random.uniform([],minval=0.4,maxval=0.6,dtype=tf.float32) return tf.cast(image>threshold,tf.float32) #IMAGE AUGUMENTATION def plot_images(dataset, n_images, samples_per_image,shape=(224,224,3),transform=None): height,width,depth=shape output = np.zeros((height * n_images, width * samples_per_image, depth)) row = 0 for images in dataset.repeat(samples_per_image).batch(n_images): output[:, row*width:(row+1)*height] = np.vstack(images.numpy()) row += 1 if transform is not None: output=transform(output) plt.figure(figsize=(12.0,12.0)) if depth==1: plt.imshow(output[:,:,0],cmap="gray",vmin=0,vmax=1) plt.show() else: plt.imshow(output) plt.show() def subtract(x,value=0.5): return x-value def divide(x,value=255.0): return x/value def multiply(x,value=2.0): return x*value def flip(x): x=tf.image.random_flip_left_right(x) x=tf.image.random_flip_up_down(x) return x def normalize(x): return tf.image.per_image_standardization(x) def color(x,cont_lower=0.3,cont_upper=0.9,bright_delta=0.1): x=tf.image.random_brightness(x,bright_delta) x=tf.image.random_contrast(x,cont_lower,cont_upper) return x def rotate(x: tf.Tensor): # Rotate 0, 90, 180, 270 degrees return tf.image.rot90(x, tf.random.uniform(shape=[], minval=0, maxval=4, dtype=tf.int32)) def crop_and_pad(x,proportion=0.20,width=0.8,height=0.8): im_width,im_height,im_channels=x.shape crop_width=tf.cast(tf.floor(im_width*width),tf.int32) crop_height=tf.cast(tf.floor(im_height*height),tf.int32) def crop(x): 
x=tf.image.random_crop(x,(crop_width,crop_height,im_channels)) ofset_y=tf.random.uniform([],minval=0,maxval=im_height-crop_height,dtype=tf.int32) ofset_x = tf.random.uniform([],minval=0,maxval=im_width-crop_width,dtype=tf.int32) x = tf.image.pad_to_bounding_box(x,offset_height=ofset_y,offset_width=ofset_x, target_width=im_width, target_height=im_height) return x x=tf.cond(tf.random.uniform([], 0, 1) < proportion,lambda :crop(x),lambda:x) return x def crop_and_resize(x,width=0.8,height=0.8,proportion=0.8): def crop(x): im_width, im_height, im_channels = x.shape crop_width = tf.cast(tf.floor(im_width * width), tf.int32) crop_height = tf.cast(tf.floor(im_height * height), tf.int32) x = tf.image.random_crop(x, (crop_width, crop_height, im_channels)) x = tf.image.resize(x, size=(im_height, im_width)) return x x=tf.cond(tf.random.uniform([], 0, 1) < proportion,lambda :crop(x),lambda:x) return x def augument_multitask(im_perch,lab_perch,im_chestray,lab_chestray,aug_list=[]): for aug in aug_list: im_perch=aug(im_perch) im_chestray=aug(im_chestray) return im_perch,lab_perch,im_chestray,lab_chestray def augument_multitask_ensemble(im_perch,lab_perch,rev,im_chestray,lab_chestray,aug_list=[]): for aug in aug_list: im_perch=aug(im_perch) im_chestray=aug(im_chestray) return im_perch,lab_perch,rev,im_chestray,lab_chestray def augment_ensemble(im,rev,aug_funs=[]): for fun in aug_funs: im=fun(im) return im,rev
pmwaniki/perch-analysis
data/preprocess_image.py
preprocess_image.py
py
5,999
python
en
code
0
github-code
13
18629334203
from parameters import * from schemes import * import numpy as np import matplotlib.pyplot as plt def ensemble(Tmid, hmid, dT, dh, mu0, n_cycles=5,f_ann=0.,f_ran=0.,epsilon=0., mu_ann=0.): """ Perturbs T and h at start of each forecast in increments of dT and dh around Tmid and hmid""" T = np.arange(Tmid-2*dT, Tmid+2*dT, dT) h = np.arange(hmid-2*dh, hmid+2*dh, dh) fig=plt.figure() ax1=plt.subplot(121) plt.xlabel('T (K)') plt.ylabel('h (m)') ax2=plt.subplot(222) plt.ylabel('T (K)') ax3=plt.subplot(224,sharex=ax2) plt.xlabel('Time (months)') plt.ylabel('h (m)') for iT in range(len(T)): for ih in range(len(h)): T0 = T[iT] h0 = h[ih] # Calculate T and h for this particular ensemble Tens,hens=rk4(T0,h0,mu0,n_cycles,f_ann,f_ran,epsilon,mu_ann) nt= int(round(n_cycles*42/(2*dt))) # Add this SST to plot ax1.plot(7.5*Tens,150*hens) ax2.plot(2.*dt*np.arange(nt),7.5*Tens) ax3.plot(2.*dt*np.arange(nt),150*hens) ax2.get_xaxis().set_visible(False) fig.set_size_inches(8,3) plt.show()
lm2612/mtmw14
project1/ensemble.py
ensemble.py
py
1,191
python
en
code
0
github-code
13
70930412178
from odoo import models, fields, api class StockPicking(models.Model): _inherit = 'stock.picking' def add_qty_done_by_sale_line(self, sale_order_line_id, qty_done): self.ensure_one() found = False for move in self.move_ids_without_package: if move.sale_line_id.id == sale_order_line_id: move.write({'quantity_done': qty_done}) found = True break if found: sucursal_category = self.env['res.partner.category'].search([('name', '=', 'Sucursal')]) if (sucursal_category) and (sucursal_category[0] not in self.partner_id.category_id): print('==========================') print('no sucursal') print('==========================') res = self.button_validate()
erickabrego/piedica_pruebas_nov
mrp_operations_qrcode/models/.ipynb_checkpoints/stock_picking-checkpoint.py
stock_picking-checkpoint.py
py
899
python
en
code
0
github-code
13
23988876649
import numpy as np import cv2 as cv import os # Lee imagen img = cv.imread(os.path.dirname(__file__) + '\Star.jpg') # Transforma a escala de grises imgGris = cv.cvtColor(img, cv.COLOR_BGR2GRAY) # Especifica valor de umbral umbral = 127 # Transforma a imagen binaria ret, imgBin = cv.threshold(imgGris,umbral,255,0) # Encuentra puntos de contorno contours,hierarchy = cv.findContours(imgBin, 1, 2) cnt = contours[0] # Calcula centroide M = cv.moments(cnt) cx = int(M['m10']/M['m00']) cy = int(M['m01']/M['m00']) print("Centroide", cx,cy) # Calcula perímetro perimeter = cv.arcLength(cnt,True) print("perimetro", perimeter) # Visualiza imagen binaria cv.imshow("Imagen Binaria", imgBin) cv.waitKey(0)
Atrabilis/UACH
Vision artificial/Tarea 3/Codigo de ayuda/Tarea 3 P6.py
Tarea 3 P6.py
py
708
python
es
code
1
github-code
13
30133458149
import csv from slugify import slugify from core.models import CSV, Tag TAG_HEADER = ['title', 'slug'] def process_csv_tag_file(instance_id): instance = CSV.objects.get(id=instance_id) reader = csv.DictReader(instance.file.read().decode('utf-8').splitlines()) header_ = reader.fieldnames if TAG_HEADER != header_: instance.error_detail = "Cabeçalho fora do padrão" instance.save() return for row in reader: title = row['title'] slug = row['slug'] slug = slugify(title) if slug == "" else slugify(slug) tag, created = Tag.objects.get_or_create( owner=instance.owner, title=title ) tag.slug = slug tag.save()
guilehm/expense-control-system
utils/tag_importer.py
tag_importer.py
py
744
python
en
code
1
github-code
13
9341632972
#!/usr/bin/env python3 # -*- coding: utf-8 -*- from sklearn.datasets import load_iris, load_wine, fetch_california_housing import zipfile import os import pandas as pd import numpy as np import wget DATASETS = ['iris', 'wine', 'california', 'parkinsons', \ 'climate_model_crashes', 'concrete_compression', \ 'yacht_hydrodynamics', 'airfoil_self_noise', \ 'connectionist_bench_sonar', 'ionosphere', 'qsar_biodegradation', \ 'seeds', 'glass', 'ecoli', 'yeast', 'libras', 'planning_relax', \ 'blood_transfusion', 'breast_cancer_diagnostic', \ 'connectionist_bench_vowel', 'concrete_slump', \ 'wine_quality_red', 'wine_quality_white', \ 'bean', 'tictactoe','congress','car', 'higgs'] def dataset_loader(dataset): """ Data loading utility for a subset of UCI ML repository datasets. Assumes datasets are located in './datasets'. If the called for dataset is not in this folder, it is downloaded from the UCI ML repo. Parameters ---------- dataset : str Name of the dataset to retrieve. Valid values: see DATASETS. Returns ------ X : ndarray Data values (predictive values only). 
""" assert dataset in DATASETS , f"Dataset not supported: {dataset}" if not os.path.isdir('datasets'): os.mkdir('datasets') if dataset in DATASETS: bin_y = False # binary outcome cat_y = False # categorical w/ >=2 outcome int_y = False # integer outcome bin_x = None # binary cat_x = None # categorical w/ >=2 classes int_x = None # integers if dataset == 'iris': my_data = load_iris() cat_y = True elif dataset == 'wine': my_data = load_wine() cat_y = True #elif dataset == 'boston': # not part of sklearn anymore # my_data = load_boston() elif dataset == 'california': my_data = fetch_california_housing() int_x = [1, 4] elif dataset == 'parkinsons': my_data = fetch_parkinsons() bin_y = True elif dataset == 'climate_model_crashes': my_data = fetch_climate_model_crashes() bin_y = True elif dataset == 'concrete_compression': my_data = fetch_concrete_compression() int_x = [7] elif dataset == 'yacht_hydrodynamics': my_data = fetch_yacht_hydrodynamics() elif dataset == 'airfoil_self_noise': my_data = fetch_airfoil_self_noise() elif dataset == 'connectionist_bench_sonar': my_data = fetch_connectionist_bench_sonar() bin_y = True elif dataset == 'ionosphere': my_data = fetch_ionosphere() bin_x = [0] bin_y = True elif dataset == 'qsar_biodegradation': my_data = fetch_qsar_biodegradation() int_x = [2,3,4,5,6,8,9,10,15,18,19,20,22,25,31,32,33,34,37,39,40] bin_x = [23,24,28] bin_y = True elif dataset == 'seeds': my_data = fetch_seeds() cat_y = True elif dataset == 'glass': my_data = fetch_glass() cat_y = True elif dataset == 'ecoli': my_data = fetch_ecoli() cat_y = True elif dataset == 'yeast': my_data = fetch_yeast() cat_y = True elif dataset == 'libras': my_data = fetch_libras() cat_y = True elif dataset == 'planning_relax': my_data = fetch_planning_relax() bin_y = True elif dataset == 'blood_transfusion': my_data = fetch_blood_transfusion() int_x = [0,1,3] bin_y = True elif dataset == 'breast_cancer_diagnostic': my_data = fetch_breast_cancer_diagnostic() bin_y = True elif dataset 
== 'connectionist_bench_vowel': my_data = fetch_connectionist_bench_vowel() bin_y = True elif dataset == 'concrete_slump': my_data = fetch_concrete_slump() elif dataset == 'wine_quality_red': int_y = True my_data = fetch_wine_quality_red() elif dataset == 'wine_quality_white': int_y = True my_data = fetch_wine_quality_white() elif dataset == 'bean': my_data = fetch_bean() int_x = [0,6] cat_y = True elif dataset == 'tictactoe': # all categorical my_data = fetch_tictactoe() cat_x = [0,1,2,3,4,5,6,7,8] bin_y = True elif dataset == 'congress': # all categorical my_data = fetch_congress() cat_x = [0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15] cat_y = True elif dataset == 'car': # all categorical my_data = fetch_car() cat_x = [0,1,2,3,4,5] cat_y = True else: raise Exception('dataset does not exists') X, y = my_data['data'], my_data['target'] return X, bin_x, cat_x, int_x, y, bin_y, cat_y, int_y def fetch_parkinsons(): if not os.path.isdir('datasets/parkinsons'): os.mkdir('datasets/parkinsons') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/parkinsons/parkinsons.data' wget.download(url, out='datasets/parkinsons/') with open('datasets/parkinsons/parkinsons.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header = 0) Xy = {} Xy['data'] = np.concatenate((df.values[:, 1:17].astype('float'), df.values[:, 18:].astype('float')), axis=1) Xy['target'] = pd.factorize(df.values[:, 17])[0] # str to numeric return Xy def fetch_climate_model_crashes(): if not os.path.isdir('datasets/climate_model_crashes'): os.mkdir('datasets/climate_model_crashes') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00252/pop_failures.dat' wget.download(url, out='datasets/climate_model_crashes/') with open('datasets/climate_model_crashes/pop_failures.dat', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header = 0) Xy = {} Xy['data'] = df.values[:, 2:-1].astype('float') Xy['target'] = df.values[:, -1] return Xy def fetch_concrete_compression(): if not 
os.path.isdir('datasets/concrete_compression'): os.mkdir('datasets/concrete_compression') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/compressive/Concrete_Data.xls' wget.download(url, out='datasets/concrete_compression/') with open('datasets/concrete_compression/Concrete_Data.xls', 'rb') as f: df = pd.read_excel(io=f) Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = df.values[:, -1].astype('float') return Xy def fetch_yacht_hydrodynamics(): if not os.path.isdir('datasets/yacht_hydrodynamics'): os.mkdir('datasets/yacht_hydrodynamics') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00243/yacht_hydrodynamics.data' wget.download(url, out='datasets/yacht_hydrodynamics/') with open('datasets/yacht_hydrodynamics/yacht_hydrodynamics.data', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header = None) Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = df.values[:, -1].astype('float') return Xy def fetch_airfoil_self_noise(): if not os.path.isdir('datasets/airfoil_self_noise'): os.mkdir('datasets/airfoil_self_noise') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00291/airfoil_self_noise.dat' wget.download(url, out='datasets/airfoil_self_noise/') with open('datasets/airfoil_self_noise/airfoil_self_noise.dat', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header = None) Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = df.values[:, -1].astype('float') return Xy def fetch_connectionist_bench_sonar(): if not os.path.isdir('datasets/connectionist_bench_sonar'): os.mkdir('datasets/connectionist_bench_sonar') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/sonar/sonar.all-data' wget.download(url, out='datasets/connectionist_bench_sonar/') with open('datasets/connectionist_bench_sonar/sonar.all-data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header = None) Xy = {} Xy['data'] = df.values[:, 
:-1].astype('float') Xy['target'] = pd.factorize(df.values[:, -1])[0] # str to numeric return Xy def fetch_ionosphere(): if not os.path.isdir('datasets/ionosphere'): os.mkdir('datasets/ionosphere') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/ionosphere/ionosphere.data' wget.download(url, out='datasets/ionosphere/') with open('datasets/ionosphere/ionosphere.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header = None) Xy = {} Xy['data'] = np.concatenate((df.values[:, 0:1].astype('float'), df.values[:, 2:-1].astype('float')), axis=1) # removing the secon variable which is always 0 Xy['target'] = pd.factorize(df.values[:, -1])[0] # str to numeric return Xy def fetch_qsar_biodegradation(): if not os.path.isdir('datasets/qsar_biodegradation'): os.mkdir('datasets/qsar_biodegradation') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00254/biodeg.csv' wget.download(url, out='datasets/qsar_biodegradation/') with open('datasets/qsar_biodegradation/biodeg.csv', 'rb') as f: df = pd.read_csv(f, delimiter=';', header = None) Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = pd.factorize(df.values[:, -1])[0] # str to numeric return Xy def fetch_seeds(): if not os.path.isdir('datasets/seeds'): os.mkdir('datasets/seeds') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00236/seeds_dataset.txt' wget.download(url, out='datasets/seeds/') with open('datasets/seeds/seeds_dataset.txt', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header = None) Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = df.values[:, -1] - 1 # make 0, 1, 2 instead of 1, 2, 3 return Xy def fetch_glass(): if not os.path.isdir('datasets/glass'): os.mkdir('datasets/glass') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/glass/glass.data' wget.download(url, out='datasets/glass/') with open('datasets/glass/glass.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header = None) Xy = {} Xy['data'] = 
df.values[:, 1:-1].astype('float') Xy['target'] = (df.values[:, -1] - 1).astype('int') # make 0, 1, 2 instead of 1, 2, 3 Xy['target'][Xy['target'] >= 4] = Xy['target'][Xy['target'] >= 4] - 1 # 0, 1, 2, 4, 5, 6 -> 0, 1, 2, 3, 4, 5 return Xy def fetch_ecoli(): if not os.path.isdir('datasets/ecoli'): os.mkdir('datasets/ecoli') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/ecoli/ecoli.data' wget.download(url, out='datasets/ecoli/') with open('datasets/ecoli/ecoli.data', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header = None) Xy = {} Xy['data'] = df.values[:, 1:-1].astype('float') Xy['target'] = pd.factorize(df.values[:, -1])[0] # str to numeric return Xy def fetch_yeast(): if not os.path.isdir('datasets/yeast'): os.mkdir('datasets/yeast') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/yeast/yeast.data' wget.download(url, out='datasets/yeast/') with open('datasets/yeast/yeast.data', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header = None) Xy = {} Xy['data'] = df.values[:, 1:-1].astype('float') Xy['target'] = pd.factorize(df.values[:, -1])[0] # str to numeric return Xy def fetch_libras(): if not os.path.isdir('datasets/libras'): os.mkdir('datasets/libras') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/libras/movement_libras.data' wget.download(url, out='datasets/libras/') with open('datasets/libras/movement_libras.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header = None) Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = df.values[:, -1] - 1 # make 0, 1, 2 instead of 1, 2, 3 return Xy def fetch_planning_relax(): if not os.path.isdir('datasets/planning_relax'): os.mkdir('datasets/planning_relax') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00230/plrx.txt' wget.download(url, out='datasets/planning_relax/') with open('datasets/planning_relax/plrx.txt', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header = None) Xy = {} Xy['data'] = df.values[:, 
:-1].astype('float') Xy['target'] = df.values[:, -1] - 1 return Xy def fetch_blood_transfusion(): if not os.path.isdir('datasets/blood_transfusion'): os.mkdir('datasets/blood_transfusion') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/blood-transfusion/transfusion.data' wget.download(url, out='datasets/blood_transfusion/') with open('datasets/blood_transfusion/transfusion.data', 'rb') as f: df = pd.read_csv(f, delimiter=',') Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = df.values[:, -1] return Xy def fetch_breast_cancer_diagnostic(): if not os.path.isdir('datasets/breast_cancer_diagnostic'): os.mkdir('datasets/breast_cancer_diagnostic') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/breast-cancer-wisconsin/wdbc.data' wget.download(url, out='datasets/breast_cancer_diagnostic/') with open('datasets/breast_cancer_diagnostic/wdbc.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header=None) Xy = {} Xy['data'] = df.values[:, 2:].astype('float') Xy['target'] = pd.factorize(df.values[:, 1])[0] # str to numeric return Xy def fetch_connectionist_bench_vowel(): if not os.path.isdir('datasets/connectionist_bench_vowel'): os.mkdir('datasets/connectionist_bench_vowel') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/undocumented/connectionist-bench/vowel/vowel-context.data' wget.download(url, out='datasets/connectionist_bench_vowel/') with open('datasets/connectionist_bench_vowel/vowel-context.data', 'rb') as f: df = pd.read_csv(f, delimiter='\s+', header=None) Xy = {} Xy['data'] = df.values[:, 3:-1].astype('float') Xy['target'] = df.values[:, -1] return Xy def fetch_concrete_slump(): if not os.path.isdir('datasets/concrete_slump'): os.mkdir('datasets/concrete_slump') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/concrete/slump/slump_test.data' wget.download(url, out='datasets/concrete_slump/') with open('datasets/concrete_slump/slump_test.data', 'rb') as f: df = pd.read_csv(f, 
delimiter=',') Xy = {} Xy['data'] = df.values[:, 1:-3].astype('float') Xy['target'] = df.values[:, -1].astype('float') # the 3 last variables are actually outcomes, but we choose 1, because we can't have 3! return Xy def fetch_wine_quality_red(): if not os.path.isdir('datasets/wine_quality_red'): os.mkdir('datasets/wine_quality_red') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv' wget.download(url, out='datasets/wine_quality_red/') with open('datasets/wine_quality_red/winequality-red.csv', 'rb') as f: df = pd.read_csv(f, delimiter=';') Xy = {} Xy['data'] = df.values[:, 1:-1].astype('float') Xy['target'] = df.values[:, -1].astype('float') return Xy def fetch_wine_quality_white(): if not os.path.isdir('datasets/wine_quality_white'): os.mkdir('datasets/wine_quality_white') url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv' wget.download(url, out='datasets/wine_quality_white/') with open('datasets/wine_quality_white/winequality-white.csv', 'rb') as f: df = pd.read_csv(f, delimiter=';') Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = df.values[:, -1].astype('float') return Xy def fetch_bean(): if not os.path.isdir('datasets/DryBeanDataset'): os.mkdir('datasets/DryBeanDataset') url = 'https://archive.ics.uci.edu/static/public/602/dry+bean+dataset.zip' wget.download(url, out='datasets/DryBeanDataset/') with zipfile.ZipFile('datasets/DryBeanDataset/dry+bean+dataset.zip', 'r') as zip_ref: zip_ref.extractall('datasets') with open('datasets/DryBeanDataset/Dry_Bean_Dataset.xlsx', 'rb') as f: df = pd.read_excel(io=f) Xy = {} Xy['data'] = df.values[:, :-1].astype('float') Xy['target'] = pd.factorize(df.values[:, -1])[0] # str to numeric return Xy def fetch_tictactoe(): if not os.path.isdir('datasets/tictactoe'): os.mkdir('datasets/tictactoe') url = 'https://archive.ics.uci.edu/static/public/101/tic+tac+toe+endgame.zip' wget.download(url, 
out='datasets/tictactoe/') with zipfile.ZipFile('datasets/tictactoe/tic+tac+toe+endgame.zip', 'r') as zip_ref: zip_ref.extractall('datasets/tictactoe') with open('datasets/tictactoe/tic-tac-toe.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header=None) Xy = {} Xy['data'] = np.zeros(df.values[:, :-1].shape) for i in range(Xy['data'].shape[1]): Xy['data'][:, i] = pd.factorize(df.values[:, i])[0] Xy['target'] = pd.factorize(df.values[:, -1])[0] return Xy def fetch_congress(): if not os.path.isdir('datasets/congress'): os.mkdir('datasets/congress') url = 'https://archive.ics.uci.edu/static/public/105/congressional+voting+records.zip' wget.download(url, out='datasets/congress/') with zipfile.ZipFile('datasets/congress/congressional+voting+records.zip', 'r') as zip_ref: zip_ref.extractall('datasets/congress') with open('datasets/congress/house-votes-84.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header=None) Xy = {} Xy['data'] = np.zeros(df.values[:, 1:].shape) for i in range(Xy['data'].shape[1]): Xy['data'][:, i] = pd.factorize(df.values[:, i+1])[0] Xy['target'] = pd.factorize(df.values[:, 0])[0] return Xy def fetch_car(): if not os.path.isdir('datasets/car'): os.mkdir('datasets/car') url = 'https://archive.ics.uci.edu/static/public/19/car+evaluation.zip' wget.download(url, out='datasets/car/') with zipfile.ZipFile('datasets/car/car+evaluation.zip', 'r') as zip_ref: zip_ref.extractall('datasets/car') with open('datasets/car/car.data', 'rb') as f: df = pd.read_csv(f, delimiter=',', header=None) Xy = {} Xy['data'] = np.zeros(df.values[:, :-1].shape) for i in range(Xy['data'].shape[1]): Xy['data'][:, i] = pd.factorize(df.values[:, i])[0] Xy['target'] = pd.factorize(df.values[:, -1])[0] return Xy def fetch_higgs(): if not os.path.isdir('datasets/higgs'): os.mkdir('datasets/higgs') url = 'https://archive.ics.uci.edu/static/public/280/higgs.zip' wget.download(url, out='datasets/higgs/') with zipfile.ZipFile('datasets/higgs/higgs.zip', 'r') as zip_ref: 
zip_ref.extractall('datasets/higgs') with gzip.open('datasets/higgs/HIGGS.csv.gz', 'rb') as f: df = pd.read_csv(f, delimiter=',', header = 0) Xy = {} Xy['data'] = df.values[:, 1:].astype('float') Xy['target'] = pd.factorize(df.values[:, 0])[0] # str to numeric return Xy
SamsungSAILMontreal/ForestDiffusion
data_loaders.py
data_loaders.py
py
21,003
python
en
code
41
github-code
13
70261454739
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun Feb 5 10:56:13 2023 @author: philipp """ import streamlit as st import numpy as np import pandas as pd import matplotlib.pyplot as plt import math from scipy import stats import plotly.express as px import datetime as dt from datetime import datetime, timedelta, date, time import time from time import sleep import os.path import json import yfinance as yf from yahooquery import Ticker as yqT ######################################################################## def getTickerListsFromFolder(a_listDir): ## returns saved tickerlist in ./data/tickerlists, List of Lists ## it is assumed that ticker lists are stored ## - in files with ending "*.tlist" ## - are stored in json file format (-> using "json.load" command) ## returns a dict with key = filename and values = list of tickers (as string) # def a_token a_token = '.tlist' # read a directory listing and remove all files which do not match to a_token a_fileList = os.listdir(a_listDir) for a_item in a_fileList: if not a_token in a_item: a_fileList.remove(a_item) # read from all files in a_file_list and put Input in dictionary a_DictionaryOfTickerlists = {} for i_name in a_fileList: with open(a_listDir + i_name, "r") as fp: a_list = json.load(fp) a_DictionaryOfTickerlists[i_name.removesuffix(a_token)] = a_list.copy() return a_DictionaryOfTickerlists ######################################################################## def getTickerInfo(a_ticker, a_InfoList): ## retunrs ticker information according to Input, Output as list # Input: # a_ticker: ticker symbol as string, i.e. 
'ZURN.SW' for Zurich Assurance # a_InfoList: list of attributes to collect (using yfinance package) # print('Try to get Tickerinfo for ' + a_ticker) a_TickerData = yf.Ticker(a_ticker) a_result = [] a_result.append( a_ticker ) a_current = a_TickerData.info for a_info in a_InfoList: if a_info != 'currentPrice': a_result.append( a_current[a_info] ) elif a_info == 'currentPrice': a_result.append( a_TickerData.fast_info['last_price'] ) else: print('Data not found for label: ' + a_info + ' at Ticker: ' + a_ticker) print('got Tickerinfo for ' + a_ticker) return a_result ######################################################################## def getTickersInfo(a_tickerList, a_InfoList): ## returns ticker information of a list of tickers according to Input, Output as DataFrame # Input: # a_tickerList: tickers as list of strings, i.e. ['ZURN.SW', 'EVE.SW', ..] # a_InfoList: list of attributes to collect (using yfinance package) # a_result = [] for a_ticker in a_tickerList: a_result.append( getTickerInfo( a_ticker, a_InfoList ) ) print(a_ticker + ' done.') #return as dataframe a_df = pd.DataFrame(data = a_result, columns = ['ticker'] + a_InfoList) a_df.set_index('ticker') return a_df ######################################################################## @st.cache_data() def getYqTickerInfo(a_ticker_list, a_InfoList): ## query yahoo data # Input: # a_ticker_list: format as list [ 'Ticker Symbol 1', .. , 'Ticker Symbol n'] # a_InfoList: format as collection {'main_key 1' : ['attriute1', 'attribute2', ..], .. # 'main_key n' : ['attriute1', 'attribute2', ..] 
} # # Output: a_result_df as dataframe with ticker symbol as index and columns of attributes ] } print('loading from yahooquery ..') a_data = yqT(a_ticker_list).all_modules print('loading from yahooquery done.') a_result = [] for a_tick in a_ticker_list: a_current = a_data[a_tick] a_tick_result = [] print('starting ' + a_tick) if not isinstance(a_current, str): for a_tag in a_InfoList: if a_tag in a_current.keys(): # get result for the current key a_current_data_by_key = a_current[a_tag] #get values for sub_key for a_info in a_InfoList[a_tag]: if a_info in a_current_data_by_key.keys(): a_tick_result.append( a_current_data_by_key[a_info] ) else: print('following key ' + a_tag + '-' + a_info + ' is not available for ' + a_tick + ' .. putting NaN as Value.') a_tick_result.append( np.nan ) else: print('following key ' + a_tag + ' is not available for ' + a_tick) raise Exception('no repair for this - make sure key exist ..') else: print('something went wrong -> ' + a_current) print('done for ' + a_tick + ' ..' ) a_result.append( [a_tick] + a_tick_result ) #flatten the names in order to name columns in dataframe a_result_names = [] for a_key in a_InfoList: for a_subkey in a_InfoList[a_key]: #print(a_key) a_result_names.append( a_subkey ) a_result_names = list( ['ticker'] + a_result_names ) #bring it to a panda data frame a_result_df = pd.DataFrame(data = list(a_result), columns = a_result_names) a_result_df.set_index('ticker') return a_result_df, a_result_names ######################################################################## def getNormalizedTickersInfo(a_TickersInfo, a_norm, a_index): ## returns normalized TickersInfo per a_norm, i.e. 
'currentPrice', Output as DataFrame # Input: # a_TickersInfo: DataFrame of unnormalizedTickersInfo # a_norm: column name for which to normlize the other columns # a_index: List of column names which to normalize # # Output: # same dataframe with added columns of normalized columns, # normalized columnes are named with '*_norm' # # a_norm_df = a_TickersInfo.copy() for ind in a_index: a_norm_df[ind+'_norm'] = a_norm_df[ind]/a_norm_df[a_norm] return a_norm_df ######################################################################## def getTickersTimeSeries(a_tickerListStr, a_start, a_end, a_interval): ## returns time series of the given Input, Output as DataFrame a_df = yf.download(a_tickerListStr, a_start, a_end, a_interval, ignore_tz = True, group_by = 'ticker', repair = False, prepost = False) a_head = [] for i in range(len(a_df.columns)): a_head.append( a_df.columns[i][0] + '_' + a_df.columns[i][1] ) #print(a_head) a_df.columns = a_head return a_df def main(): # print('Hello everybody!') a_dummy=[] if __name__ == "__main__": ######################################################################## ## Initial Streamlit settings ######################################################################## st.set_page_config( page_title="Stock Info App", page_icon=":owl:", layout="wide", initial_sidebar_state="expanded", menu_items={ 'Get Help': 'https://www.extremelycoolapp.com/help', 'Report a bug': "https://www.extremelycoolapp.com/bug", 'About': "# This is a header. This is an *extremely* cool app!" } ) ## set title of the app st.title(':shark: :red[Another App about Stock Information]') ## set general information a_tab_naming = [':owl: Analize', ':chart: View', ':chart_with_upwards_trend: Correlate', ':telescope: Forecast'] with st.expander('.. 
more information'): st.markdown('with this App you can \n ' + '- **analyse tickers** on analyst opinions -> see tab: ' + a_tab_naming[0] +' \n' + '- **view time serie charts** on tickers -> see tab: ' + a_tab_naming[1] +' \n' + '- **analyse dependencies** on tickers by correlation chart -> see tab: ' + a_tab_naming[2] +' \n' + '- forecast tickers (in progress) -> see tab: ' + a_tab_naming[3] +' \n' ) st.markdown(' :blue[_this App uses information from yfinance and yahooquery package, pw, Feb23_]') ######################################################################## ## Create 4 Tabs -> Analyse / View / Correlate / Forecast ######################################################################## tab1, tab2, tab3, tab4 = st.tabs( a_tab_naming ) ######################################################################## ## General Settings for Plots in Sidebar ######################################################################## with st.sidebar: st.title('Menu :red[_General Plot Settings_]') st.text('') col1, col2 = st.columns(2) with col1: a_plotly_width = int( st.text_input("Set Plot Width :", 1200)) a_plotly_msize = int( st.text_input("Set Marker Size:", 4 )) with col2: a_plotly_height = int( st.text_input("Set Plot Height:", int(a_plotly_width/2) )) a_plotly_lwidth = float( st.text_input("Set Line Width:", 1 )) st.text('') st.text('') st.text('') st.text('') st.text('') st.text('') ######################################################################## # Some Test Code at the beginning.. # Here We are getting ZURN financial information # We need to pass FB as argument for that ######################################################################## # getZHData = yf.Ticker("ZURN.SW") # whole python dictionary is printed here # print(getZHData.info) ######################################################################## ## Setup possible tickerlists to analyse ## get my own tickerlists .. 
## additional NOTE: not available anymore 'BLS.SW' ######################################################################## a_tickerListDirectory = './data/tickerlists/' a_tickerListChoice = getTickerListsFromFolder(a_tickerListDirectory) #my_ticker_list = ['ZURN.SW','EVE.SW','MBTN.SW', 'ASWN.SW', \ # 'RCK.V', 'TKA.DE', 'F3C.DE', 'VOW3.DE', 'NOVO-B.CO', \ # 'ALJXR.PA', 'LIN', 'NEL.OL', 'BLDP', 'UUUU', 'SHOP', \ # 'ONON', 'BE', 'CLNE', 'RUN', 'SQ', 'EQNR', 'FCEL', \ # 'QS', 'PCELL.ST', 'PLUG', 'SNPS', 'CWR.L' ] #a_ticker_name = 'sp500' #tickers = si.tickers_sp500() #a_ticker_name = 'nasdaq_II' #tickers = si.tickers_nasdaq() #a_ticker_name = 'other' #tickers = si.tickers_other() #a_ticker_name = 'six_II' #tickers = swiss_tickers = a_six_list['Ticker'].values ######################################################################## ## Setting Sidebar Menu for Analysis ######################################################################## with st.sidebar: st.title('Menu :red[_Ticker_]') st.write('according to a choosen Ticker List there will be: \n ' + '- tickers loaded and shown for Overview \n' + '- time serie data loaded and visualized/analized') ########### a_ticker_choice_list = list(a_tickerListChoice.keys()) a_ticker_choice_list.sort() my_ticker_choice = st.selectbox('Choose Ticker List:', a_ticker_choice_list, key = a_ticker_choice_list[0] ) my_ticker_list = a_tickerListChoice[my_ticker_choice] ######################################################################## ## Setting some global Variables ######################################################################## ## get today / as time series should retrieved till today a_todayStr = datetime.today().strftime("%Y-%m-%d") ## Setup Info attributes for tickers -> used by method for yahooquery a_info_list_tot = {'financialData': ['numberOfAnalystOpinions', 'recommendationKey' ,'recommendationMean', 'currentPrice','targetMeanPrice','targetLowPrice','targetHighPrice'], 'defaultKeyStatistics' : ['forwardEps'], 
'assetProfile' : ['industry', 'sector', 'website', 'longBusinessSummary'] } ## Setup Info attributes for tickers -> used by method for yfinance a_ticker_info = a_info_list_tot['financialData'] + a_info_list_tot['defaultKeyStatistics'] ######################################################################## ## Loading Data from query or file ######################################################################## ######################################################################## ## Setup Info for tickers ## an output filename a_outputFilename = './data/' + a_todayStr + '_recommendations_' + my_ticker_choice +'_yF.csv' data_load_state = st.text('Loading data... ' + a_outputFilename) # if not os.path.isfile(a_outputFilename): ## setup two routines because of problems by yfinance package: ## -> method with yfinance, not needed any more at the moment # a_InfoListOfTickers = getTickersInfo( my_ticker_list, a_ticker_info ) ## -> method with yahooquery -> get Analyst Information a_InfoListOfTickers_all, a_dummy = getYqTickerInfo(my_ticker_list, a_info_list_tot ) a_InfoListOfTickers = a_InfoListOfTickers_all.loc[:, ['ticker'] + a_info_list_tot['financialData'] + a_info_list_tot['defaultKeyStatistics'] ] a_AddInfoListOfTickers = a_InfoListOfTickers_all.loc[:, ['ticker'] + a_info_list_tot['assetProfile'] ] # add another column that compares EPS with currentPrice -> information about speculativ valuation in Price of ticker a_InfoListOfTickers['PricePerEPS'] = a_InfoListOfTickers['currentPrice']/a_InfoListOfTickers['forwardEps'] print(a_InfoListOfTickers) # write the output to a file a_InfoListOfTickers.to_csv( a_outputFilename ) # else: # a_InfoListOfTickers = pd.read_csv( a_outputFilename ) ######################################################################## ## Setup Info time series for tickers to be informed of ## including some indices to compare ## an output filename a_outputFilename = './data/' + a_todayStr +'_' + my_ticker_choice + 
'_data_myTS_FromTickers_yF.csv' if not os.path.isfile(a_outputFilename): indeces_to_compare = '^NDX ^GDAXI ^SSMI' a_tickerListStr = '' maxLength = len(my_ticker_list) for i in my_ticker_list[0:maxLength]: a_tickerListStr = a_tickerListStr + ' ' + i a_tickerListStr = indeces_to_compare + a_tickerListStr # get time series dataframe #print('komme hier durch ..') a_TS_FromTickers = getTickersTimeSeries(a_tickerListStr, "2022-01-01", a_todayStr, "1d") print(a_TS_FromTickers) a_TS_FromTickers.to_csv( a_outputFilename ) else: a_TS_FromTickers = pd.read_csv( a_outputFilename ) a_TS_FromTickers.set_index('Date', inplace=True) data_load_state.text('Loading data...done!') ######################################################################## ## normalize data to plot and compare on same scale ######################################################################## a_normInfoListOfTickers = getNormalizedTickersInfo(a_InfoListOfTickers, 'currentPrice', ['targetMeanPrice', 'targetLowPrice', 'targetHighPrice', 'forwardEps']) ## adapt targetLowPrice and targetHighPrice to plot in Errorbar Plot a_normInfoListOfTickers['targetLowPrice_norm'] = a_normInfoListOfTickers['targetMeanPrice_norm'] - a_normInfoListOfTickers['targetLowPrice_norm'] a_normInfoListOfTickers['targetHighPrice_norm'] = a_normInfoListOfTickers['targetHighPrice_norm'] - a_normInfoListOfTickers['targetMeanPrice_norm'] # search nan and set it to zero a_normInfoListOfTickers = a_normInfoListOfTickers.fillna(0) ######################################################################## ## Data Loading and Preparation done (from query or file) ######################################################################## ## global sidebar setting for data handling and information with st.sidebar: with st.expander('following tickers are in the list ..'): st.table(a_AddInfoListOfTickers.loc[:, ['ticker','industry', 'sector']]) st.text('') st.text('') st.text('') st.text('') st.text('') st.text('') ## set title st.title('Menu 
:red[_Time Serie Range_]') st.write('according to a choosen time range there will be: \n ' + '- Time Serie Visualization done \n' + '- time serie data analized') a_startTime, a_endTime = st.select_slider('select range to compare:', list(a_TS_FromTickers.index), value = [list(a_TS_FromTickers.index)[0], list(a_TS_FromTickers.index)[len(list(a_TS_FromTickers.index))-1]] ) ######################################################################## ## for Tab Content / streamlit presentation ######################################################################## ######################################################################## ## Tab1: Analysis ## Input in this Tab: a_normInfoListOfTickers ## a_InfoListOfTickers ## a_AddInfoListOfTickers ######################################################################## with tab1: ## set Title st.title(':telescope: Menu :red[_Ticker Overview_]') st.text('') if tab1.checkbox('show tickers data'): tab1.dataframe(a_InfoListOfTickers) ## set the Menu for Visualization Plot st.markdown('## Select shown tickers by Options:') col1, col2 = st.columns(2, gap="large") with col1: #numOfAnalists = float( st.text_input(":red[Number of Analyists Opinion] greater or equal:", 1)) a_range_df = a_InfoListOfTickers['numberOfAnalystOpinions'].dropna() #a_range_list = list(range( int(min(a_range_df)), int(max(a_range_df) ))) a_range_list = list(range( int(0.0), int(max(a_range_df) + 1.0 ))) a_minNumAnalists, a_maxNumAnalists = st.select_slider('Option 1: :red[Number of Analyists Opinion] within range:', a_range_list, value = [ min(a_range_list), max(a_range_list)]) with col2: #recomMean = float( st.text_input("Option 2: :red[Recommandation Mean] lower or equal (value: 1-5):", 3)) a_minRecomMean, a_maxRecomMean = st.select_slider('Option 2: :red[Recommandation Mean] lower or equal (value: 1-5):', np.arange(1.0,5.1, 0.1).round(2), value = [1.0, 3.0]) #put the filter values in action a_normToDisplay = 
a_normInfoListOfTickers.loc[(a_normInfoListOfTickers['numberOfAnalystOpinions'] >= a_minNumAnalists) & (a_normInfoListOfTickers['numberOfAnalystOpinions'] <= a_maxNumAnalists) & (a_normInfoListOfTickers['recommendationMean'] >= a_minRecomMean) & (a_normInfoListOfTickers['recommendationMean'] <= a_maxRecomMean)] st.text('') st.text('') tab1.markdown('### _Data is :red[normalized] - :red[current Price] for each ticker is equal :red[1]_') st.write('### Number of tickers fullfilling criteria: ', a_normToDisplay.shape[0], ' out of ', a_normInfoListOfTickers.shape[0]) ######################################################################## ## doing a plotly figp = px.scatter(a_normToDisplay, x = 'ticker', y = 'targetMeanPrice_norm', error_y='targetHighPrice_norm', error_y_minus='targetLowPrice_norm', color = 'recommendationKey', size = 'numberOfAnalystOpinions', hover_data=a_ticker_info, labels={'x': 'ticker name', 'y':'normalized Mean'} ) figp.update_layout(autosize=True) figp.update_layout(width = a_plotly_width, height = a_plotly_height) figp.add_shape( # add a horizontal "target" line type="line", line_color="salmon", line_width=3, opacity=1, line_dash="dot", x0=0, x1=1, xref="paper", y0=1, y1=1, yref="y") #figp.update_traces(marker=dict(color='red')) figp.update_traces(line_color='red') #figp.update_layout(width=800, height=600) tab1.write(figp) st.write('') st.write('### :red[learn more about] a certain :red[ticker]: \n' + '(only tickers selectable which meet criteria)') col1, col2, col3 = st.columns(3) with col1: a_InsideSelectionList = list(a_normToDisplay['ticker']) a_InsideToTicker = st.selectbox('select a ticker', a_InsideSelectionList) a_dfInsideOfTicker = a_AddInfoListOfTickers.loc[ (a_AddInfoListOfTickers['ticker'] == a_InsideToTicker) ] a_dfInsideOfTickerVal = a_InfoListOfTickers.loc[ (a_InfoListOfTickers['ticker'] == a_InsideToTicker) ] #st.write('Overview ') st.write('Industry : ', a_dfInsideOfTicker['industry'].values[0]) st.write('Sector : ', 
a_dfInsideOfTicker['sector'].values[0]) st.write('Web-Site : ', a_dfInsideOfTicker['website'].values[0]) with col2: st.write('**Valuation**') for i_key in a_dfInsideOfTickerVal.keys(): i_value = a_dfInsideOfTickerVal[i_key].values[0] if not isinstance( i_value, str ): st.write(i_key + ': ' + str(i_value) ) else: st.write(i_key + ': ' + i_value ) with col3: st.write('**Business Information**') st.write('', a_dfInsideOfTicker['longBusinessSummary'].values[0]) ######################################################################## ## Tab2: Charts ######################################################################## with tab2: ## set title st.title(':crystal_ball: Menu :red[_Ticker Time Serie_]') st.text('') # st.title('Menu :red[_Ticker Time Serie_]') # st.text('') st.markdown('### select ticker for time serie:') a_filter_object = filter(lambda a: 'Close' in a, a_TS_FromTickers.columns.values) a_selectionList = list(a_filter_object) a_selectionList.sort() if a_InsideToTicker+'_Close' in a_selectionList: a_selectedTickers = st.multiselect('select tickers', a_selectionList,['^NDX_Close','^GDAXI_Close','^SSMI_Close', a_InsideToTicker+'_Close'] ) else: _selectedTickers = st.multiselect('select tickers', a_selectionList,['^NDX_Close','^GDAXI_Close','^SSMI_Close'] ) if st.checkbox('show ticker time series data'): st.dataframe(a_TS_FromTickers[a_startTime : a_endTime]) # show the selected tickers chart if st.checkbox('show selected ticker time series plot'): #st.markdown('Selected Tickers times serie data') #st.line_chart( a_TS_FromTickers, y = a_selectedTickers ) ######################################################################## ## doing a plotly figpt = px.scatter(a_TS_FromTickers[a_startTime : a_endTime], y = a_selectedTickers) figpt.update_traces(mode='markers+lines', marker = dict(size = a_plotly_msize), line = dict(width=a_plotly_lwidth, dash='dot') ) figpt.update_layout(autosize=True) figpt.update_layout(width = a_plotly_width, height = a_plotly_height) 
st.write(figpt) #normalize a_TS_FromTickers[a_selectedTickers] a_normalizedTS_FromTickers = a_TS_FromTickers[a_startTime : a_endTime][a_selectedTickers]/a_TS_FromTickers[a_startTime : a_endTime][a_selectedTickers].iloc[0] if st.checkbox('show normalized ticker time series data'): st.dataframe(a_normalizedTS_FromTickers) # show the selected tickers chart st.markdown('Selected Tickers times serie data :red[normalized]') #st.line_chart( a_normalizedTS_FromTickers, y = a_selectedTickers ) ######################################################################## ## doing a plotly figptnorm = px.scatter(a_normalizedTS_FromTickers, y = a_selectedTickers) figptnorm.update_traces(mode='markers+lines', marker = dict(size = a_plotly_msize), line = dict(width=a_plotly_lwidth, dash='dot') ) figptnorm.update_layout(autosize=True) figptnorm.update_layout(width = a_plotly_width, height = a_plotly_height) st.write(figptnorm) ######################################################################## ## Tab3: Correlation ######################################################################## with tab3: ## set titel st.title(':mag: Menu :red[_Ticker Correlation_]') st.text('') #st.title(':mag: Menu :red[_Ticker Correlation_]') col1, col2 = st.columns(2) a_filter_object = filter(lambda a: 'Close' in a, a_TS_FromTickers.columns.values) a_selectionList = list(a_filter_object) a_selectionList.sort() with col1: a_corrTicker1 = st.selectbox('select ticker 1', a_selectionList, a_selectionList.index('^SSMI_Close') ) a_corrTicker1Data = a_TS_FromTickers[a_startTime : a_endTime][a_corrTicker1] with col2: if a_InsideToTicker+'_Close' in a_selectionList: a_corrTicker2 = st.selectbox('select ticker 2', a_selectionList, a_selectionList.index(a_InsideToTicker+'_Close')) else: a_corrTicker2 = st.selectbox('select ticker 2', a_selectionList) a_corrTicker2Data = a_TS_FromTickers[a_startTime : a_endTime][a_corrTicker2] if a_corrTicker1 == a_corrTicker2: a_corrTicker2 = a_corrTicker1 + '_' 
a_corrTicker2Data.name = a_corrTicker2 a_corrTickerResult = pd.concat([a_corrTicker1Data, a_corrTicker2Data], axis=1).reindex(a_corrTicker1Data.index) a_corrTickerResult.dropna(inplace=True, axis=0) if (len(a_corrTickerResult) != 0): slope, intercept, r_value, p_value, std_err = stats.linregress(a_corrTickerResult[a_corrTicker1], a_corrTickerResult[a_corrTicker2]) st.write('### **_Result of :red[Correlation Analysis]_**') col1, col2, col3 = st.columns(3, gap="small") with col1: st.write( '**slope :** {:2.2E} +/- {:2.2E}'.format(slope, std_err) ) st.write( '**p-value for Hypt. "no slope":** {:2.2E}'.format(p_value) ) # with col2: # st.text('') with col2: st.write( '### **r-value:** :red[{:4.2f}]'.format(r_value) ) st.write( '(r-value: the percentage of the variance in the dependent variable. ' + 'The closer its value is to 1, the more variability the model explains.)') else: st.error( ':red[something went wrong - no match (i.e. on timescale) to correlate ..]',icon=':unamused:') if st.checkbox('show correlation line'): if (len(a_corrTickerResult) != 0): a_corrTickerResult['corrLine'] = intercept + slope*a_corrTickerResult[a_corrTicker1] a_corrTickerY = [a_corrTicker2, 'corrLine'] else: a_corrTickerY = a_corrTicker2 else: a_corrTickerY = a_corrTicker2 figcorr = px.scatter(a_corrTickerResult, x = a_corrTicker1, y = a_corrTickerY) figcorr.update_traces(mode='markers+lines', marker = dict(size = a_plotly_msize), line = dict(width=a_plotly_lwidth, dash='dot') ) figcorr.update_layout(autosize=True) figcorr.update_layout(width = a_plotly_width, height = a_plotly_height) tab3.write(figcorr) ######################################################################## ## Tab4: forecast ######################################################################## ## set Title tab4.title(':telescope: Menu :red[_Forecasting .._]') tab4.markdown('The content of this tab is still in ideation phase - so, ' + 'no expectations :stuck_out_tongue_closed_eyes: allowed .. ')
philwenkch/stock_analysis_V1
stock_analysis_main_V2.py
stock_analysis_main_V2.py
py
30,241
python
en
code
0
github-code
13
17675629576
import re import numpy as np import pandas as pd import support from sklearn.model_selection import KFold, cross_validate from sklearn.svm import SVC, SVR from sklearn.gaussian_process import GaussianProcessClassifier, GaussianProcessRegressor from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor from sklearn.neural_network import MLPClassifier, MLPRegressor """ Training Dataset as: https://archive.ics.uci.edu/ml/datasets/Iris/ https://archive.ics.uci.edu/ml/datasets/Connectionist+Bench+%28Sonar%2C+Mines+vs.+Rocks%29 https://archive.ics.uci.edu/ml/machine-learning-databases/glass/ https://archive.ics.uci.edu/ml/datasets/Airfoil+Self-Noise https://archive.ics.uci.edu/ml/datasets/Wine+Quality """ if __name__ == '__main__': # ベンチマークとなるアルゴリズムと、アルゴリズムを実装したモデルの一覧 models = [ ('SVM', SVC(gamma='auto', random_state=1), SVR(gamma='auto')), ('GaussianProcess', GaussianProcessClassifier(random_state=1), GaussianProcessRegressor(normalize_y=True, alpha=1, random_state=1)), ('KNeighbors', KNeighborsClassifier(), KNeighborsRegressor()), ('MLP', MLPClassifier(hidden_layer_sizes=(100,), random_state=1), MLPRegressor(hidden_layer_sizes=(5,), solver='lbfgs', random_state=1)), ] # 検証用データセットのファイルと、ファイルの区切り文字、 # ヘッダーとなる行の位置、インデックスとなる列の位置のリスト classifier_files = ['iris.data', 'sonar.all-data', 'glass.data'] classifier_params = [(',', None, None), (',', None, None), (',', None, 0)] regressor_files = ['airfoil_self_noise.dat', 'winequality-red.csv', 'winequality-white.csv'] regressor_params = [(r'\t', None, None), (';', 0, None), (';', 0, None)] # 評価スコアを、検証用データセットのファイル、アルゴリズム毎に保存する表 result = pd.DataFrame(columns=['dataset', 'function'] + [model[0] for model in models], index=range(len(classifier_files + regressor_files) * 2)) # クラス分類アルゴリズムの評価 column_num = 0 for i, (file, param) in enumerate(zip(classifier_files, classifier_params)): # ファイルの読み込み df = pd.read_csv(file, sep=param[0], header=param[1], index_col=param[2], engine='python') train = df[df.columns[:-1]].values 
target, clz = support.clz_to_prob(df[df.columns[-1]]) # 結果の表を事前に作成する # ファイル名からデータセットの種類と、評価関数用の行を作る result.loc[column_num, 'dataset'] = re.split(r'[._]', file)[0] result.loc[column_num + 1, 'dataset'] = '' result.loc[column_num, 'function'] = 'F1Score' result.loc[column_num + 1, 'function'] = 'Accuracy' # 全アルゴリズムで評価を行う for model_name, classifier, _ in models: # sklearnの関数で交差検証した結果のスコアを取得 kf = KFold(n_splits=5, random_state=1, shuffle=True) score = cross_validate(classifier, train, target.argmax(axis=1), cv=kf, scoring=('f1_weighted', 'accuracy')) result.loc[column_num, model_name] = np.mean(score['test_f1_weighted']) result.loc[column_num + 1, model_name] = np.mean(score['test_accuracy']) column_num += 2 # 回帰アルゴリズムの評価 for i, (file, param) in enumerate(zip(regressor_files, regressor_params)): # ファイルの読み込み df = pd.read_csv(file, sep=param[0], header=param[1], index_col=param[2], engine='python') train = df[df.columns[:-1]].values target = df[df.columns[-1]].values.reshape((-1,)) # 結果の表を事前に作成する # ファイル名からデータセットの種類と、評価関数用の行を作る result.loc[column_num, 'dataset'] = re.split(r'[._]', file)[0] result.loc[column_num + 1, 'dataset'] = '' result.loc[column_num, 'function'] = 'R2Score' result.loc[column_num + 1, 'function'] = 'MeanSquared' # 全アルゴリズムで評価を行う for model_name, _, regressor in models: # sklearnの関数で交差検証した結果のスコアを取得 kf = KFold(n_splits=5, random_state=1, shuffle=True) score = cross_validate(regressor, train, target, cv=kf, scoring=('r2', 'neg_mean_squared_error')) result.loc[column_num, model_name] = np.mean(score['test_r2']) # 符号を反転させ、もとの二条平均誤差を取得 result.loc[column_num + 1, model_name] = -np.mean(score['test_neg_mean_squared_error']) column_num += 2 pd.set_option('display.width', 400) pd.set_option('display.max_columns', 10) print(result) result.to_csv('baseline.csv', index=None)
sigu1011/ensemble_learning
baseline.py
baseline.py
py
4,972
python
en
code
0
github-code
13
72396573779
from django.urls import path from . import views urlpatterns = [ path('', views.index), path('inquiries', views.inquiries, name="inquiries"), path('responses', views.responses, name="responses"), path('applications', views.applications, name="applications"), path('login', views.loginPage, name="login"), path('register', views.registerPage, name="register"), path('logout', views.logoutUser, name="logout"), ]
DeanNandi/Agcrm
crmpage/urls.py
urls.py
py
426
python
en
code
0
github-code
13
70351295378
import operator from functools import reduce def deep_get(path, obj): try: return reduce(operator.getitem, path, obj) except: return None def apply_projection(projection, obj): if isinstance(projection, Mapper): return projection.apply(obj) elif isinstance(projection, list): return deep_get(projection, obj) elif isinstance(projection, str): return obj[projection] raise Exception('Wrong projection type') class Mapper: def __init__(self): self.keys = [] self.props = {} self.projections = {} self.list_projections = {} def has_prop(self, key): return key in self.props def has_projection(self, key): return key in self.projections def has_list_projection(self, key): return key in self.list_projections def has_key(self, key): return self.has_prop(key) or self.has_projection(key) or self.has_list_projection(key) def prop(self, key, value): if self.has_key(key): raise Exception('Key "{}" already exist'.format(key)) self.props[key] = value self.keys.append(key) def project_one(self, key, mapper): if self.has_key(key): raise Exception('Key "{}" already exist'.format(key)) self.projections[key] = mapper self.keys.append(key) def project_list(self, key, mapper): if self.has_key(key) and not self.has_list_projection(key): raise Exception('Key "{}" already exist'.format(key)) if not self.has_list_projection(key): self.list_projections[key] = [] self.list_projections[key].append(mapper) self.keys.append(key) def apply(self, obj): result = {} for key in self.keys: if key in self.props: result[key] = self.props[key] elif key in self.projections: result[key] = apply_projection(self.projections[key], obj) elif key in self.list_projections: result[key] = [] for projection in self.list_projections[key]: result[key].append(apply_projection(projection, obj)) return result
J7DpeBK0Wt/backend
mapper.py
mapper.py
py
2,238
python
en
code
0
github-code
13
711109795
n = int(input()) l1 = list(map(int,input().split())) ma = pow(10,7) ind1= [0]*(ma) ind2 = [0]*(ma) temp=0 for i in range(n): r = sum(l1) l = 0 for j in range(i,n): l+= l1[j] r = r - l1[j] if(n== j-i+1 or l/(j-i+1)>r/(n-j+i-1)): #print(r) ind1[temp] = i+1 ind2[temp] = j+1 #print(ind1[temp],ind2[temp]) temp+=1 print(temp) for i,j in zip(ind1,ind2): if i>0 or j>0: print(i,j)
parasjain-12/HackerEarth-Solution
Finding the Subarrays.py
Finding the Subarrays.py
py
485
python
en
code
162
github-code
13
14575695096
from decimal import * def getset(n): getcontext().prec = 1000 return {x:str(Decimal(1)/Decimal(x)) for x in xrange(1,n+1)} def cyclic(p): b = 10 t = 0 r = 1 n = 0 while True: t += 1 x = r*b d = int(x/p) r = x % p n = n*b+d if r == 1: break if t == p-1: return True return False
kryptn/euler
p26.py
p26.py
py
390
python
en
code
0
github-code
13
12787366950
from .helper import in_to_mm, lbf_to_newtons import matplotlib.pyplot as plt import numpy as np class SpecimenTest(object): ''' SpecimenTest object - combines a specimen with a material and test data. ''' class TestResults(object): def __init__(self, Jc = None, KJc = None, KJc_valid_1T = None): ''' Constructor Arguments --------------------- Jc: float Jc value in MPa-mm. KJc: float KJc value in MPa-mm. KJc_valid_1T: float Maximum valid KJc value based on size constraints in MPa-mm. ''' self.Jc = Jc self.KJc = KJc self.KJc_valid_1T = KJc_valid_1T def __init__(self, specimen, material, temperature, data, elastic_ul): ''' Constructor Arguments --------------------- specimen: Specimen Specimen object. material: Material Material object. temperature: float Temperature of test run (C). data: dict Dict with keys "cod", "disp", and "force" containing equal length numpy float arrays. elastic_ul: float Upper limit of elastic behavior in force units to determine C0 for analysis. ''' self.specimen = specimen self.material = material self.temperature = temperature assert len(data["cod"]) == len(data["force"]) and len(data["cod"]) == len(data["disp"]) self.data = data self.elastic_ul = elastic_ul self.results = SpecimenTest.TestResults() self._analyze() def __str__(self): return "<SpecimenTest at %s>" % (hex(id(self)),) def __repr__(test): return self.__str__() def _analyze(self): ''' KJc calculations on test data. 
''' # create and initialize test variables for cleaner code (convert lengths to m) P = np.array(self.data["force"]) cod = np.array(self.data["cod"]) / 1000.0 a0_over_W = self.specimen.a0 / self.specimen.W a_over_W = (self.specimen.a0 + cod) / self.specimen.W S = self.specimen.S / 1000.0 B = self.specimen.B / 1000.0 BN = self.specimen.BN / 1000.0 W = self.specimen.W / 1000.0 b0 = (self.specimen.W - self.specimen.a0) / 1000.0 # calculate cod area <A> using trapezoidal rule n = len(P) force_int_avg = 0.5 * P[:n - 1] + 0.5 * P[1:] cod_diff = np.diff(cod) A = np.concatenate([np.array([0]), np.cumsum(cod_diff * force_int_avg)]) # calculate Ke (convert Pa root m to MPa root m in final Ke calculation) k1 = P * S / ((B * BN)**(0.5) * W**(1.5)) f1 = 1.5 * a0_over_W**(0.5) / (1 + 2 * a0_over_W) f2 = 1.99 - a0_over_W * (1.0 - a0_over_W) * (2.15 - 3.93 * a0_over_W + 2.7 * a0_over_W**2) f3 = (1.0 - a0_over_W)**(1.5) Ke = 1e-6 * k1 * f1 * f2 / f3 # calculate Je Je = (1.0 - self.material.poisson_ratio**2) * Ke**2 / self.material.E # calculate Jp (convert final Jp from trimmed_index = P <= self.elastic_ul C0 = np.var(cod[trimmed_index]) / np.cov(cod[trimmed_index], P[trimmed_index])[0, 1] nu = 3.667 - 2.199 * a_over_W + 0.4376 * a_over_W**2 Ap = A - 0.5 * C0 * P**2 Ap[:max(np.arange(len(Ap))[Ap < 0]) + 1] = 0.0 Jp = 1e-6 * nu * Ap / (BN * b0) # calculate Jc (units MPa * m) self.results.Jc = (Je + Jp)[-1] # calculate KJc self.results.KJc = np.sqrt(self.results.Jc * self.material.E / (1.0 - self.material.poisson_ratio**2)) # calculate KJc validity limit self.results.KJc_valid_1T = np.sqrt(self.material.E * b0 * self.material.fy / (30 * (1 - self.material.poisson_ratio**2))) def update(self, specimen = None, material = None, data = None, elastic_ul = None): ''' Update test results based on a change in Specimen Test parameters. Arguments set to equal None will remain unchanged in the updated results analysis. Arguments --------- specimen: Specimen New test specimen object. 
material: Material New material object data: dict New data as described in documentation of instance constructor. elastic_ul: float Process data using new elastic_ul for determining initial slope. ''' if isinstance(specimen, Specimen): self.specimen = specimen if isinstance(material, Material): self.material = material if data: assert len(data["cod"]) == len(data["force"]) and len(data["cod"]) == len("disp") self.data = data if elastic_ul: self.elastic_ul = elastic_ul self._analyze() def plot_data(self, grid = True, show = True, english_units = False): ''' Plot specimen test data. Arguments --------- grid: bool If True, include xy grid on plot. show: bool If True, show plot in window. english_units: bool If True, plot in English units in. and lbf (default is SI -- mm and N). Returns ------- A tuple containing the plot figure object and the plot axes object. ''' if english_units == True: funits, lunits = "lbf", "in." c, f = self.data["cod"] / in_to_mm(1.0), self.data["force"] / lbf_to_newtons(1) else: funits, lunits = "N", "mm" c, f = self.data["cod"], self.data["force"] fig = plt.figure(self.specimen.id + " Test Data") axes = fig.add_subplot(111) axes.plot(c, f, "r-") plt.autoscale() plt.xlim([0.0, plt.xlim()[1]]) plt.xlabel("Crack Opening Displacement (%s)" % (lunits,)) plt.ylim([0.0, plt.ylim()[1]]) plt.ylabel("Load (%s)" % (funits,)) plt.grid(grid) if show: plt.show() return fig, axes
btcross26/astm_e1921_analysis-Python-3-Package
astm_e1921_analysis/SpecimenTest.py
SpecimenTest.py
py
6,312
python
en
code
0
github-code
13
9203536446
def translate_class_to_module(class_name): translation = { "SlurmAPIResource": "slurm_api_resource", "LocalResource": "local_resource", "LocalFileSystemStorage": "local_file_system_storage", "HubmapLocalFileSystemStorage": "hubmap_local_file_system_storage", "GlobusUserAuthentication": "globus_user_authentication", "LocalUserAuthentication": "local_user_authentication", "PSCAPIUserAuthentication": "psc_api_user_authentication", "JupyterLabJob": "jupyter_lab_job", "LocalTestJob": "local_test_job", } try: return translation.get(class_name) except Exception as e: raise e def generate_controller_object(class_name, module_type, params): try: o = getattr( __import__( f"user_workspaces_server.controllers.{module_type}.{translate_class_to_module(class_name)}", fromlist=[class_name], ), class_name, )(**params) return o except Exception as e: raise e
hubmapconsortium/user_workspaces_server
src/user_workspaces_server/utils.py
utils.py
py
1,068
python
en
code
0
github-code
13