blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 3 281 | content_id stringlengths 40 40 | detected_licenses listlengths 0 57 | license_type stringclasses 2
values | repo_name stringlengths 6 116 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 313
values | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 18.2k 668M ⌀ | star_events_count int64 0 102k | fork_events_count int64 0 38.2k | gha_license_id stringclasses 17
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 107
values | src_encoding stringclasses 20
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 4 6.02M | extension stringclasses 78
values | content stringlengths 2 6.02M | authors listlengths 1 1 | author stringlengths 0 175 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
2f9970afd8dc86fde020b29ed5cf098fe97b5060 | e682bb0223fcce71c295f250bcce48a321755ff4 | /users/views/auth.py | 362b1bf90099bfc34a7ba130e6fbf51a750ff225 | [] | no_license | jatinj615/IMDB_Movies | dbcf0fc0ff5fe346c24055fff1061adc10a61cb5 | c653719e3b5340136130cba138251c9c82d250da | refs/heads/master | 2022-12-15T05:16:31.102606 | 2020-09-03T10:53:25 | 2020-09-03T10:53:25 | 291,470,147 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,208 | py | from django.contrib.auth import get_user_model, authenticate, login, logout
from users.serializer import UserSerializer
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from rest_framework import permissions, generics
from rest_framework.authtoken.models import Token
from rest_framework.authentication import TokenAuthentication
class CreateUserView(generics.CreateAPIView):
    """
    Registration for new User

    POST-only endpoint backed by generics.CreateAPIView: validation and
    user creation are delegated entirely to UserSerializer.
    """
    # Registration must be reachable by anonymous clients, so no
    # authentication/permission checks apply here.
    permission_classes = [permissions.AllowAny]
    serializer_class = UserSerializer
class LoginView(APIView):
    """
    User Login and auth token Generation
    """
    def post(self, request, *args, **kwargs):
        """Authenticate the posted credentials and return a DRF auth token.

        Returns ``{"token": <key>}`` with HTTP 200 on success. On failure an
        empty 404 is returned (kept for backward compatibility with existing
        clients; 401 would be the more conventional status for a bad login).
        """
        # .get() instead of direct indexing: a payload missing 'username' or
        # 'password' previously raised KeyError -> HTTP 500. With None
        # credentials authenticate() simply returns None.
        user = authenticate(username=request.data.get('username'),
                            password=request.data.get('password'))
        if user is not None:
            # Reuse an existing token if one was already issued for this user.
            token, created = Token.objects.get_or_create(user=user)
            response_data = {'token': token.key}
            return Response(response_data, status=HTTP_200_OK)
        return Response(status=404)
| [
"jatinj615@gmail.com"
] | jatinj615@gmail.com |
d1f60f117c2794ee87cdab12e34717d84cfa428a | 11f8bea6d15d951584af72eb5d193d84200b1cf0 | /fisher.py | 40d350fda24178a5934450c358160ca632fcbdca | [
"MIT"
] | permissive | ttkltll/fisher_review | 3a4e871808b9b385e0077413a79ddb1e78ce2a28 | e0afbb0d5f5f73890dc772eb2b6b34ddd31e6909 | refs/heads/master | 2022-12-10T17:08:43.272520 | 2022-01-25T13:03:42 | 2022-01-25T13:03:42 | 251,500,916 | 0 | 1 | MIT | 2022-12-08T11:11:17 | 2020-03-31T04:32:49 | HTML | UTF-8 | Python | false | false | 200 | py | from flask import current_app
from app import creat_app
# Build the Flask application via the project's factory function.
app = creat_app()
if __name__ == '__main__':
    # Dev entry point: listen on all interfaces, port 83; DEBUG comes from the
    # app config. NOTE(review): don't enable debug in production.
    app.run(debug=app.config['DEBUG'], host='0.0.0.0', port=83, threaded=True)
a = current_app | [
"ttkltll@163.com"
] | ttkltll@163.com |
7fc1aab1de73aa78dbb82daf249adb798a862e6e | ac0e9a702e73739209b24ba3f6d9297647e06b76 | /Example Files/Intermediate/phonebook_example_unittest/test_phonebook.py | 877ed597231b563f8b52e0fd04a0c7d5d40c137e | [] | no_license | afettouhi/PyStudentManager | 9c256c38b20136f10b86fb2e2270bb5848be802d | 71343bc52e5426b2267f068bd6af2e66c0807f08 | refs/heads/master | 2020-05-14T16:53:09.501889 | 2019-06-07T14:22:44 | 2019-06-07T14:22:44 | 181,881,548 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,424 | py |
import unittest
from phonebook import Phonebook
class PhonebookTest(unittest.TestCase):
    """Unit tests for the Phonebook class.

    A phonebook is *consistent* when no stored number duplicates or is a
    prefix of another. Several assertions were inverted relative to what
    the test names describe; they now match the names.
    """

    def setUp(self):
        # Fresh phonebook per test keeps the cases independent.
        self.phonebook = Phonebook()

    def test_lookup_entry_by_name(self):
        self.phonebook.add("Bob", "12345")
        self.assertEqual("12345", self.phonebook.lookup("Bob"))

    def test_missing_entry_raises_KeyError(self):
        with self.assertRaises(KeyError):
            self.phonebook.lookup("missing")

    def test_empty_phonebook_is_consistent(self):
        # No entries -> nothing can conflict. (Original asserted False.)
        self.assertTrue(self.phonebook.is_consistent())

    def test_phonebook_with_normal_entries_is_consistent(self):
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "012345")
        self.assertTrue(self.phonebook.is_consistent())

    def test_phonebook_with_duplicate_entries_is_inconsistent(self):
        # Identical numbers must make the book inconsistent.
        # (Original asserted True, contradicting the test name.)
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "12345")
        self.assertFalse(self.phonebook.is_consistent())

    def test_phonebook_with_numbers_that_prefix_one_another_is_inconsistent(self):
        # "123" is a prefix of "12345" -> inconsistent.
        # (Original asserted True, contradicting the test name.)
        self.phonebook.add("Bob", "12345")
        self.phonebook.add("Mary", "123")
        self.assertFalse(self.phonebook.is_consistent())

    def test_phonebook_adds_names_and_numbers(self):
        self.phonebook.add("Sue", "12345")
        self.assertIn("Sue", self.phonebook.get_names())
        self.assertIn("12345", self.phonebook.get_numbers())
| [
"A.Fettouhi@gmail.com"
] | A.Fettouhi@gmail.com |
cc5515d7903dd5d1401aa6e1d9173cbf64dabc11 | f0203795f07da31ac6255c93143cd117ff5cc364 | /modeling_your_life.py | d4f117eb97139290f7a620cabc6fca2225f6177a | [] | no_license | Rick-and-morty/life-model | a8ad0bb230804cf77bcca6012d6a145ef36251e6 | 424c4eb1edb1f98b807e7ea9541a422f175ad11b | refs/heads/master | 2021-01-17T15:13:23.200355 | 2016-09-29T04:26:48 | 2016-09-29T04:26:48 | 69,496,596 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,246 | py |
class Activities:
    """Tongue-in-cheek model of the author's daily activities.

    Each attribute is a single-item list holding a sentence describing what
    that activity means to the author.
    """
    def __init__(self):
        self.music = ["all the fun and enjoyment i get from being creative."]
        self.jogging = ["what i do to get think about my next coding challenge."]
        self.tv = ["the thing that makes me giggle at the end of a bad day."]
        self.stress = ["all the things going on in my head"]
        self.time = ["hours in the day"]
        self.the_struggle = ["all of the things that make us who we are"]
class My_old_band:
    """One attribute per band member, each a short personal description."""
    def __init__(self):
        self.travis = "the ridculous drunk, but still my good friend"
        self.jon = "the guy who always had my back, even at the worst times"
        self.tyler = "me, haha no really sure what to say"
# Joke script: prints some self-reflection and asks about Travis.
life = Activities()
shows = My_old_band()
# NOTE(review): list concatenation compared against a different list — this
# condition is always True as written; kept, since the joke intent is unclear.
if life.tv + life.music != life.time:
    print(life.stress)
else:
    print("take a break and clear your head")
print(life.the_struggle)
print("tyler, you never jog, don't lie to yourself, or joel hahahahah, you just think")
shows.travis = input("is travis sober? Y/n ")
# Bug fix: the original tested the constant `if "n":`, which is always truthy,
# so the user's answer was ignored. Compare the actual input instead.
if shows.travis == "n":
    print("looks like things are going to get interesting again!")
else:
    print("looks like we may have a show worth a damn for once!")
# the best i could do on today's assignment
# git hub is angering me
| [
"te5840@gmail.com"
] | te5840@gmail.com |
3661068f55bf7363d35a03604516c6ba1cbbe911 | 057936d2afa69cecb86c7cc687a61801d08d9af3 | /manage.py | 7c40f287e65ed18649aa02aae50c02447d407af2 | [
"MIT"
] | permissive | sigmarising/web_innovation | bdfd4c7d0f3c6ecc84a6101b1d35abb180272e89 | 6f5425f34c3aff593f321297ed9aae38d1a51f17 | refs/heads/master | 2021-06-11T22:40:47.989912 | 2021-04-13T08:18:25 | 2021-04-13T08:18:25 | 154,326,215 | 1 | 0 | MIT | 2018-12-20T06:43:51 | 2018-10-23T12:43:08 | JavaScript | UTF-8 | Python | false | false | 540 | py | #!/usr/bin/env python
import os
import sys
if __name__ == '__main__':
    # Point Django at this project's settings module before anything imports it.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'web_site.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        # Re-raise with a friendlier hint; `from exc` preserves the cause.
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command given on the command line.
    execute_from_command_line(sys.argv)
| [
"sigmarising@hotmail.com"
] | sigmarising@hotmail.com |
ba5adc914d657335775749282968cb3baf05d633 | a0bb621d05af3a68f36dbdaf475531f9061009c7 | /multilayer/examples/2_LEG+RPA_plasmon_dispersion_in_MLG.py | d4d4382f8f11e90ab1dce5a1d3bb14660060ecd8 | [
"MIT"
] | permissive | rhambach/EELcalc | 892d90f626ec05cd72235f1e1e7715a213adf8d5 | b8ac985ab881721ff7a95e529dccb6ce1c85a243 | refs/heads/master | 2020-06-05T13:58:39.172009 | 2017-11-19T10:31:02 | 2017-11-19T10:31:02 | 27,860,640 | 2 | 1 | null | null | null | null | UTF-8 | Python | false | false | 3,287 | py | """
Plot script for the dispersion of the pi-plasmon in N-layer
gaphene with in-plane momentum q (see [1] Fig. 2c).
REFERENCES:
[1] Wachsmuth, Hambach, Benner, and Kaiser, PRB (90) 235434 (2014)
Copyright (c) 2014, rhambach, P. Wachsmuth.
This file is part of the EELcalc package and released
under the MIT-Licence. See LICENCE file for details.
"""
import numpy as np;
import matplotlib.pylab as plt;
import scipy.stats as stats;
from scipy.optimize import curve_fit;
from EELcalc.monolayer.graphene import rpa;
from EELcalc.multilayer.leg import areels;
from EELcalc.tools.DPio.dp_mdf import broad;
# Collected results list; appears unused below (the loop uses `disp`) — kept.
dispersion=[];
# When True, every peak search / fit is also drawn into a debug figure.
_debug= True;
BOHR = 0.529177; # [Angstrom]
def _fitplot(E,spectrum,mask,fit):
  # Draw one normalised spectrum (black), the fitted window (blue) and the
  # fit itself (red). Each call is offset vertically so successive spectra
  # stack up in the same axes.
  ax=_fitplot.ax;
  offset=_fitplot.count;
  ax.plot(E,spectrum+offset,'k-');
  ax.plot(E[mask],spectrum[mask]+offset,'b-');
  ax.plot(E[mask],fit+offset,'r-');
  # Function attributes (initialised below) carry the axes + running offset.
  _fitplot.count+=0.2;
if _debug:
  # One shared debug figure for all _fitplot calls.
  _fitplot.count=0;
  _fitplot.ax=plt.figure().add_subplot(111);
  plt.title(u'DEBUG: test fitting of $\pi$-plasmon position')
def find_max(spectrum,E,Emin=0,Emax=np.inf,debug=False):
  """Locate the maximum of *spectrum* restricted to energies Emin < E < Emax.

  Returns the tuple (amplitude, energy) of the highest point inside the
  window; with debug=True the normalised spectrum and the detected peak are
  drawn via _fitplot().
  """
  window = (E > Emin) & (E < Emax)
  peak = spectrum[window].argmax()
  amplitude = spectrum[window][peak]
  energy = E[window][peak]
  if debug:
    _fitplot(E, spectrum / amplitude, window, np.arange(window.sum()) == peak)
  return (amplitude, energy)
def fit_lorentz(spectrum,E,dE,Emin=0,Emax=np.inf,debug=False):
  """Fit a Lorentzian to the peak of *spectrum* found within [Emin, Emax].

  The fit window is +-dE around the detected maximum. Returns the optimal
  parameters [a, x0, sigma] of a/((x-x0)**2 + sigma**2).
  """
  def lorentz(x, a, x0, sigma):
    return a / ((x - x0) ** 2 + sigma ** 2)
  # Starting point for the fit: position/height of the raw maximum.
  A0, E0 = find_max(spectrum, E, Emin=Emin, Emax=Emax)
  window = (E < E0 + dE) & (E > E0 - dE)
  popt, pcov = curve_fit(lorentz, E[window], spectrum[window], p0=(A0, E0, dE))
  if debug:
    _fitplot(E, spectrum / A0, window, lorentz(E[window], *popt) / A0)
  return popt
# setup calculations
# NOTE: this script is Python 2 (print statement, list + range concatenation).
Nmax=6;
DP = rpa.GrapheneDP(qdir='GM',verbosity=0);
d = 3.334 / BOHR; # interlayer distance [a.u]
q_calc = np.sort((DP.get_q()));
q_calc = q_calc[q_calc<0.5]; # restrict range of q
# iterate over different multilayers
fig=plt.figure(); ax=fig.add_subplot(111);
cm=plt.cm.cool; # colors
colors = ['k']+[ cm(x) for x in np.linspace(0,1,Nmax) ];
for N in [0]+range(Nmax,0,-1):# 0=graphite
  print " calculating %d-layer system ..."%N;
  if N==0: ML = areels.InfiniteStack(d,DP.get_pol2D); # graphite
  else: ML = areels.Multilayer(N,d,DP.get_pol2D); # multilayer
  disp=[];
  for q in q_calc:
    eels = ML.areel(q,0); # qz=0, only in-plane spectra
    # Extract the pi-plasmon position: plain maximum near q=0, Lorentz fit
    # otherwise (the fit is ill-conditioned at vanishing momentum transfer).
    if q<0.01: # lorentz fit does not work for q=0
      A0,E0=find_max(eels,DP.E,Emin=3,Emax=8,debug=_debug);
    else:
      A,E0,width=fit_lorentz(eels,DP.E,0.7,Emin=2,Emax=13,debug=_debug);
    disp.append([q/BOHR,E0]); # q [1/A], E0 [eV]
  disp=np.asarray(disp);
  # One dispersion curve per layer count; black = graphite (infinite stack).
  plt.plot(disp[:,0],disp[:,1],c=colors[N],label='N=%d'%N if N>0 else 'graphite');
plt.title('Dispersion in multilayer graphene');
plt.suptitle('(layered-electron-gas model + RPA ab-initio calculations for graphene polarizability)');
plt.xlabel('momentum transfer q [1/A]');
plt.ylabel('energy [eV]');
plt.legend(loc=4);
plt.show();
| [
"ralf.hambach@uni-ulm.de"
] | ralf.hambach@uni-ulm.de |
fec994a3c91bc6f844537ef2d976044fa97d29db | 768c23b9cfd43942935b8f98d07c82dfab5f23f0 | /inventory/code/inventory_management_system/models.py | e9ee2a2fb21b8bec4d9a34ea13338fddc22d83ac | [] | no_license | dhananisangit/inventory_management_system | 164c9c8e7e2dd1e4d1cae1c6f8d2b74028428556 | b3565ff2169cac56682890b6a9d7c3697cda6d92 | refs/heads/master | 2021-01-02T09:17:57.940118 | 2017-08-03T18:37:45 | 2017-08-03T18:37:45 | 99,186,275 | 3 | 3 | null | null | null | null | UTF-8 | Python | false | false | 4,016 | py | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
from django import forms
from django.db import connections
import re, json
import time
import datetime
import math
class Parts(models.Model):
    """Catalogue of part definitions (unique name + free-text description)."""
    name = models.CharField(max_length=254, unique=True, verbose_name='Part Name')
    description = models.CharField(max_length=254, verbose_name='Part Description')

    def __unicode__(self):
        return unicode(self.name)

    def get_parts_name(self):
        """Return a flat queryset of all part names.

        The original used ``self.objects.only('name', flat=True)``: managers
        are not accessible from model instances and ``only()`` takes no
        ``flat`` argument, so the call always raised.
        """
        return Parts.objects.values_list('name', flat=True)

    def get_parts_description(self):
        """Return a flat queryset of all part descriptions.

        The original computed the queryset but had no return statement.
        """
        return Parts.objects.values_list('description', flat=True)

    class Meta:
        verbose_name = 'Parts'
        verbose_name_plural = 'Parts'
class Building(models.Model):
    """A building that can contain inventory locations."""
    name = models.CharField(max_length=254, unique=True, verbose_name='Name')

    def __unicode__(self):
        return unicode(self.name)

    def get_building_names(self):
        """Return a flat queryset of all building names.

        The original returned the undefined name ``part_names`` (NameError)
        and used ``self.objects.only(..., flat=True)``, which is invalid both
        as a manager access from an instance and as an ``only()`` call.
        """
        return Building.objects.values_list('name', flat=True)

    class Meta:
        verbose_name = 'Building'
class Location(models.Model):
    """A named storage location inside a Building."""
    name = models.CharField(max_length=254, unique=True, verbose_name='Name')
    building_id = models.ForeignKey(Building, on_delete=models.PROTECT)

    def __unicode__(self):
        return unicode(self.name)

    def get_location_names(self):
        """Return a flat queryset of all location names.

        The original returned the undefined name ``part_names`` (NameError)
        and used the invalid ``self.objects.only(..., flat=True)`` call.
        """
        return Location.objects.values_list('name', flat=True)

    class Meta:
        verbose_name = 'Location'
class Inventory_details(models.Model):
    """Stock level of one part at one location/building."""
    part_number = models.ForeignKey(Parts, on_delete=models.PROTECT)
    # description = models.CharField(max_length=510)
    # description = Parts.objects.get(part_number=)
    quantity = models.PositiveIntegerField()
    # NOTE(review): default="1" is a string primary-key default for a FK —
    # confirm the Location with pk 1 always exists.
    location_id = models.ForeignKey(Location, default="1", on_delete=models.PROTECT)
    building_id = models.ForeignKey(Building, on_delete=models.PROTECT)
    date_created = models.DateTimeField(auto_now_add=True, verbose_name='Date Created')
    date_modified = models.DateTimeField(auto_now=True, verbose_name='Date Modified')

    def __unicode__(self):
        return unicode(self.part_number)

    def create(self, part_id):
        """Return the inventory record for *part_id*.

        Managers are unavailable on instances, so the original
        ``self.objects.get(...)`` raised; look up via the model class.
        """
        part_info = Inventory_details.objects.get(part_number=part_id)
        return part_info

    def get_parts_description(self, part_id):
        """Return the description of the Part whose name equals *part_id*.

        NOTE(review): raises IndexError when no part matches — confirm
        callers guarantee the part exists.
        """
        parts_description = Parts.objects.filter(name=str(part_id)).values_list('description')
        return str(parts_description[0][0])

    class Meta:
        verbose_name = 'Inventory Details'
        verbose_name_plural = 'Inventory Details'
class Move_log(models.Model):
    """Audit record of a stock move between two inventories."""
    part_number = models.ForeignKey(Parts, on_delete=models.PROTECT)
    quantity = models.PositiveIntegerField()
    from_inventory = models.CharField(max_length=15)
    to_inventory = models.CharField(max_length=15)
    # Timestamp set automatically when the row is first created.
    date = models.DateTimeField(auto_now_add=True)
    # User who performed the move.
    user_id = models.ForeignKey(User)
    reason = models.TextField(verbose_name='Reason')
    def __unicode__(self):
        return unicode(self.part_number)
    class Meta:
        verbose_name = 'Move Log'
class Product_rate(models.Model):
    """Build rate associated with a product name."""
    build_rate = models.FloatField(verbose_name='Build Rate')
    product = models.CharField(max_length=254)
    def __unicode__(self):
        return unicode(self.product)
    class Meta:
        verbose_name = 'Product Rate'
class Purchase(models.Model):
    """Purchasing data per part: lead time plus per-product quantities."""
    part_number = models.ForeignKey(Parts, on_delete=models.PROTECT)
    # Lead time — units not recorded here; presumably days. TODO confirm.
    lead_time = models.PositiveIntegerField(verbose_name='Lead Time')
    qty_beamplus = models.PositiveIntegerField(verbose_name='Quantity BeamPlus')
    qty_beampro = models.PositiveIntegerField(verbose_name='Quantity BeamPro')
    def __unicode__(self):
        return unicode(self.part_number)
    class Meta:
        verbose_name = 'Purchases'
        verbose_name_plural = 'Purchases'
| [
"sdhanani@st30823mbp.lan.suitabletech.com"
] | sdhanani@st30823mbp.lan.suitabletech.com |
e2927e4a8817d64a103071a883dad7138c20bd83 | 8332f04ac6a61fcd540b1796c90d6fcf8a38e650 | /src/TrasformObject.py | 03ee98f384398b84578ce146e31585b73a0ddf6e | [] | no_license | giospada/ScaricaCompiti | b6070cfe58c5ea2387b14109fcab35f7a4380e0f | 1fb4238a565bb8cc11f1779763fc1ce7fea3c3d8 | refs/heads/main | 2023-07-13T19:43:41.183997 | 2021-08-26T16:51:34 | 2021-08-26T16:51:34 | 334,932,817 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,080 | py | import datetime as dt
import re
class TrasformObject:
    """Utility wrapper around a list of homework dicts (``compiti``).

    Each item is expected to carry ISO-format date strings under the keys
    "datCompiti" (due date) and "datGiorno" (assignment day).
    """

    def __init__(self, compiti):
        # Snapshot of today's date, used by onlyNext().
        self.today = dt.datetime.now().date()
        self.compiti = compiti

    def dataMassima(self):
        """Ensure "datCompiti" holds the later of each item's two dates.

        Bug fix: the original passed a ``date`` object to
        ``dt.datetime.strptime`` (which takes strings) and so always raised
        TypeError. A plain swap of the ISO strings keeps the values usable
        by ordina()/onlyNext(), which parse them with ``fromisoformat``.
        """
        for obj in self.compiti:
            datCompiti = dt.date.fromisoformat(obj["datCompiti"])
            datGiorno = dt.date.fromisoformat(obj["datGiorno"])
            if datCompiti < datGiorno:
                obj["datCompiti"], obj["datGiorno"] = obj["datGiorno"], obj["datCompiti"]

    def ordina(self):
        """Sort compiti in ascending due-date order."""
        self.compiti.sort(
            key=(lambda x: dt.date.fromisoformat(x["datCompiti"])),
            reverse=False)

    def filtra(self, per, dacercare):
        """Keep only items whose field *per* matches inside *dacercare*.

        NOTE(review): re.search(pattern, string) — the field value is used
        as the regex pattern here; confirm the arguments are not meant to
        be the other way round.
        """
        self.compiti = list(filter(lambda x: re.search(x[per], dacercare), self.compiti))

    def onlyNext(self):
        """Drop items whose due date is more than one day in the past."""
        self.compiti = list(filter(
            lambda x: (dt.date.fromisoformat(x["datCompiti"]) - self.today).days > -2,
            self.compiti))

    def toTaskWorrior(self):
        return
        # TODO: implement the export to taskwarrior format
| [
"giospadaccini74@gmail.com"
] | giospadaccini74@gmail.com |
d7f2ac70b8cb10c2f05a112b0c00c9af580c876b | fcb87e969a3989f2023f3847a5f0e1289a0a8694 | /sip/execution_control/configuration_db/sip_config_db/scheduling/_scheduling_object_list.py | 15f0af7ef8ff43197a068b6a31c7c792caf4f15b | [
"BSD-3-Clause"
] | permissive | SKA-ScienceDataProcessor/integration-prototype | 299eb0aa41ba9c7f683f5bac101af5a19fccb171 | 5875dc0489f707232534ce75daf3707f909bcd15 | refs/heads/master | 2021-05-01T05:02:16.697902 | 2019-07-28T22:32:05 | 2019-07-28T22:32:05 | 58,473,707 | 3 | 10 | BSD-3-Clause | 2021-03-25T22:21:08 | 2016-05-10T15:41:14 | C++ | UTF-8 | Python | false | false | 3,436 | py | # -*- coding: utf-8 -*-
"""Base class for list of scheduling or processing block data objects."""
from typing import List
from ._scheduling_object import SchedulingObject
from .. import ConfigDb
from .._events.event_queue import EventQueue
from .._events.pubsub import get_subscribers, publish, subscribe
# Shared handle to the SIP configuration database (module-level singleton).
DB = ConfigDb()
class SchedulingObjectList:
    """Base API for lists of SBI and Processing Block data objects.

    All lists live in the configuration database under keys of the form
    '<type>:active', '<type>:aborted' and '<type>:completed'.
    """

    def __init__(self, object_type: str):
        """Record the object type used to build every DB list key.

        Args:
            object_type (str): Object Type
        """
        self.type = object_type

    @property
    def num_active(self) -> int:
        """Number of currently active scheduling objects."""
        return len(self.active)

    @property
    def num_aborted(self) -> int:
        """Number of aborted scheduling objects."""
        return len(self.aborted)

    @property
    def num_completed(self) -> int:
        """Number of completed scheduling objects."""
        return len(self.completed)

    @property
    def active(self) -> List[str]:
        """IDs of active scheduling objects.

        Returns:
            list, list of object ids
        """
        key = '{}:active'.format(self.type)
        return DB.get_list(key)

    @property
    def aborted(self) -> List[str]:
        """IDs of aborted scheduling objects.

        Returns:
            list, list of object ids
        """
        key = '{}:aborted'.format(self.type)
        return DB.get_list(key)

    @property
    def completed(self) -> List[str]:
        """IDs of completed scheduling objects.

        Returns:
            list, list of object ids
        """
        key = '{}:completed'.format(self.type)
        return DB.get_list(key)

    def set_complete(self, object_id: str):
        """Move *object_id* from the active list to the completed list."""
        if object_id not in self.active:
            return
        DB.remove_from_list('{}:active'.format(self.type), object_id)
        DB.append_to_list('{}:completed'.format(self.type), object_id)

    ###########################################################################
    # Pub/sub events functions
    ###########################################################################

    def subscribe(self, subscriber: str) -> EventQueue:
        """Subscribe *subscriber* to events for this object type.

        Args:
            subscriber (str): Subscriber name.

        Returns:
            events.EventQueue, Event queue object for querying PB events.
        """
        return subscribe(self.type, subscriber)

    def get_subscribers(self) -> List[str]:
        """Return the names subscribed to SBI / Processing Block events.

        Returns:
            List[str], list of subscriber names.
        """
        return get_subscribers(self.type)

    def publish(self, object_id: str, event_type: str,
                event_data: dict = None):
        """Publish an event for the scheduling object *object_id*.

        Args:
            object_id (str): ID of the scheduling object
            event_type (str): Type of event.
            event_data (dict, optional): Event data.
        """
        object_key = SchedulingObject.get_key(self.type, object_id)
        publish(event_type=event_type,
                event_data=event_data,
                object_type=self.type,
                object_id=object_id,
                object_key=object_key,
                origin=None)
| [
"ben.mort@gmail.com"
] | ben.mort@gmail.com |
f70ae58e535f116c40b1b7cda964a910e9a6f25b | f4a8744b28a6b4301f7e3d60d99cfa3d10bfa073 | /l10n_es_prev_tesoreria/wizard/wiz_crear_factura.py | 920fbd04de162431e5d92ea5742ee068e7df830d | [] | no_license | kailIII/openerp-spain | 4f6c4d9adc5544470f105ed36f905a2da79ccb4a | 1303d175d74e41d03a167d0e15d84be419d32121 | refs/heads/master | 2020-12-25T20:43:03.259516 | 2013-03-09T15:14:46 | 2013-03-09T15:14:46 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,556 | py | # -*- encoding: utf-8 -*-
##############################################################################
#
# Avanzosc - Avanced Open Source Consulting
# Copyright (C) 2011 - 2012 Avanzosc <http://www.avanzosc.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
##############################################################################
import decimal_precision as dp
from tools.translate import _
from osv import osv
from osv import fields
class wiz_crear_factura(osv.osv_memory):
    """Wizard that creates a supplier invoice from a treasury payment plan."""
    _name = 'wiz.crear.factura'
    _description = 'Asistente para crear las facturas'
    _columns = {
        'partner_id': fields.many2one('res.partner', 'Empresa', readonly=True),
        'journal_id': fields.many2one('account.journal', 'Diario', domain=[('type', '=', 'purchase')], required=True),
        'description': fields.char('Descripción', size=64, required=True),
        'importe': fields.float('Importe', digits_compute=dp.get_precision('Account')),
        'pago': fields.integer('Pago'),
        'type': fields.char('Tipo de Pago', size=1),
    }

    def default_get(self, cr, uid, fields_list, context=None):
        """Pre-fill the wizard from the payment selected in the context.

        NOTE(review): assumes the caller always provides 'active_model' and
        'active_ids' in the context — confirm the wizard is only launched
        from the payment-plan views.
        """
        values = {}
        if context['active_model'] == "l10n.es.tesoreria.pagos.var.plan":
            obj = self.pool.get('l10n.es.tesoreria.pagos.var.plan')
            plan_type = 'V'  # variable payment plan
        else:
            obj = self.pool.get('l10n.es.tesoreria.pagos.period.plan')
            plan_type = 'P'  # periodic payment plan
        for pago in obj.browse(cr, uid, context['active_ids']):
            # A payment that already has an invoice must not get a second one.
            if pago.factura_id:
                raise osv.except_osv(_('Error!'), _('Este pago ya tiene una factura asignado!!'))
            values = {
                'partner_id': pago.partner_id.id,
                'journal_id': pago.diario.id,
                'description': pago.name,
                'importe': pago.importe,
                'pago': int(pago.id),
                'type': plan_type,
            }
        return values

    def button_create_inv(self, cr, uid, ids, context=None):
        """Create the supplier invoice and link it back to the payment."""
        invoice_obj = self.pool.get('account.invoice')
        address_obj = self.pool.get('res.partner.address')
        for wiz in self.browse(cr, uid, ids):
            address = address_obj.search(cr, uid, [('partner_id', '=', wiz.partner_id.id)])
            if address:
                values = {
                    'name': 'Prev: '+ wiz.description + '/ Importe: ' + str(wiz.importe),
                    'reference': 'Prev: '+ wiz.description + '/ Importe: ' + str(wiz.importe),
                    'partner_id': wiz.partner_id.id,
                    'journal_id': wiz.journal_id.id,
                    'address_invoice_id': address[0],
                    'type': 'in_invoice',
                    'account_id': wiz.partner_id.property_account_receivable.id,
                }
                # Optional defaults copied from the partner when present.
                if wiz.partner_id.property_payment_term:
                    values.update({'payment_term': wiz.partner_id.property_payment_term.id})
                if wiz.partner_id.payment_type_customer:
                    values.update({'payment_type': wiz.partner_id.payment_type_customer.id})
                if wiz.partner_id.property_account_position:
                    values.update({'fiscal_position': wiz.partner_id.property_account_position.id})
            else:
                # Bug fix: except_osv takes (title, message); the partner name
                # was passed as a stray third argument and lost from the text.
                raise osv.except_osv(_('Error!'), _('Address not found for Partner: ') + wiz.partner_id.name)
            invoice_id = invoice_obj.create(cr, uid, values)
            if wiz.type == 'V':
                obj = self.pool.get('l10n.es.tesoreria.pagos.var.plan')
            else:
                obj = self.pool.get('l10n.es.tesoreria.pagos.period.plan')
            # Mark the payment as invoiced and remember the created invoice.
            obj.write(cr, uid, wiz.pago, {'factura_id': invoice_id, 'diario': wiz.journal_id.id, 'pagado': 1})
        return {'type':'ir.actions.act_window_close'}
wiz_crear_factura() | [
"ajuaristio@gmail.com"
] | ajuaristio@gmail.com |
f32f2075cffb1ee258d2840c969615cb58be0bbf | f0d713996eb095bcdc701f3fab0a8110b8541cbb | /qM6zWQM7gdfPgE9Hh_10.py | ac926ffffc597d07e0e765dc6f988e28824815d1 | [] | no_license | daniel-reich/turbo-robot | feda6c0523bb83ab8954b6d06302bfec5b16ebdf | a7a25c63097674c0a81675eed7e6b763785f1c41 | refs/heads/main | 2023-03-26T01:55:14.210264 | 2021-03-23T16:08:01 | 2021-03-23T16:08:01 | 350,773,815 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,147 | py | """
Given a _dictionary_ of some items with _star ratings_ and a _specified star
rating_ , return a new dictionary of items **which match the specified star
rating**. Return `"No results found"` if _no item_ matches the _star rating_
given.
### Examples
filter_by_rating({
"Luxury Chocolates" : "*****",
"Tasty Chocolates" : "****",
"Aunty May Chocolates" : "*****",
"Generic Chocolates" : "***"
}, "*****") ➞ {
"Luxury Chocolates" : "*****",
"Aunty May Chocolates" : "*****"
}
filter_by_rating({
"Continental Hotel" : "****",
"Big Street Hotel" : "**",
"Corner Hotel" : "**",
"Trashviews Hotel" : "*",
"Hazbins" : "*****"
}, "*") ➞ {
"Trashviews Hotel" : "*"
}
filter_by_rating({
"Solo Restaurant" : "***",
"Finest Dinings" : "*****",
"Burger Stand" : "***"
}, "****") ➞ "No results found"
### Notes
N/A
"""
def filter_by_rating(d, rating):
    """Return the items of *d* whose star rating equals *rating*.

    Args:
        d: dict mapping item name -> star-rating string (e.g. "***").
        rating: star-rating string to match exactly.

    Returns:
        A new dict containing only the matching items, or the string
        "No results found" when nothing matches.
    """
    # The original shadowed the builtin `dict` and bound a stray alias `b`;
    # a plainly named result plus a conditional expression keeps the contract.
    matches = {name: stars for name, stars in d.items() if stars == rating}
    return matches if matches else 'No results found'
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
3972892ec6f2a0828d64b1d20362ddcb648f8cc4 | 4a9f742ddd2c1a2340c3b854eeaef511329515c8 | /RoccoAppExit.py | 18f6ff6f663eab365e25b1a150ec41600d99360a | [] | no_license | TheGreatKO/CNBC | 7f544fb94695d36b314dc853c80d785b27bb7370 | e00095ab3a2464c215e6789b6172ac3bf6ec58e9 | refs/heads/master | 2021-01-19T11:51:58.144664 | 2016-10-09T01:24:35 | 2016-10-09T01:24:35 | 69,774,082 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 813 | py | __author__ = 'ko'
import logging
import logging.config
import sys
logging.config.fileConfig('logging.conf') # create and configure logger
# Module-level logger for this dialog; handlers/levels come from logging.conf.
logger = logging.getLogger('Rocco.ExitDialog')
from cnbcAppExitDialog import Ui_ExitDialog
from PyQt5.QtWidgets import QDialog, QMainWindow
class MyExitDialog(QDialog):
    """Confirmation dialog shown before quitting the application."""

    def __init__(self, parent=None):
        # Pass the parent through so Qt manages stacking and lifetime.
        super(MyExitDialog, self).__init__(parent)
        self.ui = Ui_ExitDialog()
        self.ui.setupUi(self)
        # use new style signals
        # Bug fix: the original connected btnYes to self.e, which does not
        # exist (AttributeError at construction time).
        self.ui.btnYes.clicked.connect(self.close_app)
        self.ui.btnNo.clicked.connect(self.reject)

    def close_app(self):
        """Quit the whole application (Yes button)."""
        # QApplication was never imported at module level; import it here.
        from PyQt5.QtWidgets import QApplication
        logger.debug("Closing the Rocco Program via Exit Dialog")
        QApplication.quit()

    def close_dia(self):
        """Close just this dialog."""
        logger.debug("Closing the Exit Dialog")
        # Bug fix: the original called close() on the class itself
        # (MyExitDialog.close()), which raises without an instance.
        self.close()
| [
"hanlonko@gmail.com"
] | hanlonko@gmail.com |
cd993620d1e70f876c831b3843aa66125d43bbad | fdf80ffe53395c1f04a0be3e4e95918d0b0ea379 | /Desafio45.py | a26bb748c96168bb9e2e049109d4bce3dead6fb2 | [] | no_license | henrique-af/cursoemvideo-python | 146308e4fb38bb6be4b3a5a262a8ca68da7e473a | c81fd653ff45eb1d1b22776d914381485fe2bb44 | refs/heads/main | 2023-05-27T04:16:12.722297 | 2021-06-16T21:52:35 | 2021-06-16T21:52:35 | 377,636,021 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,439 | py | print('====== DESAFIO 45 ======')
from time import sleep
import emoji
from random import randint
print(emoji.emojize('Suas opções:\n'
'1 - Pedra :fist:\n'
'2 - Papel :hand:\n'
'3 - Tesoura :v:\n', use_aliases=True))
menu = int(input('Qual a sua jogada? '))
computador = randint(1,3)
sleep(0.2)
if menu !=1 and menu !=2 and menu !=3:
print('Opção inválida, finalizando o programa!')
else:
print('\nJO')
sleep(0.6)
print('KEN')
sleep(0.6)
print('PO!')
sleep(0.6)
if menu == 1 and computador == 2:
print(emoji.emojize('\nVocê perdeu! Computador escolheu papel :hand:',use_aliases=True))
elif menu == 1 and computador == 3:
print(emoji.emojize('\nVocê ganhou! Computador escolheu tesoura :v:',use_aliases=True))
elif menu == 2 and computador == 3:
print(emoji.emojize('\nVocê perdeu! Computador escolheu tesoura :v:',use_alises=True))
elif menu == 2 and computador == 1:
print(emoji.emojize('\nVocê ganhou! Computador escolheu pedra :fist:',use_aliases=True))
elif menu == 3 and computador == 1:
print(emoji.emojize('\nVocê perdeu! Computador escolheu pedra :fist:',use_aliases=True))
elif menu == 3 and computador == 2:
print(emoji.emojize('\nVocê ganhou! Computador escolheu papel :hand:',use_aliases=True))
elif menu == computador:
print('\nInfelizmente empatou!') | [
"henriquealbuquerquef@gmail.com"
] | henriquealbuquerquef@gmail.com |
35757bf0f4d8afe1c0b99428daee2cf27e28c9fd | 97af3c1e09edbb09dfabe0dd8cb5334735d874b6 | /code/lib/python/console/clint/textui/progress.py | 960c35b9cb5d9319ca98f0dd9a3e887086ff01bf | [] | no_license | joyrexus/ldp | 31d3e155110e3249ad0f7c97f1b663120c6a125d | d0e15f051bb175fc66a4647b3001b31702aa16f3 | refs/heads/master | 2021-01-17T14:30:46.115805 | 2015-05-05T20:20:14 | 2015-05-05T20:20:14 | 11,434,923 | 2 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,180 | py | # -*- coding: utf-8 -*-
"""
clint.textui.progress
~~~~~~~~~~~~~~~~~
This module provides the progressbar functionality.
"""
from __future__ import absolute_import
import sys
# All progress output goes to stderr so it does not pollute piped stdout.
STREAM = sys.stderr
# Template "<label>[===---] i/count\r" — the trailing \r rewrites the line.
BAR_TEMPLATE = '%s[%s%s] %i/%i\r'
BAR_EMPTY_CHAR = '-'
BAR_FILLED_CHAR = '='
# Character emitted per item by dots().
DOTS_CHAR = '.'
def bar(it, label='', width=32, hide=False):
    """Progress iterator. Wrap your iterables with it.

    Yields the items of *it* unchanged while drawing a progress bar on
    STREAM (stderr). With ``hide=True`` nothing is written.
    """
    try:
        count = len(it)
    except TypeError:
        # Generalisation: unsized iterables (e.g. generators) previously
        # raised here; materialise them so the total is known up front.
        it = list(it)
        count = len(it)

    def _show(_i):
        """Redraw the bar with _i of count items done."""
        if not hide:
            x = int(width * _i / count)
            STREAM.write(BAR_TEMPLATE % (
                label, BAR_FILLED_CHAR * x, BAR_EMPTY_CHAR * (width - x), _i, count))
            STREAM.flush()

    if count:
        _show(0)
    for i, item in enumerate(it):
        yield item
        _show(i + 1)
    if not hide:
        STREAM.write('\n')
        STREAM.flush()
def dots(it, label='', hide=False):
    """Progress iterator. Prints a dot for each item being iterated.

    With ``hide=True`` nothing at all is written. (The original still wrote
    the trailing newline when hidden; bar() guards it, so dots() now does
    too, and it flushes via STREAM consistently instead of sys.stderr.)
    """
    count = 0
    if not hide:
        STREAM.write(label)
    for item in it:
        if not hide:
            STREAM.write(DOTS_CHAR)
            STREAM.flush()
        count += 1
        yield item
    if not hide:
        STREAM.write('\n')
        STREAM.flush()
| [
"joyrexus@gmail.com"
] | joyrexus@gmail.com |
1cb4025b464eaf48bef3a8516fc4195d693d4a8d | aaba9cb63c35480ecfafad8eb50730479274f034 | /main.py | 5cf7999a46f03e97f33b69d415d5257cf45b0db0 | [] | no_license | RoninEMH/Python-Encryptions | 4b7b095b57fd447e41c10d5c74b591d2be682f84 | 043c5702da85569a4be880ab3786ee1066467a19 | refs/heads/master | 2023-01-01T00:11:03.159135 | 2020-11-03T10:45:03 | 2020-11-03T10:45:03 | 308,143,907 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,929 | py | from tkinter import *
from tkinter import filedialog
import os
import RandomEncryption.Encrypt as Encrypt
def uploadContent(root):
    """Ask the user for a .txt file and show its contents in the text widget.

    Does nothing if the file dialog is cancelled (the original crashed
    trying to open an empty path). Also drops a stray debug print.
    """
    file_path = filedialog.askopenfilename(filetypes=[("Text files", "*.txt")],
                                           title='Open File',
                                           initialdir=str(os.getcwd()))
    if not file_path:  # dialog cancelled
        return
    # NOTE(review): assumes widget order [label, text, ...] — confirm against
    # the layout built in openNewWindow().
    content = root.winfo_children()[1]
    content.configure(state="normal")
    content.delete("1.0", END)
    # with-block guarantees the file handle is closed even on errors.
    with open(file_path, "r") as file:
        for line in file:
            content.insert(END, line)
    content.configure(state="disabled")
def createEncryptFile(root):
    """Encrypt the displayed text and save it to a user-chosen file.

    A new one-time dictionary file is created under ./Dictionaries and the
    encrypted text replaces the widget contents.
    """
    content = root.winfo_children()[1]
    text = content.get("1.0", END)
    # os.path.join is portable and avoids the original's invalid "\." and
    # "\d" escape sequences in hard-coded Windows-style paths.
    dictionaries_dir = os.path.join(os.getcwd(), "Dictionaries")
    if not os.path.exists(dictionaries_dir):
        print("creating...")
        os.mkdir(dictionaries_dir)
    else:
        print("already have dir")
    count = len(os.listdir(dictionaries_dir))
    print(dictionaries_dir, count)
    dictionary_path = os.path.join("Dictionaries", "dictionary" + str(count + 1) + ".txt")
    etext = Encrypt.encrypt(text, dictionary_path)
    # asksaveasfile already returns an open handle (mode "w" by default), or
    # None on cancel; the original reopened it by name and leaked the first
    # handle, and crashed when the dialog was cancelled.
    save_file = filedialog.asksaveasfile(filetypes=[("text files", "*.txt")])
    if save_file is None:  # dialog cancelled
        return
    with save_file:
        save_file.write(etext)
    content.configure(state="normal")
    content.delete("1.0", END)
    content.insert(END, etext)
    content.configure(state="disabled")
def createDecryptFile(root):
    """Decrypt the displayed text using a user-selected dictionary file.

    Returns silently if the user cancels the dialog (the original passed an
    empty path to the decrypter).
    """
    content = root.winfo_children()[1]  # the Text widget
    etext = content.get("1.0", END)
    file_path = filedialog.askopenfilename(
        filetypes=[("text files", "*.txt")],
        initialdir=os.path.join(os.getcwd(), "Dictionaries"))
    if not file_path:
        # Dialog cancelled.
        return
    text = Encrypt.decrypt(etext, file_path)
    content.configure(state="normal")
    content.delete("1.0", END)
    content.insert(END, text)
    content.configure(state="disabled")
def openNewWindow():
    """Open the encryption window: a text view plus Upload/Encrypt/Decrypt buttons."""
    # NOTE(review): relies on the module-global `start` created in the main guard.
    root = Toplevel(start)
    root.geometry("500x500")
    titleLabel = Label(root, height=0, width=0, text="Text of file here:", font=20)
    titleLabel.place(x=0, y=0)
    # Read-only text area; the helper callbacks re-enable it while updating.
    content = Text(root, height=30, width=30)
    content.configure(state="disabled")
    content.place(x=120, y=10)
    uploadButton = Button(root, command=lambda: uploadContent(root), text="Upload", height=2, width=10)
    uploadButton.place(x=400, y=85)
    encryptButton = Button(root, command=lambda: createEncryptFile(root), text="Encrypt", height=2, width=10)
    encryptButton.place(x=400, y=125)
    decryptButton = Button(root, command=lambda: createDecryptFile(root), text="Decrypt", height=2, width=10)
    decryptButton.place(x=400, y=165)
    root.mainloop()
if __name__ == '__main__':
    # Launcher window with a single button that opens the encryption UI.
    start = Tk()
    start.geometry("300x300")
    btn = Button(start,
                 text="Click to open a the encryption window",
                 command=openNewWindow,
                 height=2, width=30)
    btn.place(x=50, y=50)
    start.mainloop()
| [
"60003921+RoninEMH@users.noreply.github.com"
] | 60003921+RoninEMH@users.noreply.github.com |
ad528d06b79a6ff625cf15f31575b1d4f2df68d0 | b296ce3375dfb58d9f1223cb20fd642b4335d75b | /src/custom_logger.py | 8c18ce28adab8cdc46138ad60255ac4f5e61ac38 | [] | no_license | kidnamedtony/warp10 | 0d4dc23266867ac9afe5a0e15b4dccefd5b82b32 | 5f82a5d2cc05aff1dc14d00f9912cbd4fad52fd7 | refs/heads/master | 2020-08-14T04:12:00.568707 | 2019-11-23T20:39:02 | 2019-11-23T20:39:02 | 215,095,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 764 | py | import logging
# Module-level logger for the web-scraping helpers.
logger = logging.getLogger("webscraping_helpers")

# Global default: let DEBUG and above propagate.
logging.basicConfig(level=logging.DEBUG)

# Two sinks: the console shows INFO+, the log file keeps full DEBUG detail
# (appended to Progress.log).
c_handler = logging.StreamHandler()
c_handler.setLevel(logging.INFO)
f_handler = logging.FileHandler("Progress.log", "a")
f_handler.setLevel(logging.DEBUG)

# Identical timestamped format on both handlers.
c_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
f_format = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
c_handler.setFormatter(c_format)
f_handler.setFormatter(f_format)

# Attach both sinks to the logger.
logger.addHandler(c_handler)
logger.addHandler(f_handler)
| [
"a.loiseleur@gmail.com"
] | a.loiseleur@gmail.com |
c93abfeac1a23ee94be4cfa675344b58b62a7439 | 42a812ac785752921dcdddd4ae56064b51452b39 | /bulletin/post/tests/test_post_view.py | cd2fe8bc9b73e4f73864e1875b41fc2744fb8149 | [] | no_license | Pre-Onboarding-Listerine/aimmo-assignment-team-1 | e4a15d3e71f1985febf911360691389f5996f0fb | d94dd7482f065ac1b020bb500984740c13af14e6 | refs/heads/main | 2023-09-02T12:23:49.693075 | 2021-11-03T00:25:18 | 2021-11-03T00:25:18 | 423,444,898 | 1 | 3 | null | 2021-11-02T16:35:38 | 2021-11-01T11:46:19 | Python | UTF-8 | Python | false | false | 4,881 | py | import json
import unittest
from datetime import datetime
from http import HTTPStatus
from unittest import mock
from unittest.mock import MagicMock
import jwt
from assertpy import assert_that
from django.conf import settings
from django.test import Client
from member.models import Member
from ..dto.deleted_post_id import DeletedPostId
from ..dto.post_changes import PostChanges
from ..dto.post_content import PostContents
from ..dto.post_details import PostDetails
from ..models.posting import Posting
from ..service import PostService
from member.service import MemberService
class PostViewTest(unittest.TestCase):
    """HTTP-level tests for the /posts endpoints.

    The service layer (PostService / MemberService) is patched out, so each
    test only checks the response status code and that the view forwarded
    the correct DTOs and acting member to the service.
    """
    def setUp(self):
        # Fresh Django test client per test.
        self.client = Client()

    @mock.patch.object(MemberService, 'get_member')
    @mock.patch.object(PostService, 'write')
    def test_create_post_with_post_contents(self, write, get_member):
        # The member the JWT below will resolve to.
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        # Signed token placed in the Authorization header.
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.post(
            "/posts",
            data=json.dumps({
                "title": "json title",
                "content": "json content",
                "category": "json"
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.CREATED)
        # The view must forward the parsed contents and the authenticated member.
        write.assert_called_with(
            PostContents(
                title="json title",
                content="json content",
                category="json"
            ),
            Member(
                username="asd",
                password="123qwe"
            )
        )

    @mock.patch.object(PostService, 'edit')
    @mock.patch.object(MemberService, 'get_member')
    def test_update_post_with_author(self, get_member, edit):
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        # PATCH carries the post id plus the changed fields.
        response = self.client.patch(
            "/posts",
            data=json.dumps({
                "id": 1,
                "title": "json title",
                "content": "json content",
            }),
            content_type="application/json",
            **headers
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.OK)
        changes = PostChanges(
            id=1,
            title="json title",
            content="json content"
        )
        updater = Member(
            username="asd",
            password="123qwe"
        )
        edit.assert_called_with(changes, updater)

    @mock.patch.object(PostService, 'remove')
    @mock.patch.object(MemberService, 'get_member')
    def test_delete_with_author(self, get_member, remove):
        get_member.return_value = Member(
            username="asd",
            password="123qwe"
        )
        access_token = "Bearer " + jwt.encode(
            payload={
                "username": "asd"
            },
            key=settings.JWT_SECRET,
            algorithm=settings.JWT_ALGORITHM
        )
        headers = {"HTTP_Authorization": access_token}
        response = self.client.delete(
            "/posts",
            data=json.dumps({
                "id": 1
            }),
            content_type="application/json",
            **headers
        )
        # Successful deletion returns 204 with an empty body.
        assert_that(response.status_code).is_equal_to(HTTPStatus.NO_CONTENT)
        deleted_post_id = DeletedPostId(
            id=1
        )
        deleter = Member(
            username="asd",
            password="123qwe"
        )
        remove.assert_called_with(deleted_post_id, deleter)

    @mock.patch.object(PostService, 'details')
    def test_get_details_with_post_id(self, details):
        # Reading details needs no authentication; the service returns a DTO.
        author = Member(
            username="asd",
            password="123qwe"
        )
        details.return_value = PostDetails(
            id=1,
            author=author.username,
            title="before title",
            content="before content",
            category="before",
            created_at=datetime.utcnow().strftime("%m-%d-%Y, %H:%M:%S"),
            updated_at=datetime.utcnow().strftime("%m-%d-%Y, %H:%M:%S"),
            comments=[],
            hits=0
        )
        response = self.client.get(
            "/posts/1"
        )
        assert_that(response.status_code).is_equal_to(HTTPStatus.OK)
        # Second argument is the (absent) reader -- None for anonymous access.
        details.assert_called_with(1, None)
| [
"rlawndhks217@gmail.com"
] | rlawndhks217@gmail.com |
7d87bfec8e15720d2d096e7060e4dc7534528c54 | 36a88379f67d2e7780f2af7e2c88d111368768cb | /meiduo1/celery_tasks/main.py | 50565bcf45ee1aa527a9c33ebaf2b047cdc6c8c7 | [] | no_license | crystal-yu-qian/meiduo | 77b7f6c2ba97339c106caf305e5c9f618968a36c | 8f810d34f6d7bda6fc24ce3536b54ece6adddb56 | refs/heads/master | 2020-06-24T15:14:43.282535 | 2019-08-03T13:32:59 | 2019-08-03T13:32:59 | 198,997,938 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 249 | py | from celery import Celery
import os

# Django settings must be on the environment before Celery configures itself.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "meiduo1.settings")

# Celery application; broker/result settings live in celery_tasks/config.py.
app = Celery('celery_tasks')
app.config_from_object('celery_tasks.config')
# Auto-register task modules from the sms and email packages.
app.autodiscover_tasks(['celery_tasks.sms','celery_tasks.email'])
| [
"1020414192@qq.com"
] | 1020414192@qq.com |
74458f6a29b52a4aba737448865b8f86ca8a360b | 23611933f0faba84fc82a1bc0a85d97cf45aba99 | /google-cloud-sdk/lib/surface/version.py | 7d7321ca5431114f3472d2997a60ebba92f03cde | [
"LicenseRef-scancode-unknown-license-reference",
"Apache-2.0"
] | permissive | KaranToor/MA450 | 1f112d1caccebdc04702a77d5a6cee867c15f75c | c98b58aeb0994e011df960163541e9379ae7ea06 | refs/heads/master | 2021-06-21T06:17:42.585908 | 2020-12-24T00:36:28 | 2020-12-24T00:36:28 | 79,285,433 | 1 | 1 | Apache-2.0 | 2020-12-24T00:38:09 | 2017-01-18T00:05:44 | Python | UTF-8 | Python | false | false | 1,488 | py | # Copyright 2013 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Command to print version information for Cloud SDK components.
"""
from googlecloudsdk.calliope import base
from googlecloudsdk.core import config
from googlecloudsdk.core.updater import update_manager
@base.ReleaseTracks(base.ReleaseTrack.GA)
class Version(base.Command):
  """Print version information for Cloud SDK components.

  This command prints version information for each installed Cloud SDK
  component and prints a message if updates are available.
  """

  def Run(self, args):
    # Collect per-component versions, keyed by component name.
    versions = {}
    if config.Paths().sdk_root:
      # Individual components only exist in a built Cloud SDK install.
      manager = update_manager.UpdateManager()
      versions.update(dict(manager.GetCurrentVersionsInformation()))
    versions['Google Cloud SDK'] = config.CLOUD_SDK_VERSION
    return versions

  def Format(self, args):
    # Render the dict as flattened "name version" lines without padding.
    return 'flattened[no-pad,separator=" "]'
| [
"toork@uw.edu"
] | toork@uw.edu |
0a49f27b81b96b899e8494a9f4512dc15507f254 | 73a405dd8e06154965f3044c2c98aec511b2a87a | /Django-Rest/project_restYoutube/src/urls.py | bb29739bf201b999b5bf5bd9f85407851732ee90 | [] | no_license | kendalvictor/codeando | a32f33147b72963099c4dedd1a62e65041043d48 | 46b0c500f2cef7b21ffb344812ed143d0461e5a7 | refs/heads/master | 2021-07-09T17:15:32.188542 | 2019-01-14T19:10:36 | 2019-01-14T19:10:36 | 110,754,891 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 752 | py | """postagging URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path
# Route table for the project: only the Django admin site is exposed.
urlpatterns = [
    path('admin/', admin.site.urls),
]
| [
"v.villacorta.unmsm@gmail.com"
] | v.villacorta.unmsm@gmail.com |
99fa75604275556ce86c3d1406d1aae5cb567f15 | ef69f75b8afe379573a3086265f67e10d7d49cc0 | /kopia_zapasowa/conway.py | 8e33b18470338fb67a3109468ac3b319fb478a03 | [] | no_license | piterczak/python | cb52353033df0d0b45de5bf90ae4abc9fe505db1 | 72daa88da430716604744189b5f9ecfad0e8d01d | refs/heads/main | 2023-05-05T12:31:46.704560 | 2021-05-12T14:35:12 | 2021-05-12T14:35:12 | 358,575,228 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,730 | py | import random, time, copy
# Grid dimensions and the initial random generation (Conway's Game of Life).
WIDTH = 30
HEIGHT = 10
nextCells = []
for x in range(WIDTH):
    column = []
    for y in range(HEIGHT):
        if random.randint(0, 1) == 0:
            column.append('#')   # live cell
        else:
            # Dead cell MUST be a single space: the rule check in the main
            # loop compares against ' ', and printing relies on the width.
            # (The original appended '' here, so first-generation dead cells
            # never matched the "dead with 3 neighbors" rule and the board
            # printed misaligned.)
            column.append(' ')
    nextCells.append(column)  # list of columns
# Grid wraps toroidally: the modulo arithmetic below makes opposite edges
# neighbors of each other.
while True: # Main program loop.
    print('\n\n\n\n\n') # Separate each step with newlines.
    currentCells = copy.deepcopy(nextCells)
    # Print currentCells on the screen:
    for y in range(HEIGHT):
        for x in range(WIDTH):
            print(currentCells[x][y], end='') # Print the # or space.
        print() # Print a newline at the end of the row.
    # Calculate the next step's cells based on current step's cells:
    for x in range(WIDTH):
        for y in range(HEIGHT):
            # Get neighboring coordinates:
            # `% WIDTH` ensures leftCoord is always between 0 and WIDTH - 1
            leftCoord = (x - 1) % WIDTH
            rightCoord = (x + 1) % WIDTH
            aboveCoord = (y - 1) % HEIGHT
            belowCoord = (y + 1) % HEIGHT
            # Count number of living neighbors:
            numNeighbors = 0
            if currentCells[leftCoord][aboveCoord] == '#':
                numNeighbors += 1 # Top-left neighbor is alive.
            if currentCells[x][aboveCoord] == '#':
                numNeighbors += 1 # Top neighbor is alive.
            if currentCells[rightCoord][aboveCoord] == '#':
                numNeighbors += 1 # Top-right neighbor is alive.
            if currentCells[leftCoord][y] == '#':
                numNeighbors += 1 # Left neighbor is alive.
            if currentCells[rightCoord][y] == '#':
                numNeighbors += 1 # Right neighbor is alive.
            if currentCells[leftCoord][belowCoord] == '#':
                numNeighbors += 1 # Bottom-left neighbor is alive.
            if currentCells[x][belowCoord] == '#':
                numNeighbors += 1 # Bottom neighbor is alive.
            if currentCells[rightCoord][belowCoord] == '#':
                numNeighbors += 1 # Bottom-right neighbor is alive.
            # Set cell based on Conway's Game of Life rules:
            if currentCells[x][y] == '#' and (numNeighbors == 2 or
                numNeighbors == 3):
                # Living cells with 2 or 3 neighbors stay alive:
                nextCells[x][y] = '#'
            elif currentCells[x][y] == ' ' and numNeighbors == 3:
                # Dead cells with 3 neighbors become alive:
                # NOTE(review): the initial grid writes dead cells as '' (not
                # ' '), so this rule only starts matching from the second
                # generation onward -- confirm intended.
                nextCells[x][y] = '#'
            else:
                # Everything else dies or stays dead:
                nextCells[x][y] = ' '
    time.sleep(1) # Add a 1-second pause to reduce flickering.
"piterczak@gmail.com"
] | piterczak@gmail.com |
b167c899d819166b148e3ad4bcf51e7f7d84dc8b | f1fedf0b85ee001f80bdaec157c2a3337d34b290 | /fusion_mujoco_py/export.py | f75755498bf70a5337b510d17a0b3d3dc45238f3 | [
"MIT"
] | permissive | bjnortier/fusion-mujoco-py | 437e43d3487059a9aaab715b2485043a7399a1d6 | 5df0a0acc72f91a669ee7c7d4d98c5e426b11d56 | refs/heads/master | 2021-09-21T00:31:27.223311 | 2018-08-17T14:21:54 | 2018-08-17T14:21:54 | 128,750,031 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,773 | py | # -*- coding: utf-8 -*-
import xml.etree.ElementTree as ET
import os.path
import xml.dom.minidom
import re
XML_TEMPLATE = """<?xml version="1.0" ?>
<mujoco>
<compiler angle="radian" coordinate="local" inertiafromgeom="true" settotalmass="14"/>
<default>
<joint armature=".1" damping=".01" limited="true" solimplimit="0 .8 .03" solreflimit=".02 1" stiffness="8" range="-3.1415926536 3.1415926536" />
<geom rgba="0.2 0.8 0.2 0.5" />
<site type="sphere" rgba=".9 .9 .9 1" size="0.1"/>
<motor ctrllimited="true" ctrlrange="-1 1"/>
</default>
<size nstack="300000" nuser_geom="1"/>
<option gravity="0 0 -9.81" timestep="0.01"/>
<asset>
<texture type="skybox" builtin="gradient" rgb1=".4 .5 .6" rgb2="0 0 0" width="100" height="100"/>
<texture builtin="flat" height="1278" mark="cross" markrgb="1 1 1" name="texgeom" random="0.01" rgb1="0.8 0.6 0.4" rgb2="0.8 0.6 0.4" type="cube" width="127"/>
<texture builtin="checker" height="100" name="texplane" rgb1="0 0 0" rgb2="0.8 0.8 0.8" type="2d" width="100"/>
<material name="groundplanemat" reflectance="0.5" shininess="1" specular="1" texrepeat="100 100" texture="texplane"/>
<material name="geom" texture="texgeom" texuniform="true"/>
</asset>
<worldbody>
<light cutoff="100" diffuse="1 1 1" dir="-0 0 -1.3" directional="true" exponent="1" pos="0 0 1.3" specular=".1 .1 .1"/>
<geom conaffinity="1" condim="3" name="floor" pos="0 0 0" rgba="0.8 0.9 0.8 1" size="1000 1000 1000" type="plane" material="groundplanemat"/>
</worldbody>
<tendon />
<actuator />
</mujoco>"""
def append_root_join_elements(body):
    """Attach six unrestricted root joints to *body*.

    The root (grounded) body gets a slide and a hinge joint on each of the
    x, y and z axes so it can move and rotate freely in the world.
    """
    joint_template = '<joint armature="0" damping="0" limited="false" pos="0 0 0" stiffness="0"/>'
    axes = {'x': '1 0 0', 'y': '0 1 0', 'z': '0 0 1'}
    for joint_type in ['slide', 'hinge']:
        for dim, axis in axes.items():
            joint = ET.fromstring(joint_template)
            joint.set('type', joint_type)
            joint.set('name', '{}_{}'.format(joint_type, dim))
            joint.set('axis', axis)
            body.append(joint)
def rescale(arr):
    """Scale a 3-vector by 10 (Fusion cm -> MuJoCo units), rounded to 3 decimals.

    Raises ValueError if *arr* does not contain exactly three components.
    """
    x, y, z = arr
    return [round(component * 10, 3) for component in (x, y, z)]
def create_xml_body(occ):
    """Build a MuJoCo <body> element (with its mesh <geom>) for one occurrence.

    The body is named after the component and positioned at the occurrence's
    rescaled world origin; the geom references the mesh asset of the same name.
    """
    (origin, _x_axis, _y_axis, _z_axis) = occ.transform.getAsCoordinateSystem()
    x, y, z = rescale(origin.asArray())
    xml_body = ET.Element('body')
    xml_body.set('name', occ.component.name)
    xml_body.set('pos', '{0} {1} {2}'.format(round(x, 3), round(y, 3), round(z, 3)))
    geom = ET.SubElement(xml_body, 'geom')
    geom.set('type', 'mesh')
    geom.set('mesh', occ.component.name)
    return xml_body
def write_stls_and_mojoco_xml(design, exportDir, instance_name):
    """Export every visible occurrence as an STL and emit a MuJoCo model XML.

    Builds the scene from XML_TEMPLATE: one mesh asset + <body> per visible
    occurrence, the grounded occurrence as the free root body, and a hinge
    joint per Fusion revolute joint.  Writes <instance_name>.xml to exportDir.
    """
    rootComp = design.rootComponent
    mujoco = ET.XML(XML_TEMPLATE)
    # Positional indices into the template's children -- guarded by asserts.
    asset = mujoco[4]
    worldbody = mujoco[5]
    tendon = mujoco[6]
    actuator = mujoco[7]
    assert asset.tag == 'asset'
    assert worldbody.tag == 'worldbody'
    assert tendon.tag == 'tendon'
    assert actuator.tag == 'actuator'
    mujoco.set('model', rootComp.name)
    # export the occurrence one by one in the root component to a specified file
    exportMgr = design.exportManager
    allOccu = rootComp.allOccurrences
    for occ in allOccu:
        if not occ.isVisible:
            continue
        stl_filename = os.path.join(exportDir, 'stl', occ.component.name)
        stlExportOptions = exportMgr.createSTLExportOptions(occ, stl_filename)
        stlExportOptions.sendToPrintUtility = False
        # stlExportOptions.isBinaryFormat = False
        exportMgr.execute(stlExportOptions)
        # Register the exported STL as a mesh asset with the component's name.
        mesh = ET.SubElement(asset, 'mesh')
        mesh.set('name', occ.component.name)
        mesh.set('file', 'stl/{0}.stl'.format(occ.component.name))
        mesh.set('scale', '1 1 1')
    # One XML <body> per occurrence, keyed by component name for joint wiring.
    xml_body_for_occ = {}
    for occ in allOccu:
        xml_body = create_xml_body(occ)
        xml_body_for_occ[occ.component.name] = xml_body
    # Grounded component in Fusion 360 = Root components in Mujoco.
    # There can be only one
    found = False
    for occ in allOccu:
        assert found == False
        if (occ.isGrounded):
            body = xml_body_for_occ[occ.component.name]
            append_root_join_elements(body)
            # marker = ET.fromstring('<body pos="0 0 0"><geom pos="0 0 0" type="sphere" size="2" rgba="1.0 0 1.0 1"/></body>'.format(body.get('pos')))
            # body.append(marker)
            worldbody.append(body)
            found = True
            break
    ### Some really flippen weird Fusion 360 behaviour happening here
    ### Joint origin seem to be very buggy or have bizarre behaviour
    ### This is a fudge to get the correct joint origins
    legRadiusParam = design.userParameters.itemByName('LegRadius')
    leg_radius = legRadiusParam.value * 10
    # Joints
    assert(found)
    for joint in rootComp.joints:
        # occurrenceTwo is the parent side of the joint, occurrenceOne the child.
        parent_occ = joint.occurrenceTwo
        child_occ = joint.occurrenceOne
        if not child_occ.isVisible:
            continue
        parent = xml_body_for_occ[parent_occ.component.name]
        child_xml_body = xml_body_for_occ[child_occ.component.name]
        (origin1, xAxis1, yAxis1, zAxis1) = parent_occ.transform.getAsCoordinateSystem()
        (origin2, xAxis2, yAxis2, zAxis2) = child_occ.transform.getAsCoordinateSystem()
        [parent_x, parent_y, parent_z] = rescale(origin1.asArray())
        [child_x, child_y, child_z] = rescale(origin2.asArray())
        # Child position relative to its parent (MuJoCo bodies nest locally).
        local_x = child_x - parent_x
        local_y = child_y - parent_y
        local_z = child_z - parent_z
        parent.append(child_xml_body)
        # Fusion Revolute joint create Mujoco Hinge joints
        if joint.jointMotion.jointType == 1:
            [joint_x1, joint_y1, joint_z1] = rescale(joint.geometryOrOriginOne.origin.asArray())
            joint_xml = ET.SubElement(child_xml_body, 'joint')
            joint_xml.set('axis', '{0} {1} {2}'.format(*joint.geometryOrOriginOne.primaryAxisVector.asArray()))
            joint_xml.set('name', '{0}'.format(joint.name))
            # Joint origin in the child's local frame; leg_radius is the
            # empirical x-offset fudge described above.
            mujuco_joint_pos = '{0} {1} {2}'.format(round(joint_x1 - local_x - leg_radius, 3), round(joint_y1 - local_y, 3), round(joint_z1 - local_z, 3))
            joint_xml.set('pos', mujuco_joint_pos)
            joint_xml.set('type', 'hinge')
            # Visual debug marker (cyan sphere) at the joint position.
            marker = ET.fromstring('<body pos="{0}"><geom pos="0 0 0" type="sphere" size="2" rgba="0 1.0 1.0 1"/></body>'.format(mujuco_joint_pos))
            child_xml_body.append(marker)
    # Strip insignificant whitespace, then pretty-print and write the model.
    mujoco_xml = xml.dom.minidom.parseString(re.sub('\n\s*', '', str(ET.tostring(mujoco), 'utf-8'))).toprettyxml()
    filename = os.path.join(exportDir, '{}.xml'.format(instance_name))
    f = open(filename, 'w')
    f.write(mujoco_xml)
    f.close()
"ben@bjnortier.com"
] | ben@bjnortier.com |
fcdbe8b38ae560684105297029179656a604f2db | 321b4ed83b6874eeb512027eaa0b17b0daf3c289 | /222/222.count-complete-tree-nodes.233499249.Accepted.leetcode.py | 07968f423174e017c6f9c7febffa58330579725a | [] | no_license | huangyingw/submissions | 7a610613bdb03f1223cdec5f6ccc4391149ca618 | bfac1238ecef8b03e54842b852f6fec111abedfa | refs/heads/master | 2023-07-25T09:56:46.814504 | 2023-07-16T07:38:36 | 2023-07-16T07:38:36 | 143,352,065 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 655 | py | class TreeNode(object):
    def __init__(self, x):
        # Binary-tree node: value plus initially-empty child links.
        self.val = x
        self.left = None
        self.right = None
class Solution(object):
    """LeetCode 222: count the nodes of a complete binary tree in O(log^2 n)."""

    def countNodes(self, root):
        """Return the number of nodes in the complete tree rooted at *root*."""
        if not root:
            return 0
        # Heights measured along the leftmost path of each child subtree.
        lh = self.left_depth(root.left)
        rh = self.left_depth(root.right)
        if lh == rh:
            # Left subtree is perfect: 2**lh - 1 nodes plus the root itself.
            return (1 << lh) + self.countNodes(root.right)
        # Otherwise the right subtree is perfect, one level shorter.
        return (1 << rh) + self.countNodes(root.left)

    def left_depth(self, node):
        """Length of the leftmost path starting at *node* (0 for None)."""
        depth = 0
        while node:
            depth += 1
            node = node.left
        return depth
| [
"huangyingw@gmail.com"
] | huangyingw@gmail.com |
dbb42a999e4285bdd06057f28ab6163d4ad4214a | 7fb1e6e0d3c452de2cda8e2338343e9862c6c88a | /29082019/012636.py | 458851940aa311ece8b9346e2b67896f01992d3c | [] | no_license | RobertoCruzF/Intensivo-Nivelacion | 3a10afc61f2744e7fd0d6019f0c572fb8b5deec9 | c7dd1a4aaf1c419f3edb35d30f200c1c0b6a26a9 | refs/heads/master | 2020-07-05T11:50:13.582245 | 2019-08-30T14:35:42 | 2019-08-30T14:35:42 | 202,641,096 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,333 | py | import numpy as np
# NOTE(review): Python 2 syntax (print statements) -- exploratory NumPy demo
# of modulo arithmetic, boolean masks and np.where.
print 0%2 # prints the remainder of dividing the two numbers
print 1%2 # prints the remainder of dividing the two numbers
print 2%2 # prints the remainder of dividing the two numbers
print 3%2 # prints the remainder of dividing the two numbers
print 4%2 # prints the remainder of dividing the two numbers
# create a 5x5 matrix with values from 0 to 24
a= np.arange(25).reshape(5,5)
print a # print the matrix
print a%3 # print the matrix holding the remainder of dividing each element by 3
print a[a%3] # fancy-indexes a with the remainder matrix (NOT a multiplication)
print a%3==0 # prints the matrix filled with True/False depending on the condition
print a[a%3==0] # prints an array of the values that satisfy the condition
print np.nan # prints nan
output= np.empty_like(a)
print output
output.fill(np.nan)
print output
output=np.empty_like(a,dtype='float')
print output
print a # prints the original matrix
mask=a%3==0
output[mask]=a[mask]
print output
print np.where(a%3==0,a,np.nan)# prints nan in the positions where the remainder is not 0
print np.where(a%3==0,a,np.nan)
print a[mask]
print output[mask]
| [
"rcruz@miuandes.cl"
] | rcruz@miuandes.cl |
decad8b7513c3e2ad6dcf0a5919a7fe1be3c113f | a552eed95dd2feb2f065019a805aef09e7a8597d | /gis_2ban_1/settings/base.py | d413ada2b077839bf84e408f0ed7f650600c7c5f | [] | no_license | yeonnex/django_pinterest | d7295f47963a86add8d6b92b452f088dca582a0a | 4c92d12c171d458f8760f462c4b4e5fa50a1c6c8 | refs/heads/master | 2023-08-07T09:16:52.591837 | 2021-09-29T02:31:25 | 2021-09-29T02:31:25 | 402,365,186 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,270 | py | """
Django settings for gis_2ban_1 project.
Generated by 'django-admin startproject' using Django 3.2.4.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
import os
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
from django.urls import reverse_lazy
BASE_DIR = Path(__file__).resolve().parent.parent.parent
# Application definition
# Application definition: Django built-ins, bootstrap4, then project apps.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'bootstrap4',
    'accountapp',
    'profileapp',
    'articleapp',
    'commentapp',
    'projectapp',
    'subscribeapp',
    'likeapp',
]

from django.contrib.messages import constants as messages

# Map Django's ERROR message level onto Bootstrap's "danger" CSS class.
MESSAGE_TAGS = {
    messages.ERROR: 'danger',
}

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'gis_2ban_1.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        # Project-level templates directory plus per-app template dirs.
        'DIRS': [BASE_DIR / 'templates'],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'gis_2ban_1.wsgi.application'

# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'ko-kr'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'staticfiles')
STATICFILES_DIRS = [
    BASE_DIR / "static",
]

# User-uploaded files.
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')

# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'

# Post-login/logout destinations (lazy: URLconf is not loaded yet here).
LOGIN_REDIRECT_URL = reverse_lazy('articleapp:list')
LOGOUT_REDIRECT_URL = reverse_lazy('accountapp:login')
"yeonnex@gmail.com"
] | yeonnex@gmail.com |
3fdf53b2f0ad51016d47fa06c73ae507c8df00ac | db366faf2463466245e2d846822bbb130ff913aa | /inputprint.py | e7814a986e70bc4c63a0acfcc87fc409b61c5a39 | [] | no_license | deep-hack/hacktoberfest | 86042c5323fdc82f9372bac19a11c2b3e3648277 | d275d9b6ee420d09d1a472a053dfd266f42623ce | refs/heads/master | 2023-08-23T10:45:34.070498 | 2021-10-13T03:09:16 | 2021-10-13T03:09:16 | 416,571,411 | 1 | 0 | null | 2021-10-13T03:07:22 | 2021-10-13T03:07:21 | null | UTF-8 | Python | false | false | 44 | py | text = input('Type something:')
print(text)
| [
"noreply@github.com"
] | noreply@github.com |
07d801933ad0974175b66fa7b5dc983374961b54 | 829c03e4c2d8f80dbb39f0bbd871e0db53c4ae95 | /core/migrations/0002_item.py | f665055442e52f29406df343ff873eeee61df88a | [] | no_license | CesarAugusto88/react_django_backend | 6e6fc0e0c3135a0c20b6e76c4c99558624aa1ddf | 42a8c45f9231c1a4194e9aaa087561c6c373120f | refs/heads/main | 2023-03-13T19:48:21.359235 | 2021-03-12T00:02:39 | 2021-03-12T00:02:39 | 345,819,974 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | # Generated by Django 3.1.6 on 2021-02-16 04:21
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Item',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=50)),
('list', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='core.list')),
],
),
]
| [
"cesarcosta.augustos@gmail.com"
] | cesarcosta.augustos@gmail.com |
0dfac5058a137be709eefbebd2ab5fbd5e882e3e | d48170adbf0b8825a2cdb8b7069fc93de67cfd1b | /camera_models/_matrices.py | 6958d98a5d43c11372028f07431cef3528656123 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0"
] | permissive | eugeniu1994/Stereo-Camera-LiDAR-calibration | d4069ca7caec601d1005d644fedc81d2dd5a481e | 54eec1b911f78ca6b66c35803c47d016b7069499 | refs/heads/master | 2023-05-12T20:56:30.799287 | 2021-06-02T11:36:58 | 2021-06-02T11:36:58 | 371,688,947 | 14 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,057 | py | from typing import Sequence
import numpy as np
from ._homogeneus import to_homogeneus
def get_plucker_matrix(A: np.ndarray, B: np.ndarray) -> np.ndarray:
    """Plücker matrix L = A·Bᵀ − B·Aᵀ of the line through points A and B.

    Both points are lifted to homogeneous coordinates first; the result is
    a skew-symmetric 4x4 matrix representing the 3D line joining them.
    """
    A = to_homogeneus(A)
    B = to_homogeneus(B)
    return np.outer(A, B) - np.outer(B, A)
def _get_roll_matrix(theta_x: float = 0.0) -> np.ndarray:
    """Rotation about the x-axis (roll) by *theta_x* radians."""
    c, s = np.cos(theta_x), np.sin(theta_x)
    return np.array(
        [
            [1.0, 0.0, 0.0],
            [0.0, c, -s],
            [0.0, s, c],
        ]
    )
def _get_pitch_matrix(theta_y: float = 0.0) -> np.ndarray:
    """Rotation about the y-axis (pitch) by *theta_y* radians."""
    c, s = np.cos(theta_y), np.sin(theta_y)
    return np.array(
        [
            [c, 0.0, s],
            [0.0, 1.0, 0.0],
            [-s, 0.0, c],
        ]
    )
def _get_yaw_matrix(theta_z: float = 0.0) -> np.ndarray:
    """Rotation about the z-axis (yaw) by *theta_z* radians."""
    c, s = np.cos(theta_z), np.sin(theta_z)
    return np.array(
        [
            [c, -s, 0.0],
            [s, c, 0.0],
            [0.0, 0.0, 1.0],
        ]
    )
def get_rotation_matrix(
    theta_x: float = 0.0, theta_y: float = 0.0, theta_z: float = 0.0
) -> np.ndarray:
    """Combined rotation applying roll (x), then pitch (y), then yaw (z)."""
    roll = _get_roll_matrix(theta_x)
    pitch = _get_pitch_matrix(theta_y)
    yaw = _get_yaw_matrix(theta_z)
    return yaw @ pitch @ roll
def get_calibration_matrix(
    f: float,
    px: float = 0.0,
    py: float = 0.0,
    mx: float = 1.0,
    my: float = 1.0,
) -> np.ndarray:
    """Camera calibration matrix K.

    *f* is the focal length, (*px*, *py*) the principal point and
    (*mx*, *my*) the pixel densities along x and y.
    """
    perspective = np.array([[f, 0.0, px], [0.0, f, py], [0.0, 0.0, 1.0]])
    pixel_scaling = np.diag([mx, my, 1])
    return pixel_scaling @ perspective
def get_projection_matrix(
    f: float,
    px: float = 0.0,
    py: float = 0.0,
    C: Sequence[float] = (0.0, 0.0, 0.0),
    theta_x: float = 0.0,
    theta_y: float = 0.0,
    theta_z: float = 0.0,
    mx: float = 1.0,
    my: float = 1.0,
) -> np.ndarray:
    """Full camera projection matrix P = K · R · [I | -C].

    *C* is the camera centre in world coordinates; the thetas are the
    roll/pitch/yaw angles of the camera orientation.
    """
    K = get_calibration_matrix(f=f, px=px, py=py, mx=mx, my=my)
    R = get_rotation_matrix(theta_x=theta_x, theta_y=theta_y, theta_z=theta_z)
    # World-to-camera translation expressed as the block matrix [I | -C].
    extrinsic_translation = np.c_[np.eye(3), -np.asarray(C)]
    return K @ R @ extrinsic_translation
| [
"vezeteu.eugeniu@yahoo.com"
] | vezeteu.eugeniu@yahoo.com |
e3d8cb3403b6a91ceba70ae0162d75363b5c0a9d | 01abb5fe2d6a51e8ee4330eaead043f4f9aad99d | /Repo_Files/Zips/plugin.video.streamhub/resources/lib/smodules/trailer.py | 729c4e3dd4f78c4945f1e6ce4a8b48274938d418 | [] | no_license | MrAnhell/StreamHub | 01bb97bd3ae385205f3c1ac6c0c883d70dd20b9f | e70f384abf23c83001152eae87c6897f2d3aef99 | refs/heads/master | 2021-01-18T23:25:48.119585 | 2017-09-06T12:39:41 | 2017-09-06T12:39:41 | 87,110,979 | 0 | 0 | null | 2017-04-03T19:09:49 | 2017-04-03T19:09:49 | null | UTF-8 | Python | false | false | 3,905 | py | # -*- coding: utf-8 -*-
'''
Exodus Add-on
Copyright (C) 2016 Exodus
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib,json,urlparse,base64,random
from resources.lib.smodules import client
from resources.lib.smodules import control
class trailer:
    """Resolve and play a YouTube trailer for a title (Kodi add-on helper)."""
    def __init__(self):
        self.base_link = 'http://www.youtube.com'
        # Base64-obfuscated YouTube Data API keys; one is chosen at random
        # per instance to spread quota usage.
        self.key_link = random.choice(['QUl6YVN5RDd2aFpDLTYta2habTVuYlVyLTZ0Q0JRQnZWcnFkeHNz', 'QUl6YVN5Q2RiNEFNenZpVG0yaHJhSFY3MXo2Nl9HNXBhM2ZvVXd3'])
        self.key_link = '&key=%s' % base64.urlsafe_b64decode(self.key_link)
        self.search_link = 'https://www.googleapis.com/youtube/v3/search?part=snippet&type=video&maxResults=5&q=%s'
        self.youtube_search = 'https://www.googleapis.com/youtube/v3/search?q='
        self.youtube_watch = 'http://www.youtube.com/watch?v=%s'

    def play(self, name, url=None):
        """Resolve *url* (or search by *name*) and hand playback to Kodi."""
        try:
            url = self.worker(name, url)
            if url == None: return
            title = control.infoLabel('listitem.title')
            if title == '': title = control.infoLabel('listitem.label')
            icon = control.infoLabel('listitem.icon')
            item = control.item(path=url, iconImage=icon, thumbnailImage=icon)
            try: item.setArt({'icon': icon})
            except: pass
            item.setInfo(type='Video', infoLabels = {'title': title})
            control.player.play(url, item)
        except:
            # Best-effort: playback failures are deliberately swallowed.
            pass

    def worker(self, name, url):
        """Normalise the input into a playable URL; fall back to a search."""
        try:
            if url.startswith(self.base_link):
                # Already a full YouTube link.
                url = self.resolve(url)
                if url == None: raise Exception()
                return url
            elif not url.startswith('http://'):
                # Treat the value as a bare video id.
                url = self.youtube_watch % url
                url = self.resolve(url)
                if url == None: raise Exception()
                return url
            else:
                raise Exception()
        except:
            # Any failure (including url=None) falls back to searching
            # YouTube for "<name> trailer".
            query = name + ' trailer'
            query = self.youtube_search + query
            url = self.search(query)
            if url == None: return
            return url

    def search(self, url):
        """Query the YouTube Data API and return the first resolvable hit."""
        try:
            query = urlparse.parse_qs(urlparse.urlparse(url).query)['q'][0]
            url = self.search_link % urllib.quote_plus(query) + self.key_link
            result = client.request(url)
            items = json.loads(result)['items']
            items = [(i['id']['videoId']) for i in items]
            for url in items:
                url = self.resolve(url)
                if not url is None: return url
        except:
            return

    def resolve(self, url):
        """Map a watch URL / video id to a plugin.video.youtube play URL.

        Returns None for unavailable or region-blocked videos.
        """
        try:
            id = url.split('?v=')[-1].split('/')[-1].split('?')[0].split('&')[0]
            result = client.request('http://www.youtube.com/watch?v=%s' % id)
            message = client.parseDOM(result, 'div', attrs = {'id': 'unavailable-submessage'})
            message = ''.join(message)
            alert = client.parseDOM(result, 'div', attrs = {'id': 'watch7-notification-area'})
            if len(alert) > 0: raise Exception()
            if re.search('[a-zA-Z]', message): raise Exception()
            url = 'plugin://plugin.video.youtube/play/?video_id=%s' % id
            return url
        except:
            return
| [
"mediahubiptv@gmail.com"
] | mediahubiptv@gmail.com |
3e7e6b480be9eac864f79df9f37018cbe2e66e19 | bf5ce5bb620ac0d865c453fc8aa923d77b9debef | /command_line/file_parser.py | 16042be937c25cba3949083a9766b8361f52fc1c | [] | no_license | Caseymonroe1/visualization_pipeline | a91e7e4e94f78522b83cd1d00f909aaa36c14adc | e896de716056e7f59fb1a019c7186a8aeb125bdb | refs/heads/master | 2023-04-13T09:05:32.042099 | 2021-03-25T15:21:29 | 2021-03-25T15:21:29 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,152 | py | from collections import defaultdict
import pandas as pd
import numpy as np
import math
def transposeRel(directory, relFile):
    """Pivot a vcftools ``.relatedness2`` output file into a CSV matrix.

    Each data line is expected to start with "GSM" and be tab-separated,
    with the row sample id in column 0, the column sample id in column 1
    and the relatedness value in column 6.  The result is written to
    ``<directory>/<relFile without .relatedness2 suffix>.csv``; pairs that
    never appear in the input are left as NaN.

    Fixes over the previous version:
    * ``relFile.strip(".relatedness2")`` stripped *characters*, not the
      suffix, mangling filenames containing any of those letters; the
      suffix is now removed explicitly.
    * the output no longer assumes exactly 240 distinct column samples
      (rows were padded with a hard-coded 240 NaNs, which crashed for any
      other cohort size); the matrix is built dynamically instead.
    """
    cols = []  # column sample ids, in first-seen order
    maindict = defaultdict(dict)  # row id -> {column id: relatedness value}
    with open(relFile) as f:
        for line in f:
            # Data lines start with a sample accession; the header does not.
            if line.startswith("GSM"):
                splitline = line.split("\t")
                col = splitline[1]
                if col not in cols:
                    cols.append(col)
                maindict[splitline[0]][col] = float(splitline[6].strip("\n"))
    df = pd.DataFrame.from_dict(maindict, orient='index')
    # Preserve first-seen column order; unseen pairs stay NaN.
    df = df.reindex(columns=cols)
    suffix = ".relatedness2"
    filename = relFile[:-len(suffix)] if relFile.endswith(suffix) else relFile
    df.to_csv(directory + "/" + filename + ".csv")
def makeDATFile(pos, gt, chrm, typecount, snpdensity, directory, outfn, typel):
    """Bin per-variant counts into the windows of an existing .dat file.

    Parameters (as far as the code shows):
    * pos / chrm / typecount — parallel sequences: variant position,
      chromosome label and the count value for that variant.
    * gt — unused in this function (kept for interface compatibility?
      NOTE(review): confirm before removing).
    * snpdensity — a pandas DataFrame whose numeric columns 0/1/2 hold
      chromosome label (prefixed by two characters, e.g. "hsN"), window
      start and window end; column 3 is overwritten with the binned
      averages and the frame is written to
      ``<directory><outfn>_<typel>.dat`` (tab-separated, no header).
    """
    ## create an empty dictionary to store the variants per chromosome
    typedict = {}
    ## add all chromosomes to the dictionary (autosomes 1-22 plus X/Y/MT)
    for chrnum in range(1, 23):
        typedict[chrnum] = []
    typedict['X'] = []
    typedict['Y'] = []
    typedict['MT'] = []
    ## map each variant to its chromosome as a {position: count} record;
    ## numeric chromosome labels become int keys, X/Y/MT stay strings
    for x in range(len(pos)):
        temp ={pos[x]:typecount[x]}
        try:
            typedict[int(chrm[x])].append(temp)
        except:
            typedict[chrm[x]].append(temp)
    ## bin them based on the windows of the original dat file
    ## TODO: allow the user to vary these bin sizes for different tracts
    chrnm = snpdensity[0]
    start = snpdensity[1]
    end = snpdensity[2]
    typelist = []
    for x in range(len(start)):
        total = 0
        counter = 0
        # chrnm values carry a two-character prefix before the chromosome
        # label (e.g. "hs1") — strip it; int() fails for X/Y/MT.
        try:
            snps = typedict[int(chrnm[x][2:])]
        except:
            snps = typedict[chrnm[x][2:]]
        # Average the counts of variants falling inside (start, end);
        # NOTE(review): the `break` assumes variants are position-sorted
        # per chromosome — confirm upstream guarantees this.
        for var in snps:
            try:
                pos = list(var.keys())[0]
                if pos < end[x]:
                    if pos > start[x]:
                        total+= list(var.values())[0]
                        counter+=1
                else:
                    break
            except:
                pos = list(var.keys())[0]
        if total ==0 and counter == 0:
            typelist.append(0)
        else:
            try:
                typelist.append(total/counter)
            except:
                print("ERROR")
    ## replace the snp-density column with the binned values and write out
    ## NOTE(review): drop() is not in-place and its result is discarded —
    ## this line is a no-op as written; confirm intent.
    snpdensity.drop([3], axis=1)
    snpdensity[3] = typelist
    snpdensity.to_csv(directory + outfn + "_" + typel + ".dat", index=False, header=False, sep="\t")
def retrieveMetaData(samples, directory, outfn):
    """Collect GEO-style sample metadata from per-sample text files.

    For each sample directory under *directory*, the first ``*.txt`` file
    is scanned for " - characteristics", " - source" and " - supplementary"
    lines; the parsed values are assembled into one row per sample.  The
    resulting DataFrame is written to ``<outfn>.csv`` and returned.

    Parameters
    ----------
    samples : list[str] | None
        Sample directory names; when None, every sub-directory of
        *directory* whose name starts with "GS" is used.
    directory : str
        Root directory containing the per-sample sub-directories.
    outfn : str
        Output path without the ".csv" extension.

    Fixes over the previous version:
    * ``os`` was used but never imported in this module (NameError on the
      first call) — imported locally here.
    * the filter loop iterated ``range(len(samples)-1)`` and therefore
      never checked the last discovered sample.
    """
    import os  # local import: ``os`` is not imported at this file's top level

    metadata = {}
    if samples is None:
        # Discover sample sub-directories; keep only GEO-style "GS..." ids.
        samples = [f.name for f in os.scandir(directory) if f.is_dir()]
        remove = []
        for name in samples:  # fixed: previously skipped the last entry
            try:
                if not name.startswith("GS"):
                    remove.append(name)
            except Exception:
                print("ERROR")
        for removal in remove:
            samples.remove(removal)
    # ``cols`` accumulates the union of field names; ``counter == 0`` marks
    # the first sample, whose file defines the initial column order.
    counter = 0
    cols = []
    for sample in samples:
        metalist = []
        for file in os.listdir(directory + "/" + sample):
            if file.endswith(".txt"):
                with open(directory + "/" + sample + "/" + file) as f:
                    for line in f:
                        if line.startswith(" - character"):
                            # "key1: v1,v2 key2: v3" style payload: re-attach
                            # the ':' lost by split(), then flatten the
                            # comma-separated values.
                            temp = line.strip('\n').split(":")
                            empty = []
                            for x in range(0, len(temp)-1):
                                temp[x] = temp[x] + ":"
                            for item in temp:
                                if item.startswith(" - "):
                                    continue
                                else:
                                    xx = item.split(",")
                                    for x in xx:
                                        empty.append(x.lstrip())
                            # Tokens ending in ':' name a column; following
                            # tokens are values appended (comma-joined) at
                            # that column's position.
                            for val in range(0, len(empty)):
                                if counter == 0:
                                    if empty[val].endswith(":"):
                                        cols.append(empty[val].strip(":"))
                                        location = cols.index(empty[val].strip(":"))
                                    else:
                                        try:
                                            metalist.insert(location, metalist[location] + "," + empty[val])
                                            metalist.pop()
                                        except IndexError:
                                            metalist.insert(location, empty[val])
                                else:
                                    if empty[val].endswith(":"):
                                        if empty[val].strip(":") in cols:
                                            location = cols.index(empty[val].strip(":"))
                                        else:
                                            cols.append(empty[val].strip(":"))
                                            location = cols.index(empty[val].strip(":"))
                                    else:
                                        try:
                                            metalist.insert(location, metalist[location] + "," + empty[val])
                                            metalist.pop()
                                        except IndexError:
                                            metalist.insert(location, empty[val])
                        elif line.startswith(" - source"):
                            if counter == 0:
                                cols.append('source')
                            line = line.split(" : ")
                            location = cols.index('source')
                            metalist.insert(location, line[1].strip())
                        elif line.startswith(" - supp"):
                            if counter == 0:
                                cols.append('id')
                            line = line.split(" : ")
                            files = line[1].split(",")
                            # NOTE(review): assumes the second listed file
                            # path has >= 9 segments and a 13-character
                            # suffix to strip — confirm for new inputs.
                            parts = files[1].split("/")
                            name = parts[8][:-13]
                            location = cols.index('id')
                            metalist.insert(location, name)
                metadata[sample] = metalist
                counter += 1
                # Only the first .txt file per sample is consulted.
                break
    df = pd.DataFrame.from_dict(metadata, orient='index', columns=cols)
    df.to_csv(outfn + ".csv")
    return df
"tuk32868@temple.edu"
] | tuk32868@temple.edu |
102cfb4a48484d5440f4765e4468f290cddc203a | ea9f2c578e479fcaebbba84d2a1fe63e96f9145d | /src/common/models/user.py | 4d4c9b4f978ae046c363d45934812a5da49ed9b4 | [] | no_license | spandey2405/onlinecoderbackend | 1a6bd278f725ae5b1ad1c57b951ac5f9f87b71eb | afffd81c027a46247dd47e2ca02ab981e124b09a | refs/heads/master | 2021-01-17T07:57:03.077054 | 2016-08-01T13:41:50 | 2016-08-01T13:41:50 | 64,668,772 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,411 | py | from django.db import models
from src.common.libraries.constants import *
import binascii, os, uuid
class UserManager(models.Manager):
    """Manager for :class:`User` that centralises id and salt generation."""

    def generate_userid(self):
        """Return a new random UUID4 string to use as the primary key."""
        return str(uuid.uuid4())

    def generate_salt(self):
        """Return a random hex salt of exactly ``SALT_LENGTH`` characters.

        ``os.urandom`` requires an int; floor division is used because
        under Python 3 ``SALT_LENGTH / 2`` yields a float (TypeError).
        Each random byte hex-encodes to two characters, hence the halving.
        """
        return binascii.hexlify(os.urandom(SALT_LENGTH // 2)).decode()
class User(models.Model):
    """Custom user record with a UUID primary key and salted password hash."""
    # UUID4 string assigned lazily in save(); never editable via forms/admin.
    user_id = models.CharField(max_length=UID_LENGTH, primary_key=True, editable=False)
    # NOTE(review): ``name`` is declared as an EmailField — presumably this
    # was meant to be a plain CharField; confirm before changing (migrations).
    name = models.EmailField(max_length=200)
    email = models.EmailField(max_length=MAX_EMAIL_LENGTH, unique=True)
    password_hash = models.CharField(max_length=MAX_PASSWORD_LENGTH)
    # NOTE(review): default is the *integer* 0 on a CharField — verify the
    # intended default (likely '' or '0').
    phoneno = models.CharField(max_length=10, default=0)
    created = models.DateTimeField(auto_now_add=True)
    updated = models.DateTimeField(auto_now=True)
    # Per-user random hex salt, generated in save() via UserManager.
    salt = models.CharField(max_length=SALT_LENGTH)
    objects = UserManager()
    def is_authenticated(self):
        """
        Always return True. This is a way to tell if the user has been
        authenticated in templates.
        """
        return True
    def save(self, *args, **kwargs):
        # Lazily assign the UUID primary key and salt on first save only,
        # so re-saving an existing row never rotates them.
        if not self.user_id:
            self.user_id = User.objects.generate_userid()
        if not self.salt:
            self.salt = User.objects.generate_salt()
        return super(User, self).save(*args, **kwargs)
    def __unicode__(self):
        # Python 2 style representation; Django 2+ would use __str__.
        return self.user_id
    class Meta:
        db_table = 'user'
        app_label = 'common'
"spandey2405@gmail.com"
] | spandey2405@gmail.com |
c35e93365f1e17a6d56d974a31c3515c56378c08 | 211b7ba054bce9edf398672de744e620178f387a | /venv/bin/pip | 2c32a34b4ce10cd87351bccc9aa7ebe2dc8dc817 | [] | no_license | shanmugara/lirc | 7fe695ca57de6a9d81eb5015dde3f06158a1efe1 | c813ca8f4a2f37ff426dfad3199c9199246b3dd5 | refs/heads/master | 2023-04-14T13:39:23.634191 | 2019-01-06T05:30:43 | 2019-01-06T05:30:43 | 162,856,241 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 400 | #!/Users/speriya/PycharmProjects/lirc/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==10.0.1','console_scripts','pip'
# Auto-generated setuptools console-script stub for this virtualenv's
# ``pip`` executable: it dispatches to pip's declared 'console_scripts'
# entry point for the pinned distribution version.
__requires__ = 'pip==10.0.1'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Normalise argv[0] (drop the "-script.py"/".exe" suffix added on some
    # platforms) so pip reports its own name correctly.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==10.0.1', 'console_scripts', 'pip')()
    )
| [
"psraj@optonline.net"
] | psraj@optonline.net | |
aa561fe592473e8f85fe213e1e614edb4922b5b3 | 331743624d898949b9039e8a05263d3a057ae359 | /Baekjoon_Algorithm/One-Dimensional Array/7. (B1)[4344] 평균은 넘겠지.py | a920b106b6b586ee986421de90f12d8a128ada4b | [] | no_license | moey920/Algorithm | 7c968fa3f6ecb4d964581d8a44c23571f0568af4 | f9c4d22dabe6e686c79d3bf25628e5ccc7d2d377 | refs/heads/master | 2023-04-19T16:13:59.072954 | 2021-04-29T16:17:04 | 2021-04-29T16:17:04 | 362,868,426 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 190 | py | C = int(input())
for i in range(C) :
list(map(int, input().split()))
# for i in range(C) :
# score = 0
# for j in range(N[0]) :
# score += N[j+1]
# print(score) | [
"moey920@naver.com"
] | moey920@naver.com |
e42793c0bb18d4947a7c52488c8b146780db1a2c | 1548ce77537dcd50ab04b0eaee050b5d30553e23 | /autotabular/evaluation/abstract_evaluator.py | 383ee7a13fd7f7766c258b0df36b52ef013fbb89 | [
"Apache-2.0"
] | permissive | Shamoo100/AutoTabular | 4a20e349104246bf825ebceae33dca0a79928f2e | 7d71bf01d2b7d84fcf5f65c9f45c5cea1255d8a2 | refs/heads/main | 2023-08-13T21:34:34.329888 | 2021-10-02T07:06:00 | 2021-10-02T07:06:00 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 22,259 | py | import logging
import multiprocessing
import time
import warnings
from typing import Any, Dict, List, Optional, TextIO, Tuple, Type, Union, cast
import autotabular.pipeline.classification
import autotabular.pipeline.regression
import numpy as np
from autotabular.constants import CLASSIFICATION_TASKS, MULTICLASS_CLASSIFICATION, MULTILABEL_CLASSIFICATION, MULTIOUTPUT_REGRESSION, REGRESSION_TASKS
from autotabular.metrics import Scorer, calculate_loss
from autotabular.pipeline.implementations.util import convert_multioutput_multiclass_to_multilabel
from autotabular.util.backend import Backend
from autotabular.util.logging_ import PicklableClientLogger, get_named_client_logger
from ConfigSpace import Configuration
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.ensemble import VotingClassifier, VotingRegressor
from smac.tae import StatusType
from threadpoolctl import threadpool_limits
__all__ = ['AbstractEvaluator']
# General TYPE definitions for numpy
TYPE_ADDITIONAL_INFO = Dict[str, Union[int, float, str, Dict, List, Tuple]]
class MyDummyClassifier(DummyClassifier):
def __init__(
self,
config: Configuration,
random_state: np.random.RandomState,
init_params: Optional[Dict[str, Any]] = None,
dataset_properties: Dict[str, Any] = {},
include: Optional[List[str]] = None,
exclude: Optional[List[str]] = None,
):
self.config = config
if config == 1:
super(MyDummyClassifier, self).__init__(strategy='uniform')
else:
super(MyDummyClassifier, self).__init__(strategy='most_frequent')
self.random_state = random_state
self.init_params = init_params
self.dataset_properties = dataset_properties
self.include = include
self.exclude = exclude
def pre_transform(
self,
X: np.ndarray,
y: np.ndarray,
fit_params: Optional[Dict[str, Any]] = None
) -> Tuple[np.ndarray, Dict[str, Any]]: # pylint: disable=R0201
if fit_params is None:
fit_params = {}
return X, fit_params
def fit(
self,
X: np.ndarray,
y: np.ndarray,
sample_weight: Optional[Union[np.ndarray, List]] = None
) -> DummyClassifier:
return super(MyDummyClassifier, self).fit(
np.ones((X.shape[0], 1)), y, sample_weight=sample_weight)
def fit_estimator(
self,
X: np.ndarray,
y: np.ndarray,
fit_params: Optional[Dict[str, Any]] = None) -> DummyClassifier:
return self.fit(X, y)
def predict_proba(self,
X: np.ndarray,
batch_size: int = 1000) -> np.ndarray:
new_X = np.ones((X.shape[0], 1))
probas = super(MyDummyClassifier, self).predict_proba(new_X)
probas = convert_multioutput_multiclass_to_multilabel(probas).astype(
np.float32)
return probas
def estimator_supports_iterative_fit(self) -> bool: # pylint: disable=R0201
return False
def get_additional_run_info(self) -> Optional[TYPE_ADDITIONAL_INFO]: # pylint: disable=R0201
return None
class MyDummyRegressor(DummyRegressor):
    """Trivial baseline regressor used when no pipeline configuration exists.

    The interface mirrors the real pipeline classes so the evaluator can
    treat both uniformly.  ``config == 1`` predicts the target mean; any
    other value predicts the median.  The actual features are ignored
    entirely: the model is trained on a single constant column.
    """

    def __init__(
        self,
        config: Configuration,
        random_state: np.random.RandomState,
        init_params: Optional[Dict[str, Any]] = None,
        dataset_properties: Dict[str, Any] = {},
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
    ):
        self.config = config
        strategy = 'mean' if config == 1 else 'median'
        super().__init__(strategy=strategy)
        self.random_state = random_state
        self.init_params = init_params
        self.dataset_properties = dataset_properties
        self.include = include
        self.exclude = exclude

    def pre_transform(
        self,
        X: np.ndarray,
        y: np.ndarray,
        fit_params: Optional[Dict[str, Any]] = None
    ) -> Tuple[np.ndarray, Dict[str, Any]]:  # pylint: disable=R0201
        """No-op preprocessing: hand the data back untouched."""
        params = fit_params if fit_params is not None else {}
        return X, params

    def fit(
        self,
        X: np.ndarray,
        y: np.ndarray,
        sample_weight: Optional[Union[np.ndarray, List]] = None
    ) -> DummyRegressor:
        """Fit on a constant feature column; real features are ignored."""
        constant_features = np.ones((X.shape[0], 1))
        return super().fit(constant_features, y, sample_weight=sample_weight)

    def fit_estimator(
        self,
        X: np.ndarray,
        y: np.ndarray,
        fit_params: Optional[Dict[str, Any]] = None) -> DummyRegressor:
        """Alias for :meth:`fit`; ``fit_params`` are ignored."""
        return self.fit(X, y)

    def predict(self, X: np.ndarray, batch_size: int = 1000) -> np.ndarray:
        """Return float32 predictions; ``batch_size`` is ignored."""
        constant_features = np.ones((X.shape[0], 1))
        return super().predict(constant_features).astype(np.float32)

    def estimator_supports_iterative_fit(self) -> bool:  # pylint: disable=R0201
        """Dummy models cannot be fit iteratively."""
        return False

    def get_additional_run_info(self) -> Optional[TYPE_ADDITIONAL_INFO]:  # pylint: disable=R0201
        """No extra run information is produced."""
        return None
def _fit_and_suppress_warnings(logger: Union[logging.Logger,
PicklableClientLogger],
model: BaseEstimator, X: np.ndarray,
y: np.ndarray) -> BaseEstimator:
def send_warnings_to_log(
message: Union[Warning, str],
category: Type[Warning],
filename: str,
lineno: int,
file: Optional[TextIO] = None,
line: Optional[str] = None,
) -> None:
logger.debug('%s:%s: %s:%s' %
(filename, lineno, str(category), message))
return
with warnings.catch_warnings():
warnings.showwarning = send_warnings_to_log
model.fit(X, y)
return model
class AbstractEvaluator(object):
    """Base class for target-algorithm evaluators driven by SMAC.

    An evaluator loads the dataset through the *backend*, builds either a
    real pipeline (when *configuration* is a ``Configuration``) or a dummy
    baseline model (otherwise), and reports losses plus bookkeeping
    information back to the optimizer through *queue*.  Subclasses are
    expected to implement the actual train/predict loop and call
    :meth:`finish_up` when done.
    """
    def __init__(
        self,
        backend: Backend,
        queue: multiprocessing.Queue,
        metric: Scorer,
        port: Optional[int],
        configuration: Optional[Union[int, Configuration]] = None,
        scoring_functions: Optional[List[Scorer]] = None,
        seed: int = 1,
        output_y_hat_optimization: bool = True,
        num_run: Optional[int] = None,
        include: Optional[List[str]] = None,
        exclude: Optional[List[str]] = None,
        disable_file_output: Union[bool, List[str]] = False,
        init_params: Optional[Dict[str, Any]] = None,
        budget: Optional[float] = None,
        budget_type: Optional[str] = None,
    ):
        # Limit the number of threads that numpy uses
        threadpool_limits(limits=1)
        self.starttime = time.time()
        self.configuration = configuration
        self.backend = backend
        self.port = port
        self.queue = queue
        self.datamanager = self.backend.load_datamanager()
        self.include = include
        self.exclude = exclude
        # Optional hold-out splits; .get() returns None when absent.
        self.X_valid = self.datamanager.data.get('X_valid')
        self.y_valid = self.datamanager.data.get('Y_valid')
        self.X_test = self.datamanager.data.get('X_test')
        self.y_test = self.datamanager.data.get('Y_test')
        self.metric = metric
        self.task_type = self.datamanager.info['task']
        self.seed = seed
        self.output_y_hat_optimization = output_y_hat_optimization
        self.scoring_functions = scoring_functions
        if isinstance(disable_file_output, (bool, list)):
            self.disable_file_output: Union[bool,
                                            List[str]] = disable_file_output
        else:
            raise ValueError(
                'disable_file_output should be either a bool or a list')
        # Pick model class and prediction function by task type; a
        # non-Configuration ``configuration`` means "use the dummy model".
        if self.task_type in REGRESSION_TASKS:
            if not isinstance(self.configuration, Configuration):
                self.model_class = MyDummyRegressor
            else:
                self.model_class = \
                    autotabular.pipeline.regression.SimpleRegressionPipeline
            self.predict_function = self._predict_regression
        else:
            if not isinstance(self.configuration, Configuration):
                self.model_class = MyDummyClassifier
            else:
                self.model_class = autotabular.pipeline.classification.SimpleClassificationPipeline
            self.predict_function = self._predict_proba
        self._init_params = {
            'data_preprocessing:feat_type': self.datamanager.feat_type
        }
        if init_params is not None:
            self._init_params.update(init_params)
        if num_run is None:
            num_run = 0
        self.num_run = num_run
        logger_name = '%s(%d):%s' % (self.__class__.__name__.split('.')[-1],
                                     self.seed, self.datamanager.name)
        # With a port, log through the picklable network client logger so
        # messages survive process boundaries.
        if self.port is None:
            self.logger = logging.getLogger(__name__)
        else:
            self.logger = get_named_client_logger(
                name=logger_name,
                port=self.port,
            )
        self.Y_optimization: Optional[Union[List, np.ndarray]] = None
        self.Y_actual_train = None
        self.budget = budget
        self.budget_type = budget_type
        # Please mypy to prevent not defined attr
        self.model = self._get_model()
    def _get_model(self) -> BaseEstimator:
        """Instantiate the model selected in ``__init__``.

        Dummy models only need the config/seed; real pipelines also get
        dataset properties and the include/exclude component filters.
        """
        if not isinstance(self.configuration, Configuration):
            model = self.model_class(
                config=self.configuration,
                random_state=self.seed,
                init_params=self._init_params)
        else:
            if self.task_type in REGRESSION_TASKS:
                dataset_properties = {
                    'task': self.task_type,
                    'sparse': self.datamanager.info['is_sparse'] == 1,
                    'multioutput': self.task_type == MULTIOUTPUT_REGRESSION,
                }
            else:
                dataset_properties = {
                    'task': self.task_type,
                    'sparse': self.datamanager.info['is_sparse'] == 1,
                    'multilabel': self.task_type == MULTILABEL_CLASSIFICATION,
                    'multiclass': self.task_type == MULTICLASS_CLASSIFICATION,
                }
            model = self.model_class(
                config=self.configuration,
                dataset_properties=dataset_properties,
                random_state=self.seed,
                include=self.include,
                exclude=self.exclude,
                init_params=self._init_params)
        return model
    def _loss(
        self,
        y_true: np.ndarray,
        y_hat: np.ndarray,
        scoring_functions: Optional[List[Scorer]] = None
    ) -> Union[float, Dict[str, float]]:
        """Auto-tabular follows a minimization goal. The calculate_loss
        internally translate a score function to a minimization problem.
        For a dummy prediction, the worst result is assumed.

        Parameters
        ----------
        y_true : np.ndarray
            Ground-truth targets.
        y_hat : np.ndarray
            Model predictions to score against *y_true*.
        scoring_functions : Optional[List[Scorer]]
            Extra metrics to compute; defaults to ``self.scoring_functions``.

        Returns
        -------
        float | Dict[str, float]
            A single loss value, or a metric-name -> loss dict when
            scoring functions are supplied.
        """
        scoring_functions = (
            self.scoring_functions
            if scoring_functions is None else scoring_functions)
        if not isinstance(self.configuration, Configuration):
            if scoring_functions:
                return {self.metric.name: self.metric._worst_possible_result}
            else:
                return self.metric._worst_possible_result
        return calculate_loss(
            y_true,
            y_hat,
            self.task_type,
            self.metric,
            scoring_functions=scoring_functions)
    def finish_up(
        self,
        loss: Union[Dict[str, float], float],
        train_loss: Optional[Union[float, Dict[str, float]]],
        opt_pred: np.ndarray,
        valid_pred: np.ndarray,
        test_pred: np.ndarray,
        additional_run_info: Optional[TYPE_ADDITIONAL_INFO],
        file_output: bool,
        final_call: bool,
        status: StatusType,
    ) -> Tuple[float, Union[float, Dict[str, float]], int, Dict[str, Union[
            str, int, float, Dict, List, Tuple]]]:
        """This function does everything necessary after the fitting is done:
        * predicting
        * saving the files for the ensembles_statistics
        * generate output for SMAC
        We use it as the signal handler so we can recycle the code for the
        normal usecase and when the runsolver kills us here :)
        """
        self.duration = time.time() - self.starttime
        if file_output:
            file_out_loss, additional_run_info_ = self.file_output(
                opt_pred,
                valid_pred,
                test_pred,
            )
        else:
            file_out_loss = None
            additional_run_info_ = {}
        validation_loss, test_loss = self.calculate_auxiliary_losses(
            valid_pred,
            test_pred,
        )
        # A non-None file-output loss signals a failure while persisting
        # predictions; report it instead of the model's loss.
        if file_out_loss is not None:
            return self.duration, file_out_loss, self.seed, additional_run_info_
        if isinstance(loss, dict):
            loss_ = loss
            loss = loss_[self.metric.name]
        else:
            loss_ = {}
        additional_run_info = ({} if additional_run_info is None else
                               additional_run_info)
        for metric_name, value in loss_.items():
            additional_run_info[metric_name] = value
        additional_run_info['duration'] = self.duration
        additional_run_info['num_run'] = self.num_run
        if train_loss is not None:
            additional_run_info['train_loss'] = train_loss
        if validation_loss is not None:
            additional_run_info['validation_loss'] = validation_loss
        if test_loss is not None:
            additional_run_info['test_loss'] = test_loss
        rval_dict = {
            'loss': loss,
            'additional_run_info': additional_run_info,
            'status': status
        }
        if final_call:
            rval_dict['final_queue_element'] = True
        # Hand the result back to the optimizer process.
        self.queue.put(rval_dict)
        return self.duration, loss_, self.seed, additional_run_info_
    def calculate_auxiliary_losses(
        self,
        Y_valid_pred: np.ndarray,
        Y_test_pred: np.ndarray,
    ) -> Tuple[Optional[float], Optional[float]]:
        """Compute validation/test losses when those splits exist.

        Either value is None when the corresponding predictions or ground
        truth are unavailable.  Dict-valued losses are reduced to the main
        metric's entry.
        """
        if Y_valid_pred is not None:
            if self.y_valid is not None:
                validation_loss: Optional[Union[float,
                                                Dict[str,
                                                     float]]] = self._loss(
                                                         self.y_valid,
                                                         Y_valid_pred)
                if isinstance(validation_loss, dict):
                    validation_loss = validation_loss[self.metric.name]
            else:
                validation_loss = None
        else:
            validation_loss = None
        if Y_test_pred is not None:
            if self.y_test is not None:
                test_loss: Optional[Union[float,
                                          Dict[str, float]]] = self._loss(
                                              self.y_test, Y_test_pred)
                if isinstance(test_loss, dict):
                    test_loss = test_loss[self.metric.name]
            else:
                test_loss = None
        else:
            test_loss = None
        return validation_loss, test_loss
    def file_output(
        self,
        Y_optimization_pred: np.ndarray,
        Y_valid_pred: np.ndarray,
        Y_test_pred: np.ndarray,
    ) -> Tuple[Optional[float], Dict[str, Union[str, int, float, List, Dict,
                                                Tuple]]]:
        """Persist the model and its predictions through the backend.

        Returns ``(None, {})`` on success (or when output is disabled),
        or ``(1.0, {'error': ...})`` when the predictions are unusable
        (shape mismatch with the targets, or NaNs).  Which artifacts are
        written is controlled by ``self.disable_file_output``.
        """
        # Abort if self.Y_optimization is None
        # self.Y_optimization can be None if we use partial-cv, then,
        # obviously no output should be saved.
        if self.Y_optimization is None:
            return None, {}
        # Abort in case of shape misalignment
        if np.shape(self.Y_optimization)[0] != Y_optimization_pred.shape[0]:
            return (
                1.0,
                {
                    'error':
                    "Targets %s and prediction %s don't have "
                    "the same length. Probably training didn't "
                    'finish' %
                    (np.shape(self.Y_optimization), Y_optimization_pred.shape)
                },
            )
        # Abort if predictions contain NaNs
        for y, s in [
                # Y_train_pred deleted here. Fix unittest accordingly.
            [Y_optimization_pred, 'optimization'],
            [Y_valid_pred, 'validation'],
            [Y_test_pred, 'test']
        ]:
            if y is not None and not np.all(np.isfinite(y)):
                return (
                    1.0,
                    {
                        'error':
                        'Model predictions for %s set contains NaNs.' % s
                    },
                )
        # Abort if we don't want to output anything.
        # Since disable_file_output can also be a list, we have to explicitly
        # compare it with True.
        if self.disable_file_output is True:
            return None, {}
        # Notice that disable_file_output==False and disable_file_output==[]
        # means the same thing here.
        if self.disable_file_output is False:
            self.disable_file_output = []
        # Here onwards, the self.disable_file_output can be treated as a list
        self.disable_file_output = cast(List, self.disable_file_output)
        # This file can be written independently of the others down bellow
        if ('y_optimization' not in self.disable_file_output):
            if self.output_y_hat_optimization:
                self.backend.save_targets_ensemble(self.Y_optimization)
        models: Optional[BaseEstimator] = None
        if hasattr(self, 'models'):
            if len(self.models) > 0 and self.models[
                    0] is not None:  # type: ignore[attr-defined]
                if ('models' not in self.disable_file_output):
                    # Wrap the per-fold models in a voting ensemble so a
                    # single cv_model artifact can be persisted.
                    if self.task_type in CLASSIFICATION_TASKS:
                        models = VotingClassifier(
                            estimators=None,
                            voting='soft',
                        )
                    else:
                        models = VotingRegressor(estimators=None)
                    # Mypy cannot understand hasattr yet
                    models.estimators_ = self.models  # type: ignore[attr-defined]
        self.backend.save_numrun_to_dir(
            seed=self.seed,
            idx=self.num_run,
            budget=self.budget,
            model=self.model
            if 'model' not in self.disable_file_output else None,
            cv_model=models
            if 'cv_model' not in self.disable_file_output else None,
            ensemble_predictions=(Y_optimization_pred if 'y_optimization'
                                  not in self.disable_file_output else None),
            valid_predictions=(Y_valid_pred if 'y_valid'
                               not in self.disable_file_output else None),
            test_predictions=(Y_test_pred if 'y_test'
                              not in self.disable_file_output else None),
        )
        return None, {}
    def _predict_proba(
        self,
        X: np.ndarray,
        model: BaseEstimator,
        task_type: int,
        Y_train: Optional[np.ndarray] = None,
    ) -> np.ndarray:
        """Predict class probabilities, logging (not printing) warnings.

        ``Y_train`` is required: it is used to pad the probability matrix
        for classes missing from the training fold.
        """
        def send_warnings_to_log(
            message: Union[Warning, str],
            category: Type[Warning],
            filename: str,
            lineno: int,
            file: Optional[TextIO] = None,
            line: Optional[str] = None,
        ) -> None:
            self.logger.debug('%s:%s: %s:%s' %
                              (filename, lineno, str(category), message))
            return
        with warnings.catch_warnings():
            warnings.showwarning = send_warnings_to_log
            Y_pred = model.predict_proba(X, batch_size=1000)
        if Y_train is None:
            raise ValueError('Y_train is required for classification problems')
        Y_pred = self._ensure_prediction_array_sizes(Y_pred, Y_train)
        return Y_pred
    def _predict_regression(
            self,
            X: np.ndarray,
            model: BaseEstimator,
            task_type: int,
            Y_train: Optional[np.ndarray] = None) -> np.ndarray:
        """Predict regression targets, logging (not printing) warnings.

        One-dimensional outputs are reshaped to a column vector so the
        downstream code can treat all predictions as 2-D.
        """
        def send_warnings_to_log(
            message: Union[Warning, str],
            category: Type[Warning],
            filename: str,
            lineno: int,
            file: Optional[TextIO] = None,
            line: Optional[str] = None,
        ) -> None:
            self.logger.debug('%s:%s: %s:%s' %
                              (filename, lineno, str(category), message))
            return
        with warnings.catch_warnings():
            warnings.showwarning = send_warnings_to_log
            Y_pred = model.predict(X)
        if len(Y_pred.shape) == 1:
            Y_pred = Y_pred.reshape((-1, 1))
        return Y_pred
    def _ensure_prediction_array_sizes(self, prediction: np.ndarray,
                                       Y_train: np.ndarray) -> np.ndarray:
        """Expand *prediction* to one column per dataset class.

        When the training fold missed some classes (multiclass only), the
        model emits fewer probability columns than the dataset has
        classes; the missing columns are filled with zeros and the seen
        columns are mapped back to their original class indices.
        """
        num_classes = self.datamanager.info['label_num']
        if self.task_type == MULTICLASS_CLASSIFICATION and \
                prediction.shape[1] < num_classes:
            if Y_train is None:
                raise ValueError('Y_train must not be None!')
            classes = list(np.unique(Y_train))
            mapping = dict()
            for class_number in range(num_classes):
                if class_number in classes:
                    index = classes.index(class_number)
                    mapping[index] = class_number
            new_predictions = np.zeros((prediction.shape[0], num_classes),
                                       dtype=np.float32)
            for index in mapping:
                class_index = mapping[index]
                new_predictions[:, class_index] = prediction[:, index]
            return new_predictions
        return prediction
| [
"jianzhnie@126.com"
] | jianzhnie@126.com |
24f9f16f69a7c79ea54993ed409d8c5073aeb4a2 | f2f89f00d60559c4a412a575da686228509b9595 | /test/range_test.py | 519736ae1e72505ecd445bfaa5dd6f47dbc28d29 | [
"MIT"
] | permissive | csb-comren/dbinterfacer | 425ce07e827f5d9d6423de21082b32cd1820c399 | b69b5885e092ca5d913437642886e03cfe93504f | refs/heads/master | 2020-03-19T06:48:17.737511 | 2019-06-05T16:41:57 | 2019-06-05T16:41:57 | 136,056,778 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | from dbinterfacer.uploaders import NmeaUploader
from dbinterfacer.helpers.pointmodel import Point_Model
from secret import local_url, comren_url
f = open('test/data/NMEA.txt', 'rb')
# p = u.point_model.generate_point()
# print(p)
# u.determine_tables(p)
# print(p)
# u.upload(local_url, [1])
u = NmeaUploader(local_url, 'simple depth')
u.parse_file(f)
print(u.get_time_range_and_bbox())
| [
"jaykaron@gmail.com"
] | jaykaron@gmail.com |
b0a6a9fb4b673fef435c9369de71747c5e8d9c22 | b8ebe0cc58e4f6ef07ef889fb5d5e81c46ccb82b | /tests/test_add.py | bdd067f382e1648871d32e2f3ddb062edd0b3f92 | [
"MIT"
] | permissive | jacksonllee/python-library-template | 20da6f5aa9d3f5ffb18d64e1c577b73ddaaf26b1 | 1ee5d47975ea9e0ed7fdf82701bcb83d1852dce6 | refs/heads/main | 2022-08-29T03:17:49.416812 | 2022-08-27T19:25:21 | 2022-08-27T19:25:21 | 193,034,751 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | from foobar.add import add
def test_add():
assert add(1, 2) == 3
| [
"jacksonlunlee@gmail.com"
] | jacksonlunlee@gmail.com |
217c9954a80b7cdde3023d805047880a0d4f800e | ffe86787b92e1eaa9ca8e482fbd3b34f3d5ce2a5 | /manage.py | 869a07db92b66599eaf9325d798bb9014e519e93 | [] | no_license | rajatarora21/BeMyFriend | 59a11fc614dc2d908953de87a6023929ba20d08b | 2e328c013814fb65b894a4be2fbddc4bdb3c35be | refs/heads/master | 2020-08-03T02:22:49.999592 | 2019-10-10T01:04:04 | 2019-10-10T01:04:04 | 211,595,803 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 630 | py | #!/usr/bin/env python
"""Django's command-line utility for administrative tasks."""
import os
import sys
def main():
    """Django's command-line entry point: run administrative tasks."""
    # Point Django at this project's settings unless the caller already
    # set DJANGO_SETTINGS_MODULE in the environment.
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'BeMyFriend.settings')
    try:
        from django.core.management import execute_from_command_line
    except ImportError as exc:
        raise ImportError(
            "Couldn't import Django. Are you sure it's installed and "
            "available on your PYTHONPATH environment variable? Did you "
            "forget to activate a virtual environment?"
        ) from exc
    # Dispatch to the management command named on the command line.
    execute_from_command_line(sys.argv)
if __name__ == '__main__':
    main()
| [
"arorarajat9999@gmail.com"
] | arorarajat9999@gmail.com |
b843de38c9488e62441a89a633f1336a972f423a | 0d91c86aa0c70115d70f09e3e45460df73dcc652 | /alpha_a.py | d8263fba6f7a743eb66fc076ec23ea33da0d66a6 | [] | no_license | Michael-Gong/DLA_project | 589791a3ca5dba7a7d5b9a170c9e2ad712a3ae36 | 3a6211451cc404d772246f9c2b60e0c97576cfef | refs/heads/master | 2021-04-27T08:11:37.414851 | 2019-01-18T05:24:40 | 2019-01-18T05:24:40 | 122,650,552 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,172 | py | %matplotlib inline
#import sdf
import matplotlib
import matplotlib as mpl
mpl.style.use('https://raw.githubusercontent.com/Michael-Gong/DLA_project/master/style')
#matplotlib.use('agg')
import matplotlib.pyplot as plt
import numpy as np
from numpy import ma
from matplotlib import colors, ticker, cm
from matplotlib.mlab import bivariate_normal
from optparse import OptionParser
import os
from mpl_toolkits.mplot3d import Axes3D
import random
from mpl_toolkits import mplot3d
from matplotlib import rc
import matplotlib.transforms as mtransforms
import sys
#rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
## for Palatino and other serif fonts use:
#rc('font',**{'family':'serif','serif':['Palatino']})
#rc('text', usetex=True)
font = {'family' : 'Carlito',
'color' : 'black',
'weight' : 'normal',
'size' : 25,
}
#plt.scatter(theta_x/np.pi*180, arg_gg, c=np.linspace(1,np.size(theta_x),np.size(theta_x))[np.newaxis,:], s=20, cmap='nipy_spectral', edgecolors='None')
#cbar=plt.colorbar(ticks=np.linspace(1, np.size(theta_x), 5), shrink=1)# orientation='horizontal', shrink=0.2)
#cbar.set_label(r'$Nth$', fontdict=font)
#plt.xlim(-45,45)
##print(theta_x)
#plt.xlabel(r'$\theta\ [degree]$',fontdict=font)
#plt.ylabel(r'$\gamma$',fontdict=font)
##plt.xticks(fontsize=30); plt.yticks(fontsize=30);
##plt.ylim(0,2000.0)
a0=np.linspace(10,210,1001)
#alpha=0.04**1.5*a0/(4.6**0.75)
alpha= (179.0**0.5*a0**2/2.3e6-9.6*a0**2/2.03e6-1.3e1/2.03e6)**0.5
#plt.plot(a0,alpha,'-k',linewidth=4)
plt.plot(a0,(a0**2-6.5)**0.5/1000.0,'-k',linewidth=4)
alpha=0.04**1.5*a0/(4.6**0.75)
#plt.plot(a0,alpha,'--b',linewidth=4)
u = 1.0/12.5
a0_1=np.array([10,25,50,75,100,125,150,200])
alpha_1=np.array([-2+2*u,-2+6*u,-2+10*u,-2+11*u,-1+1.5*u,-1+3*u,-1+4*u,-1+5*u])
plt.scatter(a0_1,10**(alpha_1-0.25*u),marker='+',s=40,color='r')
plt.xlabel(r'$a_0$',fontdict=font)
plt.ylabel(r'$\alpha$',fontdict=font)
plt.xticks(fontsize=30); plt.yticks(fontsize=30);
plt.yscale('log')
plt.ylim(10**-2,10**0)
fig = plt.gcf()
#fig.set_size_inches(30, 15)
fig.set_size_inches(8, 4)
#fig.savefig('./bunch_theta_en.png',format='png',dpi=160)
#plt.close("all")
| [
"noreply@github.com"
] | noreply@github.com |
9f369caf27b979dd71e6839290f58bd026ced437 | d49f1fa8643a9203193f5ab06f2ab048c43e511b | /M8_Fullstack/d3_SQL/project.py | 59c84bdf8bc44284e38d0060e6653ac497225db8 | [] | no_license | lpianta/ai_fall_exercises | 5cb768a619d0435f485d415ff520c4793d296673 | 997876f902849fb5eb3f45dc8bf77dac41c33480 | refs/heads/main | 2023-05-29T08:04:21.646204 | 2021-06-03T08:21:34 | 2021-06-03T08:21:34 | 317,841,076 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 678 | py | import sql_database
# DDL for the `project` table.
# FIX: the original statement was invalid SQL — it was missing the comma after
# the `index int` column and the comma between the two FOREIGN KEY clauses.
# NOTE(review): `index` is a reserved word in several SQL dialects (MySQL,
# SQLite); if sql_database targets one of those, the column must be quoted or
# renamed — confirm against the backing engine before changing the schema.
project = """CREATE TABLE IF NOT EXISTS project (
NameP VARCHAR(20),
Topic VARCHAR(20) NOT NULL,
St int NOT NULL PRIMARY KEY,
Grade float,
id int not null,
tch VARCHAR(20) NOT NULL,
index int,
FOREIGN KEY(st) REFERENCES student(id) ON UPDATE RESTRICT,
FOREIGN KEY(tch) REFERENCES teachers(id) ON UPDATE RESTRICT
);"""
# Create the table, bulk-load it from CSV, then echo its contents back.
sql_database.sql_execute(project)
sql_database.pd_upload_csv('project', './Dataset/project.csv')
df = sql_database.pandas_select("select * from project")
print(df)
sql_database.close()
| [
"pianta.luca@gmail.com"
] | pianta.luca@gmail.com |
3fabf4f4ba845759d4b8fc8576fc5bc284056ab8 | a4dfbafdb2d1cc39534a481747fe9746ebb4ef7a | /src/models/base_models/resnest_model.py | 158eb4a786862e66ce97c979d9f509c5c8e10334 | [] | no_license | huangchuanhong/dist_face_pytorch | 3f41045f662de0f9826bc5041bdd2b9abbcb9558 | dc662b713564b2c3f5a61d4ad0e8a78e4aa54a84 | refs/heads/master | 2022-12-31T23:01:26.997504 | 2020-10-26T08:29:39 | 2020-10-26T08:29:39 | 264,177,084 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,114 | py | import torch.nn as nn
from .backbones import ResNest
from ..registry import BASE_MODEL
from ..utils import constant_init, normal_init, kaiming_init
@BASE_MODEL.register_module
class ResNestModel(nn.Module):
    """ResNeSt backbone followed by a grouped 7x7 conv head, batch norm and a
    linear projection to ``feature_dim``.

    Attribute names (``backbone``/``gdc``/``bn``/``fc``) are part of the
    checkpoint layout and must not be renamed.
    """

    def __init__(self, feature_dim, **kwargs):
        super().__init__()
        self.backbone = ResNest(**kwargs)
        self.gdc = nn.Conv2d(
            2048,
            2048,
            groups=2048 // 16,
            kernel_size=(7, 7),
            stride=(1, 1),
            padding=(0, 0),
            bias=False,
        )
        self.bn = nn.BatchNorm2d(2048)
        self.fc = nn.Linear(2048, feature_dim)

    def init_weights(self, pretrained=None):
        """Initialise the backbone (optionally from *pretrained*) and the head."""
        self.backbone.init_weights(pretrained=pretrained)
        kaiming_init(self.gdc)
        constant_init(self.bn, 1)
        #normal_init(self.fc, std=0.01)

    def forward(self, input):
        feat = self.backbone(input)
        feat = self.bn(self.gdc(feat))
        return self.fc(feat.view([-1, 2048]))

    def train(self, mode):
        # Only the backbone and the head BN toggle train/eval state here.
        for module in (self.backbone, self.bn):
            module.train(mode)
| [
"huangchuanhong@xgrobotics.com"
] | huangchuanhong@xgrobotics.com |
3c83baf87db5dfcbd91d068acf92999196d079f9 | 5b221c2809d82cf13a2b24a56589943315cdb381 | /2017/2017-29.py | 55d90764145038cedbdefc1a88653ac763c185a6 | [] | no_license | Bruce-V/CS-BM25 | c2cd797e9be2fc55af9c8944882fd55109ebee61 | 2401f0ddb24c1712b13c0c96e13565f60d48705d | refs/heads/main | 2023-01-04T23:29:20.906427 | 2020-11-09T08:44:22 | 2020-11-09T08:44:22 | 259,228,835 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 14,071 | py | # Copyright 2020 zicheng Zhang(18551701375@163.com)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pymongo
import re
from math import log
# Source database: the full PubMed dump.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["pubmed"]
mywords = mydb["freqwords3"]  # per-article word frequencies, chemical terms, keywords and MeSH headings
mytopic = mydb["topics2017"]  # relevance-judged article lists per 2017 topic
mypapers = mydb["papers"]  # raw PubMed article records
# Output database for topic 29 of the 2017 task.
mytopicdb = myclient["cs2017_29"]
mydata = mytopicdb["cs2017_score_29"]  # second-pass ranking, rescored by list length
mycount = mytopicdb["cs2017_score_29_related"]  # scored docs joined with their relevance judgements
def sortsecond(myfreq, mydata, yuzhi):
    """Score every pre-cutoff PubMed article against topic 29 ("ampullary
    carcinoma" / "KRAS") with a hand-tuned BM25 variant and store every
    article whose score exceeds *yuzhi* (threshold) into *mydata*.

    :param myfreq: collection of per-article word-frequency/metadata docs
    :param mydata: output collection for the scored documents
    :param yuzhi: minimum bm25_score required to keep a document
    """
    k = 0  # number of documents stored so far (progress counter)
    # BM25 parameters for the abstract (k1/b1) and metadata (k2/b2) parts.
    k1 = 1.2
    b1 = 0.75
    k2 = 1.2
    b2 = 0.75
    # Precomputed IDF weights, idf = log10((N - df + 0.5) / (df + 0.5)).
    # N = 29138919 for abstracts, 13670358 for chemical lists, 25389659 for
    # MeSH lists and 5435471 for keyword lists; the df counts are hard-coded
    # corpus statistics for each query term.
    idf_ampullary = log((29138919 - 2979 + 0.5) / (2979 + 0.5), 10)
    idf_carcinoma = log((29138919 - 494907 + 0.5) / (494907 + 0.5), 10)
    idf_kras = log((29138919 - 11153 + 0.5) / (11153 + 0.5), 10)
    idf_ele_1 = log((13670358 - 4386 + 0.5) / (4386 + 0.5), 10)
    idf_ele_2 = log((13670358 - 9122 + 0.5) / (9122 + 0.5), 10)
    idf_ele_3 = log((13670358 - 0 + 0.5) / (0 + 0.5), 10)
    idf_eleM_1 = log((25389659 - 7320 + 0.5) / (7320 + 0.5), 10)
    idf_eleM_2 = log((25389659 - 3644 + 0.5) / (3644 + 0.5), 10)
    idf_eleM_3 = log((25389659 - 0 + 0.5) / (0 + 0.5), 10)
    idf_eleM_4 = log((25389659 - 9122 + 0.5) / (9122 + 0.5), 10)
    idf_eleM_5 = log((25389659 - 12216 + 0.5) / (12216 + 0.5), 10)
    idf_eleM_6 = log((25389659 - 17437618 + 0.5) / (17437618 + 0.5), 10)
    idf_eleM_7 = log((25389659 - 8002162 + 0.5) / (8002162 + 0.5), 10)
    idf_eleM_8 = log((25389659 - 4029038 + 0.5) / (4029038 + 0.5), 10)
    idf_eleM_9 = log((25389659 - 2842020 + 0.5) / (2842020 + 0.5), 10)
    idf_eleM_10 = log((25389659 - 4785026 + 0.5) / (4785026 + 0.5), 10)
    idf_eleK_1 = log((5435471 - 48 + 0.5) / (48 + 0.5), 10)
    idf_eleK_2 = log((5435471 - 1503 + 0.5) / (1503 + 0.5), 10)
    for x in myfreq.find({}, {'PMID', 'wordfreq', 'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'},
                         no_cursor_timeout=True):
        # Per-document accumulators: ss1/ss2/ss4 are the chemical/MeSH/keyword
        # metadata IDF sums; gx holds the co-occurrence bonus (gx1..gx3 are
        # never assigned — they always stay 0).
        ss1 = 0
        ss2 = 0
        ss4 = 0
        len_freq = 0
        ampullary_score = 0
        carcinoma_score = 0
        kras_score = 0
        gx = 0
        gx1 = 0
        gx2 = 0
        gx3 = 0
        # Only articles up to the PMID cutoff (the 2017 snapshot) are scored.
        if int(x['PMID']) <= 27868941:
            cop = re.compile("[^\u4e00-\u9fa5^a-z^A-Z^0-9]")  # matches any character that is not Chinese, a letter or a digit
            ChemicalNameList = x['ChemicalNameList']
            MeshHeadingNameList = x['MeshHeadingNameList']
            KeywordsList = x['KeywordsList']
            wordfreq = x['wordfreq']
            # NOTE(review): `'ampullary' in x` tests membership in a (key, value)
            # tuple, i.e. an *exact* key match — intentional? Also shadows the
            # outer cursor variable `x` inside the comprehension only.
            ampullary = [True for x in wordfreq.items() if 'ampullary' in x]
            carcinoma = [True for x in wordfreq.items() if 'carcinoma' in x]
            # --------------- abstract word-frequency statistics ---------------#
            for key in wordfreq:
                len_freq = len_freq + wordfreq[key]
            for key in wordfreq:
                # NOTE(review): 'ampullary ' carries a trailing space, unlike the
                # substring tests below — confirm this is deliberate.
                if 'ampullary ' in key:
                    ampullary_score = ampullary_score + wordfreq[key]
            for key in wordfreq:
                key1 = cop.sub('', key)
                if 'carcinoma' in key1:
                    carcinoma_score = carcinoma_score + wordfreq[key]
            for key in wordfreq:
                key1 = cop.sub('', key)
                if 'kras' in key1:
                    kras_score = kras_score + wordfreq[key]
            # --------------- co-occurrence analysis: abstract ---------------#
            # If "ampullary" and "carcinoma" both occur, any KRAS mention (in
            # abstract, chemicals, MeSH or keywords) earns a one-off idf_kras
            # bonus in gx (later attempts just reassign the same value).
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for key in wordfreq:
                    key1 = cop.sub('', key)
                    if 'kras' in key1:
                        gx = idf_kras
                        break
            # --------------- co-occurrence analysis: chemical list ---------------#
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for ele in ChemicalNameList:
                    if 'ras' in ele['NameOfSubstance']:
                        gx = idf_kras
                        break
            # --------------- co-occurrence analysis: MeSH headings ---------------#
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for eleM in MeshHeadingNameList:
                    if 'ras' in eleM['MeshHeadingName']:
                        gx = idf_kras
                        break
            # --------------- co-occurrence analysis: keywords ---------------#
            if len(ampullary) != 0 and ampullary[0] and len(carcinoma) != 0 and carcinoma[0]:
                for eleK in KeywordsList:
                    if 'kras' in str(eleK).lower():
                        gx = idf_kras
                        break
            # BM25 term saturation for the abstract; 83 is the (hard-coded)
            # average abstract length used for length normalisation.
            bm25_ampullary_score = (((k1 + 1) * ampullary_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + ampullary_score))
            bm25_carcinoma_score = (((k1 + 1) * carcinoma_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + carcinoma_score))
            bm25_kras_score = (((k1 + 1) * kras_score) / ((k1 * (b1 + (1 - b1) * (len_freq / 83))) + kras_score))
            bm25_ab_score = idf_ampullary * bm25_ampullary_score + idf_carcinoma * bm25_carcinoma_score + idf_kras * bm25_kras_score
            # Raw frequency -> idf pairs kept alongside the score for debugging.
            idf_para = [{str(ampullary_score): idf_ampullary}, {str(carcinoma_score): idf_carcinoma}, {str(kras_score): idf_kras}]
            # Chemical list: each matched substance contributes its idf once
            # (the `break` makes each test an any-match). The commented
            # re.findall lines below are leftovers from another topic's query.
            for ele in ChemicalNameList:
                # if re.findall(r'(BRAF|Proto-Oncogene Proteins B-raf|human|humans|male)',ele['NameOfSubstance']):
                if 'KRAS' in ele['NameOfSubstance']:
                    ss1 = ss1 + idf_ele_1
                    break
            for ele in ChemicalNameList:
                # if re.findall(r'(BRAF|Proto-Oncogene Proteins B-raf|human|humans|male)',ele['NameOfSubstance']):
                if 'Proto-Oncogene Proteins p21(ras)' in ele['NameOfSubstance']:
                    ss1 = ss1 + idf_ele_2
                    break
            for ele in ChemicalNameList:
                # if re.findall(r'(BRAF|Proto-Oncogene Proteins B-raf|human|humans|male)',ele['NameOfSubstance']):
                if 'Genes, ras' in ele['NameOfSubstance']:
                    ss1 = ss1 + idf_ele_3
                    break
            # MeSH headings: same any-match pattern, one idf per matched term.
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'Ampulla of Vater' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_1
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'Common Bile Duct Neoplasms' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_2
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'KRAS' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_3
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'Proto-Oncogene Proteins p21(ras)' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_4
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'Genes, ras' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_5
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if re.findall(r'(Human|Humans)', eleM['MeshHeadingName']):
                    ss2 = ss2 + idf_eleM_6
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'Male' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_7
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'Middle Aged' in eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_8
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                # Exact equality here (unlike the substring tests above), so
                # "Middle Aged" does not also count as "Aged".
                if 'Aged' == eleM['MeshHeadingName']:
                    ss2 = ss2 + idf_eleM_9
                    break
            for eleM in MeshHeadingNameList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if re.findall(r'(Adult|Adults)', eleM['MeshHeadingName']):
                    ss2 = ss2 + idf_eleM_10
                    break
            # Keywords: case-insensitive substring match on the stringified entry.
            for eleK in KeywordsList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'ampullary carcinoma' in str(eleK).lower():
                    ss4 = ss4 + idf_eleK_1
                    break
            for eleK in KeywordsList:
                # if re.findall(r'(Melanoma|Proto-Oncogene Proteins B-raf|Humans|Neoplasms|Neoplasm|Male|Mutation|Mutational)',eleM['MeshHeadingName']):
                if 'kras' in str(eleK).lower():
                    ss4 = ss4 + idf_eleK_2
                    break
            # Combine the three parts: abstract BM25 + metadata BM25 (13 is the
            # hard-coded average metadata-list length) + co-occurrence bonus.
            total_gx = (gx + gx1 + gx2 + gx3)
            cmk_len = len(ChemicalNameList) + len(MeshHeadingNameList) + len(KeywordsList)
            bm25_cmk_len = ss1 + ss2 + ss4
            bm25_cmk_score = (((k2 + 1) * bm25_cmk_len) / ((k2 * (b2 + (1 - b2) * (cmk_len / 13))) + bm25_cmk_len))
            bm25_score = bm25_ab_score + bm25_cmk_score + total_gx
            # Persist only documents above the threshold.
            if (bm25_score > yuzhi):
                mydict = {"PMID": x['PMID'], "ab_score": bm25_ab_score, "idf_para": idf_para,
                          "cmk_len": cmk_len, "cmk_freq": bm25_cmk_len, "bm25_cmk_score": bm25_cmk_score,
                          "gx": total_gx,
                          "bm25_score": bm25_score,
                          "ChemicalNameList": x['ChemicalNameList'],
                          "MeshHeadingNameList": x['MeshHeadingNameList'], "KeywordsList": x['KeywordsList']}
                y = mydata.insert_one(mydict)
                k = k + 1
                print(str(y) + '---------' + str(k))
def count(mysort, mycount, topic):
    """Join every scored document in *mysort* with the relevance judgements for
    *topic* and insert the annotated records into *mycount*.

    A document is inserted once per matching judgement (carrying its ``relate``
    value); documents with no judgement are inserted once with ``related=-1``.
    Reads the module-level ``mytopic`` collection for the judgements.
    """

    def build_record(doc, related):
        # One output record: judgement label plus all scoring fields copied over.
        return {"PMID": doc['PMID'], "related": related, "ab_score": doc["ab_score"],
                "idf_para": doc['idf_para'],
                "cmk_len": doc['cmk_len'], "cmk_freq": doc['cmk_freq'],
                'bm25_cmk_score': doc['bm25_cmk_score'],
                'gx': doc['gx'],
                "bm25_score": doc['bm25_score'],
                "ChemicalNameList": doc['ChemicalNameList'],
                "MeshHeadingNameList": doc['MeshHeadingNameList'],
                "KeywordsList": doc['KeywordsList']}

    fields = {'PMID', 'ab_score', 'idf_para', 'cmk_len', 'cmk_freq', 'bm25_cmk_score',
              'gx', 'bm25_score', 'ChemicalNameList', 'MeshHeadingNameList', 'KeywordsList'}
    for doc in mysort.find({}, fields):
        hits = 0
        for judged in mytopic.find({"topic": topic}, {'PMID', 'relate'}):
            if doc['PMID'] == judged['PMID']:
                result = mycount.insert_one(build_record(doc, judged['relate']))
                print(result)
                hits = hits + 1
        if hits == 0:
            result = mycount.insert_one(build_record(doc, -1))
            print(result)
if __name__ == '__main__':
    # Score the corpus against topic 29 with threshold 6, then attach the
    # relevance judgements to every stored result.
    sortsecond(mywords,mydata,6)
    count(mydata,mycount,"29")
| [
"1714624946@qq.com"
] | 1714624946@qq.com |
db080b1433d8d0062e349ca5f74f3c43d95a0747 | 8c4923e80377501e919b46ab1442d8d9843acbcf | /bin/tensorboard | 0b059c7a4b81183dd82e7ff63a03b8cdd557ff17 | [
"MIT"
] | permissive | khaifahmi99/embeddings | 943fe221d71d235a53b4bc1075ed6df9ebb7e5ad | 9213a684e8cce71cd7f26717cc06593a777b5b84 | refs/heads/master | 2022-10-22T07:51:43.291162 | 2019-10-12T05:11:35 | 2019-10-12T05:11:35 | 214,581,631 | 0 | 0 | MIT | 2022-09-30T19:46:07 | 2019-10-12T05:10:04 | Python | UTF-8 | Python | false | false | 276 | #!/Users/khairunnasulfahmi/Desktop/GraphEmbedding-master/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from tensorboard.main import run_main
# Console-script entry point (generated by pip/setuptools) for TensorBoard.
if __name__ == '__main__':
    # Strip any "-script.pyw" / ".exe" suffix that Windows launchers append to
    # argv[0] so TensorBoard's own argument parsing sees a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(run_main())
| [
"khaifahmi99@github.com"
] | khaifahmi99@github.com | |
02f79c80ab4b1412100a41752657cb3b9aebad7a | b4e5f627c489472b8348f070ddb6c07eb8ce0c33 | /sentiment_logistic_regression/sentiment_logistic_regression.py | b79f1302918bb33feed417a6994fdc2178ace615 | [] | no_license | bialeckim5/NLP_Story_Cloze_Public | f350f2f843cc74806b8692a6270c5f60db2b9f26 | db0795c361972553fd0e6314c26702aeb55f07b9 | refs/heads/master | 2022-12-22T17:08:13.782618 | 2019-12-08T22:01:31 | 2019-12-08T22:01:31 | 243,827,703 | 0 | 0 | null | 2022-12-08T07:22:23 | 2020-02-28T18:21:55 | Python | UTF-8 | Python | false | false | 1,366 | py | import os
import pandas
from numpy import dot
from numpy.linalg import norm
import numpy as np
from sklearn.linear_model import LogisticRegression
import ast
# All I/O locations are resolved relative to the current working directory;
# the sentiment-feature CSVs live in ../Data next to this script.
data_dir = os.path.join(os.getcwd(), os.path.normpath("../Data"))
data_path = os.path.join(data_dir, "Data_sentiment.csv")  # training features
test_path = os.path.join(data_dir, "Test_sentiment.csv")  # test features (only referenced in a commented line below)
validation_path = os.path.join(data_dir, "Validation_sentiment.csv")  # split actually evaluated
output_path = os.path.join(os.getcwd(), "prediction.txt")  # predicted endings, one per line
gold_path = os.path.join(data_dir, "gold.txt")  # gold labels written out for scoring
def get_compound(x):
    """Parse a sentiment-score dict from its string form and return its
    'compound' value.

    :param x: string repr of a dict containing a 'compound' key
    :return: the numeric 'compound' score
    """
    scores = ast.literal_eval(x)
    return scores['compound']
train_df = pandas.read_csv(data_path)
# NOTE(review): the "test" frame is loaded from the *validation* CSV; the real
# test split is only referenced in the commented line below — confirm intent.
test_df = pandas.read_csv(validation_path)
# val_df = pandas.read_csv(test_path)
# Labels: which of the two candidate endings is correct.
train_expected = train_df['AnswerRightEnding']
test_expected = test_df['AnswerRightEnding']
# Drop the label and bookkeeping columns so only the per-sentence sentiment
# dict strings remain as features.
train_df = train_df.drop('AnswerRightEnding', 1)
test_df = test_df.drop('AnswerRightEnding', 1)
train_df = train_df.drop('InputStoryid', 1)
test_df = test_df.drop('InputStoryid', 1)
train_df = train_df.drop('Unnamed: 0', 1)
test_df = test_df.drop('Unnamed: 0', 1)
# Reduce every stored sentiment dict to its single 'compound' score.
train_df = train_df.applymap(get_compound)
test_df = test_df.applymap(get_compound)
# Fit logistic regression on the compound scores and write predictions plus
# the gold labels to disk for external scoring.
model = LogisticRegression()
model.fit(train_df, train_expected)
predicted = model.predict(test_df)
np.savetxt(output_path,predicted, delimiter=",", fmt='%i')
test_expected.to_csv(gold_path, header=False, index=False)
| [
"noreply@github.com"
] | noreply@github.com |
2dbe3592640b1ce3372c9bdef4af9a9a748f92ba | 4740e71b340e9a166cbd6b8108925049ce112861 | /cimcb_lite/plot/roc.py | f395abdd0eefc318123ded627ede3c591cd85e1f | [
"MIT"
] | permissive | RuibingS/cimcb | 84afba916e13edc9bbb74cecb67bf0b8fe0accc9 | 382f7d8fff30d3d276f18ac8c7dc686e0e643fa9 | refs/heads/master | 2023-08-29T10:52:36.412398 | 2021-11-15T15:12:22 | 2021-11-15T15:12:22 | 428,313,404 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 9,035 | py | import numpy as np
from bokeh.models import Band, HoverTool
from bokeh.plotting import ColumnDataSource, figure
from scipy import interp
from sklearn import metrics
from sklearn.metrics import confusion_matrix, roc_auc_score
from sklearn.utils import resample
from ..utils import binary_metrics
def roc_plot(fpr, tpr, tpr_ci, width=450, height=350, xlabel="1-Specificity", ylabel="Sensitivity", legend=True, label_font_size="13pt", title="", errorbar=False, grid_line=True):
    """Creates a rocplot using Bokeh.

    Parameters
    ----------
    fpr : array-like, shape = [n_samples]
        False positive rates. Calculate using roc_calculate.

    tpr : array-like, shape = [n_samples]
        True positive rates. Calculate using roc_calculate.

    tpr_ci : array-like, shape = [n_samples, 2]
        True positive rates 95% confidence intervals [lowci, uppci]. Calculate using roc_calculate.

    width, height : int
        Figure size in pixels.

    errorbar : False or float
        If a float, draw a whisker at the ROC point whose specificity is
        closest to this value.

    grid_line : bool
        Set False to hide the background grid.

    Returns
    -------
    bokeh Figure with the ROC curve, its 95% CI band and optional error bar.
    """
    # Split the CI array into its lower/upper bands.
    tpr_lowci = tpr_ci[0]
    tpr_uppci = tpr_ci[1]
    # NOTE(review): `auc` is computed but never used in this function; kept as-is.
    auc = metrics.auc(fpr, tpr)

    # Specificity and half-width of the CI, exposed to the HoverTool tooltips.
    spec = 1 - fpr
    ci = (tpr_uppci - tpr_lowci) / 2

    # Base figure with a little padding beyond the [0, 1] unit square.
    data = {"x": fpr, "y": tpr, "lowci": tpr_lowci, "uppci": tpr_uppci, "spec": spec, "ci": ci}
    source = ColumnDataSource(data=data)
    fig = figure(title=title, plot_width=width, plot_height=height, x_axis_label=xlabel, y_axis_label=ylabel, x_range=(-0.06, 1.06), y_range=(-0.06, 1.06))

    # Chance diagonal plus the ROC curve itself (hover shows spec/sens +/- CI).
    fig.line([0, 1], [0, 1], color="black", line_dash="dashed", line_width=2.5, legend="Equal distribution line")
    figline = fig.line("x", "y", color="green", line_width=3.5, alpha=0.6, legend="ROC Curve (Train)", source=source)
    fig.add_tools(HoverTool(renderers=[figline], tooltips=[("Specificity", "@spec{1.111}"), ("Sensitivity", "@y{1.111} (+/- @ci{1.111})")]))

    # Shaded 95% CI band under the curve.
    figband = Band(base="x", lower="lowci", upper="uppci", level="underlay", fill_alpha=0.1, line_width=1, line_color="black", fill_color="green", source=source)
    fig.add_layout(figband)

    # Optional error bar at the ROC point closest to specificity == errorbar.
    if errorbar is not False:
        idx = np.abs(fpr - (1 - errorbar)).argmin()  # closest fpr to the requested 1-specificity
        fpr_eb = fpr[idx]
        tpr_eb = tpr[idx]
        tpr_lowci_eb = tpr_lowci[idx]
        tpr_uppci_eb = tpr_uppci[idx]

        # Edge case: If this is a perfect roc curve, and specificity >= 1, make sure error_bar is at (0,1) not (0,0)
        if errorbar >= 1:
            for i in range(len(fpr)):
                if fpr[i] == 0 and tpr[i] == 1:
                    fpr_eb = 0
                    tpr_eb = 1
                    tpr_lowci_eb = 1
                    tpr_uppci_eb = 1

        # Vertical whisker with top/bottom caps plus the marker point itself.
        roc_whisker_line = fig.multi_line([[fpr_eb, fpr_eb]], [[tpr_lowci_eb, tpr_uppci_eb]], line_alpha=1, line_color="black")
        roc_whisker_bot = fig.multi_line([[fpr_eb - 0.03, fpr_eb + 0.03]], [[tpr_lowci_eb, tpr_lowci_eb]], line_color="black")
        roc_whisker_top = fig.multi_line([[fpr_eb - 0.03, fpr_eb + 0.03]], [[tpr_uppci_eb, tpr_uppci_eb]], line_alpha=1, line_color="black")
        fig.circle([fpr_eb], [tpr_eb], size=8, fill_alpha=1, line_alpha=1, line_color="black", fill_color="white")

    # Font sizes for title, axis labels and legend entries.
    fig.title.text_font_size = "11pt"
    fig.xaxis.axis_label_text_font_size = label_font_size
    fig.yaxis.axis_label_text_font_size = label_font_size
    fig.legend.label_text_font = "10pt"

    # Extra padding so labels are not clipped.
    fig.min_border_left = 20
    fig.min_border_right = 20
    fig.min_border_top = 20
    fig.min_border_bottom = 20

    # Optionally remove grid lines.
    if grid_line == False:
        fig.xgrid.visible = False
        fig.ygrid.visible = False

    # Legend placement / visibility.
    fig.legend.location = "bottom_right"
    fig.legend.label_text_font_size = "10pt"
    if legend is False:
        fig.legend.visible = False
    return fig
def roc_calculate(Ytrue, Yscore, bootnum=1000, metric=None, val=None):
    """Calculates required metrics for the roc plot function (fpr, tpr, and tpr_ci).

    Parameters
    ----------
    Ytrue : array-like, shape = [n_samples]
        Binary label for samples (0s and 1s)

    Yscore : array-like, shape = [n_samples]
        Predicted y score for samples

    bootnum : int
        Number of bootstrap resamples used for the vertical-averaging 95% CI.

    metric : None, "specificity" or "cutoffscore"
        When given, additionally compute binary metrics at the operating point
        fixed by ``val`` and bootstrap their CIs.

    val : float
        Value of the chosen metric (required when ``metric`` is not None).

    Returns
    ----------------------------------
    fpr : array-like, shape = [n_samples]
        False positive rates.

    tpr : array-like, shape = [n_samples]
        True positive rates.

    tpr_ci : array-like, shape = [n_samples, 2]
        True positive rates 95% confidence intervals [lowci, uppci].

    stats, bootci_stats : dict (only when metric is not None)
        Point metrics at the operating point and their bootstrap 95% CIs.
    """
    # Get fpr, tpr
    fpr, tpr, threshold = metrics.roc_curve(Ytrue, Yscore, pos_label=1, drop_intermediate=False)

    # fpr, tpr with drop_intermediates for fpr = 0 (useful for plot... since we plot specificity on x-axis, we don't need intermediates when fpr=0)
    tpr0 = tpr[fpr == 0][-1]
    tpr = np.concatenate([[tpr0], tpr[fpr > 0]])
    fpr = np.concatenate([[0], fpr[fpr > 0]])

    # if metric is provided, calculate stats at the requested operating point
    if metric is not None:
        specificity, sensitivity, threshold = get_spec_sens_cuttoff(Ytrue, Yscore, metric, val)
        stats = get_stats(Ytrue, Yscore, specificity)
        stats["val_specificity"] = specificity
        # BUG FIX: val_sensitivity previously stored the *specificity* value;
        # the computed sensitivity was never used.
        stats["val_sensitivity"] = sensitivity
        stats["val_cutoffscore"] = threshold

    # bootstrap using vertical averaging
    tpr_boot = []
    boot_stats = []
    for i in range(bootnum):
        # Resample and get tpr, fpr
        Ytrue_res, Yscore_res = resample(Ytrue, Yscore)
        fpr_res, tpr_res, threshold_res = metrics.roc_curve(Ytrue_res, Yscore_res, pos_label=1, drop_intermediate=False)
        # Drop intermediates when fpr=0
        tpr0_res = tpr_res[fpr_res == 0][-1]
        tpr_res = np.concatenate([[tpr0_res], tpr_res[fpr_res > 0]])
        fpr_res = np.concatenate([[0], fpr_res[fpr_res > 0]])
        # Vertical averaging... use closest fpr_res to fpr, and append the corresponding tpr
        idx = [np.abs(i - fpr_res).argmin() for i in fpr]
        tpr_list = tpr_res[idx]
        tpr_boot.append(tpr_list)
        # if metric is provided, calculate stats on the resample as well
        if metric is not None:
            stats_res = get_stats(Ytrue_res, Yscore_res, specificity)
            boot_stats.append(stats_res)

    # Get CI for bootstat: percentile interval per metric, ignoring NaNs
    if metric is not None:
        bootci_stats = {}
        for i in boot_stats[0].keys():
            stats_i = [k[i] for k in boot_stats]
            stats_i = np.array(stats_i)
            stats_i = stats_i[~np.isnan(stats_i)]  # Remove nans
            try:
                lowci = np.percentile(stats_i, 2.5)
                uppci = np.percentile(stats_i, 97.5)
            except IndexError:
                lowci = np.nan
                uppci = np.nan
            bootci_stats[i] = [lowci, uppci]

    # Get CI for tpr across the bootstrap curves
    tpr_lowci = np.percentile(tpr_boot, 2.5, axis=0)
    tpr_uppci = np.percentile(tpr_boot, 97.5, axis=0)

    # Add the starting 0 so every curve begins at the origin
    tpr = np.insert(tpr, 0, 0)
    fpr = np.insert(fpr, 0, 0)
    tpr_lowci = np.insert(tpr_lowci, 0, 0)
    tpr_uppci = np.insert(tpr_uppci, 0, 0)

    # Concatenate tpr_ci
    tpr_ci = np.array([tpr_lowci, tpr_uppci])
    if metric is None:
        return fpr, tpr, tpr_ci
    else:
        return fpr, tpr, tpr_ci, stats, bootci_stats
def get_sens_spec(Ytrue, Yscore, cuttoff_val):
    """Return (sensitivity, specificity) after binarising Yscore at cuttoff_val."""
    # Scores strictly above the cutoff are predicted positive.
    y_pred = np.where(np.array(Yscore) > cuttoff_val, 1, 0)
    tn, fp, fn, tp = metrics.confusion_matrix(Ytrue, y_pred).ravel()
    return tp / (tp + fn), tn / (tn + fp)
def get_sens_cuttoff(Ytrue, Yscore, specificity_val):
    """Return (sensitivity, threshold) at the ROC point whose specificity is
    closest to *specificity_val*."""
    target_fpr = 1 - specificity_val
    fpr, sens, thresholds = metrics.roc_curve(Ytrue, Yscore, pos_label=1, drop_intermediate=False)
    nearest = np.abs(fpr - target_fpr).argmin()  # index of the closest fpr
    # Perfect-classifier edge case: at specificity == 1 the nearest point can
    # be (0, 0); if the curve also reaches (1, 1) report sensitivity 1 instead.
    if specificity_val == 1 and sens[nearest] == 0:
        if any(f == 1 and s == 1 for f, s in zip(fpr, sens)):
            return 1, 0.5
    return sens[nearest], thresholds[nearest]
def get_spec_sens_cuttoff(Ytrue, Yscore, metric, val):
    """Return (specificity, sensitivity, threshold) given which *metric* the
    value *val* fixes ("specificity" or "cutoffscore")."""
    if metric == "cutoffscore":
        threshold = val
        sensitivity, specificity = get_sens_spec(Ytrue, Yscore, val)
    elif metric == "specificity":
        specificity = val
        sensitivity, threshold = get_sens_cuttoff(Ytrue, Yscore, val)
    # Any other metric leaves the locals unbound and raises here, as before.
    return specificity, sensitivity, threshold
def get_stats(Ytrue, Yscore, specificity):
    """Calculates binary metrics at the cutoff matching *specificity*.

    Only the cutoff is needed here; the sensitivity returned alongside it was
    previously bound to an unused local and is now discarded explicitly.
    """
    _, cutoffscore = get_sens_cuttoff(Ytrue, Yscore, specificity)
    stats = binary_metrics(Ytrue, Yscore, cut_off=cutoffscore)
    return stats
| [
"hvbever@gmail.com"
] | hvbever@gmail.com |
d0303d53dd3eba23fd2b686900359aa35a47c0bb | 18aee5d93a63eab684fe69e3aa0abd1372dd5d08 | /python/paddle/vision/models/alexnet.py | 4239395c03319dd88ea9923153eb9cc250de73f0 | [
"Apache-2.0"
] | permissive | Shixiaowei02/Paddle | 8d049f4f29e281de2fb1ffcd143997c88078eadb | 3d4d995f26c48f7792b325806ec3d110fc59f6fc | refs/heads/develop | 2023-06-26T06:25:48.074273 | 2023-06-14T06:40:21 | 2023-06-14T06:40:21 | 174,320,213 | 2 | 1 | Apache-2.0 | 2022-12-28T05:14:30 | 2019-03-07T10:09:34 | C++ | UTF-8 | Python | false | false | 7,002 | py | # copyright (c) 2022 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
import paddle.nn.functional as F
from paddle import nn
from paddle.fluid.param_attr import ParamAttr
from paddle.nn import Conv2D, Dropout, Linear, MaxPool2D, ReLU
from paddle.nn.initializer import Uniform
from paddle.utils.download import get_weights_path_from_url
# Pretrained weight registry: arch name -> (download URL, expected MD5 digest).
model_urls = {
    "alexnet": (
        "https://paddle-imagenet-models-name.bj.bcebos.com/dygraph/AlexNet_pretrained.pdparams",
        "7f0f9f737132e02732d75a1459d98a43",
    )
}

# Nothing is exported via `from ... import *`; the public API is re-exported
# by the package's __init__ instead.
__all__ = []
class ConvPoolLayer(nn.Layer):
    """Conv2D -> optional ReLU -> 3x3 stride-2 max-pool block.

    Both conv weight and bias are initialised uniformly in [-stdv, stdv].
    Attribute names (``relu``/``_conv``/``_pool``) define the state-dict keys
    and must not change.
    """

    def __init__(self, input_channels, output_channels, filter_size, stride,
                 padding, stdv, groups=1, act=None):
        super().__init__()
        # Activation is optional; only "relu" enables it.
        self.relu = None
        if act == "relu":
            self.relu = ReLU()
        self._conv = Conv2D(
            in_channels=input_channels,
            out_channels=output_channels,
            kernel_size=filter_size,
            stride=stride,
            padding=padding,
            groups=groups,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
        )
        self._pool = MaxPool2D(kernel_size=3, stride=2, padding=0)

    def forward(self, inputs):
        out = self._conv(inputs)
        if self.relu is not None:
            out = self.relu(out)
        return self._pool(out)
class AlexNet(nn.Layer):
    """AlexNet model from
    `"ImageNet Classification with Deep Convolutional Neural Networks"
    <https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf>`_.

    Args:
        num_classes (int, optional): Output dim of last fc layer. If num_classes <= 0, last fc layer
                            will not be defined. Default: 1000.

    Returns:
        :ref:`api_paddle_nn_Layer`. An instance of AlexNet model.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import AlexNet

            alexnet = AlexNet()

            x = paddle.rand([1, 3, 224, 224])
            out = alexnet(x)

            print(out.shape)
            # [1, 1000]
    """

    def __init__(self, num_classes=1000):
        super().__init__()
        self.num_classes = num_classes
        # Each layer's init bound is stdv = 1/sqrt(fan_in); the local is
        # deliberately recomputed before every layer it applies to.
        stdv = 1.0 / math.sqrt(3 * 11 * 11)
        self._conv1 = ConvPoolLayer(3, 64, 11, 4, 2, stdv, act="relu")
        stdv = 1.0 / math.sqrt(64 * 5 * 5)
        self._conv2 = ConvPoolLayer(64, 192, 5, 1, 2, stdv, act="relu")
        stdv = 1.0 / math.sqrt(192 * 3 * 3)
        # conv3/conv4 are plain convolutions (ReLU is applied in forward, no pool).
        self._conv3 = Conv2D(
            192,
            384,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
        )
        stdv = 1.0 / math.sqrt(384 * 3 * 3)
        self._conv4 = Conv2D(
            384,
            256,
            3,
            stride=1,
            padding=1,
            weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
        )
        stdv = 1.0 / math.sqrt(256 * 3 * 3)
        self._conv5 = ConvPoolLayer(256, 256, 3, 1, 1, stdv, act="relu")
        # Classifier head (dropout + three fc layers) is only built when a
        # positive class count is requested.
        if self.num_classes > 0:
            stdv = 1.0 / math.sqrt(256 * 6 * 6)
            self._drop1 = Dropout(p=0.5, mode="downscale_in_infer")
            self._fc6 = Linear(
                in_features=256 * 6 * 6,
                out_features=4096,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            )
            self._drop2 = Dropout(p=0.5, mode="downscale_in_infer")
            # NOTE: fc7/fc8 reuse the stdv computed for fc6's fan-in (256*6*6),
            # not their own fan-in of 4096 — kept as-is to match the shipped
            # pretrained weights.
            self._fc7 = Linear(
                in_features=4096,
                out_features=4096,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            )
            self._fc8 = Linear(
                in_features=4096,
                out_features=num_classes,
                weight_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
                bias_attr=ParamAttr(initializer=Uniform(-stdv, stdv)),
            )

    def forward(self, inputs):
        # Feature extractor: conv1/2/5 carry their own ReLU+pool, conv3/4 get
        # a functional ReLU here.
        x = self._conv1(inputs)
        x = self._conv2(x)
        x = self._conv3(x)
        x = F.relu(x)
        x = self._conv4(x)
        x = F.relu(x)
        x = self._conv5(x)
        # Classifier head: flatten then fc6-fc8 with dropout; skipped entirely
        # when num_classes <= 0 (feature-extractor mode).
        if self.num_classes > 0:
            x = paddle.flatten(x, start_axis=1, stop_axis=-1)
            x = self._drop1(x)
            x = self._fc6(x)
            x = F.relu(x)
            x = self._drop2(x)
            x = self._fc7(x)
            x = F.relu(x)
            x = self._fc8(x)
        return x
def _alexnet(arch, pretrained, **kwargs):
    """Build an AlexNet and, when requested, load the pretrained weights
    registered for *arch* in ``model_urls``."""
    model = AlexNet(**kwargs)
    if not pretrained:
        return model
    assert (
        arch in model_urls
    ), "{} model do not have a pretrained model now, you should set pretrained=False".format(
        arch
    )
    url, md5sum = model_urls[arch]
    weight_path = get_weights_path_from_url(url, md5sum)
    state = paddle.load(weight_path)
    model.load_dict(state)
    return model
def alexnet(pretrained=False, **kwargs):
    """AlexNet model from
    `"ImageNet Classification with Deep Convolutional Neural Networks"
    <https://proceedings.neurips.cc/paper/2012/file/c399862d3b9d6b76c8436e924a68c45b-Paper.pdf>`_.

    Args:
        pretrained (bool, optional): If True, load ImageNet-pretrained weights.
            Default: False.
        **kwargs (optional): Extra keyword arguments forwarded to
            :ref:`AlexNet <api_paddle_vision_AlexNet>`.

    Returns:
        :ref:`api_paddle_nn_Layer`. An instance of AlexNet model.

    Examples:
        .. code-block:: python

            import paddle
            from paddle.vision.models import alexnet

            # build model (pass pretrained=True to load ImageNet weights)
            model = alexnet()

            x = paddle.rand([1, 3, 224, 224])
            out = model(x)

            print(out.shape)
            # [1, 1000]
    """
    return _alexnet('alexnet', pretrained, **kwargs)
| [
"noreply@github.com"
] | noreply@github.com |
c7cf91c46235f4152c9293077d627cce2c1d7f38 | 930c5bacbe08d287fa732fb8b5f46391ccce548e | /blog/blogpost/urls.py | e0ddf1f286eb7f3c168186f426451d891e810af8 | [] | no_license | Gourab342/DjangoRECIPEBlogProject | 1f4c4dbfc5ae637070f8c4d1b182eb85da4063b3 | 2d0c248f48230be384180506e0c8ecd90b7f2896 | refs/heads/master | 2023-04-04T10:26:50.853672 | 2021-04-15T18:17:29 | 2021-04-15T18:17:29 | 358,273,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,400 | py |
from django.urls import path
from . import views, bloggerview
from .bloggerview import EditBlogPost
urlpatterns = [
    # Public pages
    path('', views.home, name="home"),
    # Authentication
    path('register', views.register, name="register"),
    path('login', views.log_in, name="login"),
    path('doLogin', views.doLogin, name="doLogin"),
    path('homead', views.homead, name="homead"),
    path('logout', views.log_out, name="logout"),
    # Browsing and search
    path('posts', views.Posts, name="posts"),
    path('search', views.search, name="search"),
    path('contact/', views.contact, name="contact"),
    path('profile/<int:pk>', views.profile, name="profile"),
    path('try', views.viewtry, name="try"),
    path('<int:sno>', views.recipe, name="recipe"),
    path('postComment', views.postComment, name="postComment"),
    path('test', views.test, name='test'),
    # Blogger registration / post management
    path('regform', views.regform, name='regform'),
    # NOTE(review): the URL name below has a typo ("bloggger"); left unchanged
    # because templates may reverse() it by this exact name.
    path('add_blogger_save/', views.add_blogger_save, name="add_bloggger_save"),
    path('add_Posts_save/', bloggerview.add_Posts_save, name="add_Posts_save"),
    path('Addpostform/', bloggerview.Addpostform, name="Addpostform"),
    path('<int:pk>/edit', EditBlogPost.as_view(), name="editpost"),
    path('<int:pk>/delete', bloggerview.DeletePost, name="deletepost"),
    path('<int:pk>/confirmdel', bloggerview.delconf, name="confirmdel"),
    # Category filter
    path('category/<str:slug>', views.CategorySearch, name="category")
] | [
"malakaragourab1234@gmail.com"
] | malakaragourab1234@gmail.com |
ab41c3fd9597dfbdad46976ce2975798e0607750 | 62b834fad237a6488c130fce82611075da63b2a5 | /DCSwtich.py | b7e73c27cc5a655e6a3013d372878211b0563617 | [] | no_license | UtorYeung/dominant_contract_switch | 84a365272bacbe1b422b317feea34f0098026145 | f6c1400aefb84348ccd75c29e6e40399f93c3cee | refs/heads/master | 2021-10-10T17:16:38.859547 | 2019-01-14T13:01:11 | 2019-01-14T13:01:11 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 924 | py | # encoding: UTF-8
"""
2019.01.07 负责主力合约切换相关功能
https://github.com/mhxueshan/dominant_contract_switch
"""
from rb import SHFE_RB
CLASS_DIC = {"SHFE.RB": SHFE_RB}
class DCSwitch(object):
def __init__(self, symbol):
"""
init
:param symbol: 品种 SHFE.RB.bar.60
"""
t1 = symbol.index(".")
t2 = symbol[t1 + 1:].index(".")
key = symbol[:t1 + t2 + 1]
self.handler = CLASS_DIC[key]() if key in CLASS_DIC else None
def is_last_half_an_hour_switch(self, bar):
return self.handler.is_last_half_an_hour_switch(bar)
def is_switch_time_and_sign(self, bar):
"""
判断symbol品种在time这个时刻
:param bar: k线
:return: true/false
"""
ret = self.handler.is_time_in(bar.datetime)
if ret:
bar.__dict__[DCSwitch.SIGN] = True
return ret
| [
"mhr-68@qq.com"
] | mhr-68@qq.com |
d982e57321e175c443864bf3e6feb6695a2bf6b8 | 5b7cb6735037a27993debca0999627c25ee3f6e3 | /library-manager/library-manager/settings.py | 6f6a8c21fe71178b4e77902d68973421723cc953 | [] | no_license | Huzaifa785/library-manager-software-using-django | b311b904375be29acc455060f702ee16b69b6d0b | b569f18b06a75ed0a4a08ca16c95885799e3ab16 | refs/heads/master | 2023-01-27T15:12:31.642952 | 2020-11-22T07:09:09 | 2020-11-22T07:09:09 | 314,790,168 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,373 | py | """
Django settings for library-manager project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
import os
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'e&8vfxqt402__$r%%@cz*ic)aeee&x41tfs9=!3j#=i8dck+rj'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'accounts.apps.AccountsConfig',
'django_filters',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'whitenoise.middleware.WhiteNoiseMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'library-manager.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['library-manager/templates'],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'library-manager.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/images/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR, 'static/images')
| [
"huzaifa.coder785@gmail.com"
] | huzaifa.coder785@gmail.com |
ff4ae30a5bc2aa2818fcf1314ca8b8c98913fbaf | c8be7becd7466bd6639382156e0886fce3cfb386 | /array_list_repeat.py | cd49a02546a3accdc328440da4354653614c9424 | [] | no_license | wlgud0402/pyfiles | 864db71827aba5653d53320322eb8de8b0a5fc49 | 0e8b96c4bbfb20e1b5667ce482abe75061662299 | refs/heads/master | 2021-02-28T00:42:51.321207 | 2020-03-09T10:48:52 | 2020-03-09T10:48:52 | 245,648,824 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 393 | py | #반복문을 사용한 리스트 생성
array = []
for i in range(0,20,2):
array.append(i * i)
print(array)
print()
#리스트 안에 for문 사용하기
list_a = [z * z for z in range(0, 20, 2)] #최종결과를 앞에 작성 z*z
print(list_a)
print()
#if문도 추가하기
newarray = [1,2,3,4,5,6,7,8,9]
output = [number for number in newarray if number != 3]
print(output) | [
"wlgudrlgus@naver.com"
] | wlgudrlgus@naver.com |
59c206ec9c74dc43ae1396d3c86191d5c9202576 | ccfda3333bf17dc83cd094ead340daedd21e9426 | /src/module_and_package/module/module.py | 7fb46c64ed7c38ce7ddec6dcc4ac1d63a924f6be | [] | no_license | rajivmanivannan/learning-python | cbf3c1231df4af62e3d64a35d468ddff58c9e95f | ca91767226882cf104ca81cb2741168aa374f078 | refs/heads/master | 2021-02-10T08:30:48.514452 | 2020-03-15T15:50:43 | 2020-03-15T15:50:43 | 244,365,644 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,553 | py | #!/usr/bin/env python3
# encoding= utf-8
"""
Python Modules
Module is nothing but a code library.
A file containing a set of functions you want to include in your application.
"""
import sys
sys.path.append('/src/module_and_package/module')
import arithmetic
print(arithmetic.add(1,2))
# Create an alias when you import a module,by using the as keyword:
import arithmetic as ao
print(ao.sub(5,2))
# Import a method from a module
from arithmetic import add
print(add(1,8))
"""
# Standard library imports
import datetime
import osh
# Third party imports
from flask import Flask
# Local application imports
from local_module import local_class
from local_package import local_function
"""
# .pyc: This is the compiled bytecode. If you import a module, python will build a *.pyc file
# that contains the bytecode to make importing it again later easier (and faster).
"""
Python Module and Package
Module is a single file (or files) that are imported under one import and used.
#import arithmetic
Package is a collection of modules in directories that give a package hierarchy.
#from my_package.timing.internets import function_name_x
"""
"""
Python PIP
PIP is a package manager for Python packages, or modules.
It will download the packages or modules from the following repository.
https://pypi.org
# To see the PIP version
# pip --version
# To download and install package
# pip install <packageName>
# To uninstall the package
# pip uninstall <packageName>
# To List the all installed package in the system
# pip list
"""
| [
"rajivroczzz@gmail.com"
] | rajivroczzz@gmail.com |
4053abd93af9ad0c0526a0ef1774e9fd4a1981cc | 97d78f39d39abcc54b1e71dea5338783fd1e27d6 | /userapp/models.py | 7716197c59e40177aa812a6cc2333b88a344f2a0 | [] | no_license | AmitMhamunkar/FoodMarket | 00242cd373fe98d65f32e4eacb996848c737a0be | 449528ae1070296b32d3e914a7b9b9c1d9882ddc | refs/heads/master | 2022-09-18T14:17:06.206568 | 2020-06-02T08:26:19 | 2020-06-02T08:26:19 | 268,738,764 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | from django.db import models
# Create your models here.
class UserModel(models.Model):
#utypes=[('User','User'),('Admin','Admin')]
#name=models.CharField(max_length=30,default='NA')
#addr=models.CharField(max_length=30,default='NA')
#contact=models.IntegerField(default=0)
email=models.CharField(max_length=30)
password=models.CharField(max_length=20)
#utype = models.CharField(max_length=15,choices=utypes)
def __str__(self):
return "{0} {1}".format(self.email,self.password)
class AdminModel(models.Model):
email=models.CharField(max_length=30)
password=models.CharField(max_length=20)
def __str__(self):
return "{0} {1}".format(self.email,self.password) | [
"amitmhamunkar100@gmail.com"
] | amitmhamunkar100@gmail.com |
204fe9e94a0a9b3130a44e0363116f41f550c9d7 | df5ec9882071b9d17a3b4a1f044dec2925897f26 | /euler3.py | ca100ffeeb3e504aa637a083d19de6d048bf5a8e | [] | no_license | way0utwest/projecteuler | df2dae0e751aeba971107c6abf1e53ad6bafb3ed | e6db912e579b35d8289ef985c99ac61d3b8db6ac | refs/heads/main | 2021-09-21T03:55:46.471728 | 2021-08-12T23:46:42 | 2021-08-12T23:46:42 | 136,180,616 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 864 | py | import sys
def GetFactors(bignumber):
factors = []
potentialfactor = 2
while potentialfactor < (bignumber / 2):
largefactor = bignumber // potentialfactor
if bignumber % potentialfactor == 0:
if IsPrime(largefactor):
if (largefactor) not in factors:
factors.append(largefactor)
return factors
potentialfactor += 1
return factors
def IsPrime(number):
if number <= 3:
return True
if (number % 2 == 0) or (number % 3 == 0):
return False
i = 5
while(i * i < number):
if number%i == 0 or (number%(i+2) == 0):
return False
i = i + 6
return True
if __name__ == "__main__":
para = int(sys.argv[1])
print(para, GetFactors(para))
#for i in range(25):
# print(i, IsPrime(i))
| [
"sjones@sqlservercentral.com"
] | sjones@sqlservercentral.com |
8ea079a13dc4c3ceec9f19bfd3a0c095d81c8aba | 99dd24de1ab5e6a35afda75114f1d61a58c3a432 | /firstever.py | 6b4155352cf2a6dcfa78886362085f56ca652d31 | [] | no_license | mkulg/testrepo | b085f22bac67e08694a0dc770cb6feb9ef051f4c | 821de2f953c823dd6ae1384cb46f1f6a6643f65d | refs/heads/main | 2023-07-08T03:04:59.758760 | 2021-08-02T05:11:04 | 2021-08-02T05:11:04 | 391,821,827 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 40 | py | # Display text
print("New python file")
| [
"noreply@github.com"
] | noreply@github.com |
df959fbdab79122916da6992d481ab1ac1b5c61c | f74ce4b0c4049e2a776e50eadb0ca1648153ce3b | /assignments/Loops/loops5.py | d7f4e1cf558d76ba63a8c0279afbd8d308f6f4d0 | [] | no_license | Hallldor/school_projects | 91edd54d85592ce38310bcbc0c1102cccfc616fb | ff9f31d0077df985818d64c4086c79632356c15e | refs/heads/master | 2022-01-28T16:46:10.538279 | 2018-11-01T14:32:16 | 2018-11-01T14:32:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 108 | py | my_int = int(input("Insert a number "))
while my_int > 0:
print (my_int)
my_int -= 1
print("Boom!")
| [
"halldorv18@ru.is"
] | halldorv18@ru.is |
f77f0775639b709b2c1b351766ef9414bff640e3 | 4fa19bf991a2eeda6f61d6b1c612fd2d3d5df873 | /backward_warp.py | 46c9e5253120eaf9b339f9865b0f5c990ae1785e | [] | no_license | wbhu/warping-torch | 4a27963e28a5eb27571c98d5f4d19e2af28365f8 | 4e5b27f5a5d9b0157bf004dd321cadec9248310f | refs/heads/master | 2022-11-12T06:23:50.111854 | 2020-07-04T07:35:46 | 2020-07-04T07:35:46 | 277,061,583 | 4 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,589 | py | #!/usr/bin/env python
"""
File Name : warping-torch-backward_warp
date : 4/7/2020
Author : wenbo
Email : huwenbodut@gmail.com
Description :
_ _
( |---/ )
) . . (
________________________,--._(___Y___)_,--._______________________
`--' `--'
"""
import torch.nn as nn
from torch.nn import functional as F
import torch
import numpy as np
class BackwardWarp(nn.Module):
def __init__(self, height=256, width=256, cuda=True):
super(BackwardWarp, self).__init__()
self.H = height
self.W = width
remapW, remapH = np.meshgrid(np.arange(width), np.arange(height))
reGrid = np.stack((2.0 * remapW / max(width - 1, 1) - 1.0, 2.0 * remapH / max(height - 1, 1) - 1.0), axis=-1)
reGrid = reGrid[np.newaxis, ...]
self.grid = torch.from_numpy(reGrid.astype(np.float32))
self.cuda = cuda
def forward(self, x, flow):
# x is img, in N*C*H*W format
# flow is in N*2*H*W format
# flow[:,0,:,:] is the W direction (X axis) flow map !!
flow_tmp = flow.clone()
flow_tmp[:, 0, :, :] /= self.W
flow_tmp[:, 1, :, :] /= self.H
if self.cuda:
grid = self.grid.cuda(flow_tmp.get_device()) + 2.0 * flow_tmp.permute(0, 2, 3, 1)
else:
grid = self.grid + 2.0 * flow_tmp.permute(0, 2, 3, 1)
return F.grid_sample(x, grid, padding_mode='zeros', mode='bilinear', align_corners=True)
| [
"huwenbodut@gmail.com"
] | huwenbodut@gmail.com |
bce103dc7b6556103bcf4ee078ccb63cf0a269bc | 95f7ec6df7721b5de9a5667b214f4422fb52f56a | /tavern/util/loader/load_case.py | 81bb34579b4aed81e856f8f4b65b28eae3dfa898 | [
"MIT"
] | permissive | BangWork/tavern | 4ba1f9505c7593079be294788732b13f585af9d2 | 050308841461894a28b07bd2ece85a9b48ff2df4 | refs/heads/master | 2020-04-12T13:56:04.454198 | 2019-10-12T10:03:03 | 2019-10-12T10:03:03 | 162,536,746 | 0 | 0 | MIT | 2019-07-08T07:01:35 | 2018-12-20T06:30:48 | Python | UTF-8 | Python | false | false | 462 | py | import logging
import os.path
from .yaml_loader import IncludeLoader
from .path_loader import yaml_loader
logger = logging.getLogger(__name__)
def construct_include(loader, node):
"""Include file referenced at node."""
# pylint: disable=protected-access
file_path = loader.construct_scalar(node)
file_path = os.path.join(loader._root, file_path)
return yaml_loader(file_path)
IncludeLoader.add_constructor("!include", construct_include)
| [
"binavid.chen@gmail.com"
] | binavid.chen@gmail.com |
b7b1acbddef53902013733177a39e8a8aef6c88d | 19e2634115335fb980297b5d8534337645b55f66 | /Assignment 4/editsave.py | 6b733d095f34083ff70c544ce06e41540c307528 | [] | no_license | DavidVan/CECS-478H | 89977429d517a60a9600facda78bf5f25621156f | cfe2142b66377bdc18eaf87079080eb68076b6fc | refs/heads/master | 2021-11-29T08:32:05.926249 | 2021-11-28T07:20:56 | 2021-11-28T07:20:56 | 122,380,052 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,074 | py | import struct
def main():
with open('SAVED.GAM', 'r+b') as save:
print('Editing Ultima 5 Save File...\n')
str = int(input('Enter the desired amount of STR: '))
int_ = int(input('Enter the desired amount of INT: '))
dex = int(input('Enter the desired amount of DEX: '))
hp = int(input('Enter the desired amount of HP: '))
hm = int(input('Enter the desired amount of HM (Max HP): '))
exp = int(input('Enter the desired amount of EXP: '))
gold = int(input('Enter the desired amount of GOLD: '))
key = int(input('Enter the desired amount of keys: '))
skullkey = int(input('Enter the desired amount of skull keys: '))
blackbadge = int(input('Enter the desired amount of black badges: '))
magiccarpet = int(input('Enter the desired amount of magic carpets: '))
magicaxe = int(input('Enter the desired amount of magic axes: '))
offset = 2
for _ in range(16):
save.seek(offset + 12) # 14
save.write(struct.pack('B', str))
save.seek(offset + 13) # 15
save.write(struct.pack('B', dex))
save.seek(offset + 14) # 16
save.write(struct.pack('B', int_))
save.seek(offset + 16) # 18
save.write(struct.pack('H', hp))
save.seek(offset + 18) # 20
save.write(struct.pack('H', hm))
save.seek(offset + 20) # 22
save.write(struct.pack('H', exp))
offset += 32
save.seek(516)
save.write(struct.pack('H', gold))
save.seek(518)
save.write(struct.pack('B', key))
save.seek(523)
save.write(struct.pack('B', skullkey))
save.seek(536)
save.write(struct.pack('B', blackbadge))
save.seek(522)
save.write(struct.pack('B', magiccarpet))
save.seek(576)
save.write(struct.pack('B', magicaxe))
save.seek(693)
save.write(struct.pack('B', 6)) # Make it so all 6 party members appear.
if __name__ == "__main__":
main() | [
"David@DavidVan.net"
] | David@DavidVan.net |
af1b04d6cf97703519e4498002d19f6698381301 | 5c8139f1e57e06c7eaf603bd8fe74d9f22620513 | /PartB/Py判断是否为合理的括号.py | 2a3605334e6226a0c403baec737ed955220c4db7 | [] | no_license | madeibao/PythonAlgorithm | c8a11d298617d1abb12a72461665583c6a44f9d2 | b4c8a75e724a674812b8a38c0202485776445d89 | refs/heads/master | 2023-04-03T07:18:49.842063 | 2021-04-11T12:02:40 | 2021-04-11T12:02:40 | 325,269,130 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 356 | py |
class Solution:
def isValid(self, s: str) -> bool:
dic = {'{': '}', '[': ']', '(': ')', '?': '?'}
stack = ['?']
for c in s:
if c in dic: stack.append(c)
elif dic[stack.pop()] != c: return False
return len(stack) == 1
if __name__ == '__main__':
s = Solution()
print(s.isValid("(){}"))
| [
"2901429479@qq.com"
] | 2901429479@qq.com |
1131b28b0a153d0d74427cea61cfce5a5b8d28f4 | 90cc37b6cc67bce397411631587a14be72085d2e | /tests/unit/test_deployment.py | 6cda651e7bc2a1406caa35b50b68d8220d34e492 | [
"Unlicense"
] | permissive | michaeljoseph/righteous | 49d36bb895945a26d5db4b3d13a2e303aef3ef93 | ba95c574a94df85aca33397cc77b053e7f545705 | refs/heads/master | 2016-09-06T11:01:57.478168 | 2013-10-17T11:00:27 | 2013-10-17T11:00:27 | 2,584,142 | 2 | 1 | null | 2013-10-18T14:53:04 | 2011-10-16T00:07:14 | Python | UTF-8 | Python | false | false | 2,389 | py | from righteous.compat import urlencode
from .base import ApiTestCase
import righteous
class DeploymentTestCase(ApiTestCase):
def setUp(self):
self.setup_patching('righteous.api.deployment._request')
super(DeploymentTestCase, self).setUp()
def test_list_deployments(self):
righteous.init(
'user', 'pass', 'account_id', default_deployment_id='foo')
self.response.content = '{}'
righteous.list_deployments()
self.request.assert_called_once_with('/deployments.js')
def test_find_deployment_no_result(self):
self.response.content = '[]'
deployment = righteous.find_deployment('bruce')
request_url = '/deployments.js?filter=nickname=bruce'
self.request.assert_called_once_with(request_url)
assert not deployment
def test_deployment_info(self):
self.response.content = '{}'
righteous.deployment_info('/deployment/ref')
self.request.assert_called_once_with(
'/deployment/ref.js', prepend_api_base=False)
def test_create_deployment(self):
self.response.status_code = 201
self.response.headers['location'] = '/deployment/new_ref'
nickname = 'devops'
description = 'devops deployment'
create_data = {
'deployment[nickname]': nickname,
'deployment[description]': description,
}
expected = urlencode(create_data)
success, location = righteous.create_deployment(nickname, description)
self.request.assert_called_once_with(
'/deployments', method='POST', body=expected)
assert success
self.assertEqual(location, '/deployment/new_ref')
def test_delete_deployment(self):
self.response.content = '{}'
assert righteous.delete_deployment('/deployment/ref')
self.request.assert_called_once_with(
'/deployment/ref', method='DELETE', prepend_api_base=False)
def test_duplicate_deployment(self):
self.response.status_code = 201
self.response.headers['location'] = '/deployment/new_ref'
success, location = righteous.duplicate_deployment('/deployment/ref')
assert success
self.request.assert_any_call(
'/deployment/ref/duplicate', method='POST', prepend_api_base=False)
self.assertEqual(location, '/deployment/new_ref')
| [
"michaeljoseph+github@gmail.com"
] | michaeljoseph+github@gmail.com |
0ac74b8fcc292adb87b359e8d815025625d1b6c4 | e6208febf7e34d4108422c6da54453373733a421 | /sdks/python/client/argo_workflows/model/io_argoproj_workflow_v1alpha1_workflow_create_request.py | c0a6b77db6997e8fac5c7c8803d195ea356bad80 | [
"Apache-2.0"
] | permissive | wreed4/argo | 05889e5bb7738d534660c58a7ec71c454e6ac9bb | 41f94310b0f7fee1ccd533849bb3af7f1ad4f672 | refs/heads/master | 2023-01-22T05:32:12.254485 | 2022-01-27T21:24:45 | 2022-01-27T22:02:22 | 233,143,964 | 0 | 0 | Apache-2.0 | 2023-01-17T19:04:43 | 2020-01-10T22:56:25 | Go | UTF-8 | Python | false | false | 12,633 | py | """
Argo Server API
You can get examples of requests and responses by using the CLI with `--gloglevel=9`, e.g. `argo list --gloglevel=9` # noqa: E501
The version of the OpenAPI document: VERSION
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from argo_workflows.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from argo_workflows.exceptions import ApiAttributeError
def lazy_import():
from argo_workflows.model.create_options import CreateOptions
from argo_workflows.model.io_argoproj_workflow_v1alpha1_workflow import IoArgoprojWorkflowV1alpha1Workflow
globals()['CreateOptions'] = CreateOptions
globals()['IoArgoprojWorkflowV1alpha1Workflow'] = IoArgoprojWorkflowV1alpha1Workflow
class IoArgoprojWorkflowV1alpha1WorkflowCreateRequest(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'create_options': (CreateOptions,), # noqa: E501
'instance_id': (str,), # noqa: E501
'namespace': (str,), # noqa: E501
'server_dry_run': (bool,), # noqa: E501
'workflow': (IoArgoprojWorkflowV1alpha1Workflow,), # noqa: E501
}
@cached_property
def discriminator():
return None
attribute_map = {
'create_options': 'createOptions', # noqa: E501
'instance_id': 'instanceID', # noqa: E501
'namespace': 'namespace', # noqa: E501
'server_dry_run': 'serverDryRun', # noqa: E501
'workflow': 'workflow', # noqa: E501
}
read_only_vars = {
}
_composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1WorkflowCreateRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
create_options (CreateOptions): [optional] # noqa: E501
instance_id (str): This field is no longer used.. [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
server_dry_run (bool): [optional] # noqa: E501
workflow (IoArgoprojWorkflowV1alpha1Workflow): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, *args, **kwargs): # noqa: E501
"""IoArgoprojWorkflowV1alpha1WorkflowCreateRequest - a model defined in OpenAPI
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
create_options (CreateOptions): [optional] # noqa: E501
instance_id (str): This field is no longer used.. [optional] # noqa: E501
namespace (str): [optional] # noqa: E501
server_dry_run (bool): [optional] # noqa: E501
workflow (IoArgoprojWorkflowV1alpha1Workflow): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| [
"noreply@github.com"
] | noreply@github.com |
b8a299a0c761353597d9f121dc59f6a0462ecd45 | 6d0f9f6790fb0451361bbcfb13706c85c6dcfdbc | /serpentTools/tests/test_ResultsReader.py | a8924952ce78b5ffc4ca69372ba9171b66de0116 | [
"MIT"
] | permissive | gonuke/serpent-tools | 4d3fae807b9fce9e0f82632e6c9eada3b9c6bba0 | ce9df61484d8289f91bbd5224eba5ea5569423e5 | refs/heads/master | 2020-03-22T07:07:57.280551 | 2018-06-11T12:49:52 | 2018-06-11T12:49:52 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 24,803 | py | """Test the results reader."""
import os
import unittest
import numpy
import six
from serpentTools.settings import rc
from serpentTools.tests import TEST_ROOT
from serpentTools.parsers import ResultsReader
from serpentTools.messages import SerpentToolsException
class TestBadFiles(unittest.TestCase):
    """
    Test files that cannot be processed by the reader.

    Tests:

        1. test_noResults: file with no results
        2. test_noUniverses: file with no universes

    Both cases are expected to raise SerpentToolsException.
    """

    def test_noResults(self):
        """Verify that the reader raises error when no results exist in the file"""
        badFile = os.path.join(TEST_ROOT, 'bad_results_file.m')
        # Write a small dummy file that contains no readable results
        with open(badFile, 'w') as badObj:
            for _line in range(5):
                badObj.write(str(_line))
        # Remove the dummy file even if the assertion below fails;
        # plain os.remove at the end would be skipped on test failure
        # and leave a stale file in TEST_ROOT for later runs.
        self.addCleanup(os.remove, badFile)
        badReader = ResultsReader(badFile)
        with self.assertRaises(SerpentToolsException):
            badReader.read()

    def test_noUniverses(self):
        """Verify that the reader raises an error if no universes are stored on the file"""
        univFile = os.path.join(TEST_ROOT, 'pwr_res_noUniv.m')
        univReader = ResultsReader(univFile)
        with self.assertRaises(SerpentToolsException):
            univReader.read()
class TestEmptyAttributes(unittest.TestCase):
    """
    Test a file whose results never populate the reader attributes.

    Some results do exist in the file, but the read procedure assigns
    nothing to ``metadata``, ``resdata``, or ``universes``; the reader
    is therefore expected to raise SerpentToolsException.
    """

    def test_emptyAttributes(self):
        """Verify the reader raises an error when every attribute is empty."""
        filePath = os.path.join(TEST_ROOT, 'pwr_res_emptyAttributes.m')
        with self.assertRaises(SerpentToolsException):
            with rc:
                rc['xs.variableExtras'] = ['GC_UNIVERSE_NAME']
                reader = ResultsReader(filePath)
                reader.read()
class TestGetUniv(unittest.TestCase):
    """
    Test the getUniv method.

    Tests:

        1. test_allVarsNone: burnup, index and timeDays are all set to None
        2. test_nonPostiveIndex: index is zero or negative
        3. test_noUnivState: define ('0', bu, idx, days) as a non-existing state
        4. test_validUniv: a valid universe state contains proper data

    Raises
    ------
    SerpentToolsException
        All time parameters are set to None
    KeyError
        index is non-positive, or no such universe state exists in the reader
    """

    def setUp(self):
        # Read the reference file once per test with a fixed settings block.
        # The reader must be constructed and read inside ``with rc`` so the
        # temporary settings are active while parsing.
        self.file = os.path.join(TEST_ROOT, 'pwr_res.m')
        with rc:
            rc['serpentVersion'] = '2.1.29'
            rc['xs.variableGroups'] = ['versions', 'gc-meta', 'xs',
                                       'diffusion', 'eig', 'burnup-coeff']
            rc['xs.getInfXS'] = True  # only store inf cross sections
            rc['xs.getB1XS'] = False
            self.reader = ResultsReader(self.file)
            self.reader.read()
        # Expected infinite absorption cross sections for universe '0'
        self.expectedinfValAbs = numpy.array([1.05040E-02, 1.23260E-01])

    def test_allVarsNone(self):
        """Verify that the reader raises error when no time parameters are given"""
        with self.assertRaises(SerpentToolsException):
            self.reader.getUniv('0', burnup=None, index=None, timeDays=None)

    def test_nonPostiveIndex(self):
        """Verify that the reader raises error when the time index is not positive"""
        with self.assertRaises(KeyError):
            self.reader.getUniv('0', burnup=None, index=0, timeDays=None)

    def test_noUnivState(self):
        """Verify that the reader raises error when the state tuple does not exist"""
        with self.assertRaises(KeyError):
            self.reader.getUniv('0', burnup=50, index=10, timeDays=5)

    def test_validUniv(self):
        """Verify that a valid universe state stores the expected data"""
        # NOTE: the original docstring here was copy-pasted from
        # test_noUnivState and misdescribed this test, which checks values.
        xsDict = self.reader.getUniv('0', burnup=0.0, index=1, timeDays=0.0)
        numpy.testing.assert_equal(xsDict.infExp['infAbs'],
                                   self.expectedinfValAbs)
class TesterCommonResultsReader(unittest.TestCase):
    """
    Base class with tests shared by every results-reader test case.

    Subclasses supply ``self.reader`` plus all ``expected*`` attributes in
    their ``setUp``.  The class is deleted at module level so unittest
    discovery does not try to run it directly.

    Tests:
        1. test_varsMatchSettings:
            compares the keys defined by the user to those obtained
            by the reader
        2. test_metadata:
            check that metadata variables and their values are
            properly stored
        3. test_resdata:
            check that time-dependent results variables and their
            values are properly stored
        4. test_universes:
            check that the expected states ('univ', bu, buIdx, days)
            are read; for a single state check infExp keys and values,
            and that infUnc, gc/gcUnc and group structures are stored
    """

    def test_varsMatchSettings(self):
        """Verify that the obtained variables match the settings."""
        self.assertSetEqual(self.expVarSettings, self.reader.settings['variables'])

    def test_metadata(self):
        """Verify that user-defined metadata is properly stored."""
        expectedKeys = set(self.expectedMetadata)
        actualKeys = set(self.reader.metadata.keys())
        self.assertSetEqual(expectedKeys, actualKeys)
        for key, expectedValue in six.iteritems(self.expectedMetadata):
            if isinstance(expectedValue, str):
                # strings are compared as character sets, tolerating any
                # whitespace normalization done by the parser
                self.assertSetEqual(set(self.reader.metadata[key]),
                                    set(expectedValue))
            else:
                numpy.testing.assert_equal(self.reader.metadata[key],
                                           expectedValue)

    def test_resdata(self):
        """Verify that time-dependent results are properly stored."""
        expectedKeys = self.expectedResdata
        actualKeys = set(self.reader.resdata.keys())
        self.assertSetEqual(expectedKeys, actualKeys)
        numpy.testing.assert_equal(self.reader.resdata['absKeff'],
                                   self.expectedKeff)
        # 'burnDays' is absent for non-burnup runs, in which case the
        # subclass sets expectedDays to an empty array.  Testing for the
        # key explicitly (instead of the previous bare ``except``) stops
        # genuine value mismatches from being silently swallowed.
        if 'burnDays' in self.reader.resdata:
            numpy.testing.assert_equal(self.reader.resdata['burnDays'],
                                       self.expectedDays)
        else:
            numpy.testing.assert_equal([], self.expectedDays)

    def test_universes(self):
        """Verify that results for all the states ('univ', bu, buIdx, days) exist.
        Verify that the containers for each state are properly created
        and that the proper information is stored, e.g. infExp keys and values"""
        expSt0 = self.expectedStates[0]
        actualStates = set(self.reader.universes.keys())
        # check that all states are read
        self.assertSetEqual(set(self.expectedStates), actualStates)
        self.assertSetEqual(set(self.reader.universes[expSt0].infExp.keys()),
                            self.expectedInfExp)
        self.assertSetEqual(set(self.reader.universes[expSt0].gc.keys()),
                            self.expectedUnivgcData)
        numpy.testing.assert_equal(self.reader.universes[expSt0].infExp['infFlx'],
                                   self.expectedInfVals)
        numpy.testing.assert_equal(self.reader.universes[expSt0].infUnc['infFlx'],
                                   self.expectedInfUnc)
        numpy.testing.assert_equal(self.reader.universes[expSt0].gc['cmmTranspxs'],
                                   self.expectedCMM)
        numpy.testing.assert_equal(self.reader.universes[expSt0].gcUnc['cmmTranspxs'],
                                   self.expectedCMMunc)
        numpy.testing.assert_equal(self.reader.universes[expSt0].groups,
                                   self.expectedGroups)
        numpy.testing.assert_equal(self.reader.universes[expSt0].microGroups,
                                   self.expectedMicroGroups)
class TestFilterResults(TesterCommonResultsReader):
    """
    Test the ability to read and filter data.

    Expected outcome:
        1. test_varsMatchSettings:
            Results read are equal to results set
        2. test_metadata:
            metadata is filtered
        3. test_resdata:
            resdata is filtered
        4. test_universes:
            univ is filtered
    """

    def setUp(self):
        """Read 'pwr_res.m' with explicit variable-group filtering enabled."""
        self.file = os.path.join(TEST_ROOT, 'pwr_res.m')
        # universe id, burnup, step, days
        self.expectedStates = (('0', 0.0, 1, 0.0), ('0', 500, 2, 5.0))
        with rc:
            rc['serpentVersion'] = '2.1.29'
            rc['xs.variableGroups'] = ['versions', 'gc-meta', 'xs',
                                       'diffusion', 'eig', 'burnup-coeff']
            rc['xs.getInfXS'] = True  # only store inf cross sections
            rc['xs.getB1XS'] = False
            self.reader = ResultsReader(self.file)
            self.reader.read()
        # raw SERPENT variable names the variable groups above expand to
        self.expVarSettings = set({'VERSION', 'COMPILE_DATE', 'DEBUG', 'TITLE',
                                   'CONFIDENTIAL_DATA', 'INPUT_FILE_NAME', 'WORKING_DIRECTORY',
                                   'HOSTNAME', 'CPU_TYPE', 'CPU_MHZ', 'START_DATE', 'COMPLETE_DATE',
                                   'GC_UNIVERSE_NAME', 'MICRO_NG', 'MICRO_E', 'MACRO_NG',
                                   'MACRO_E', 'INF_MICRO_FLX', 'INF_KINF', 'INF_FLX',
                                   'INF_FISS_FLX', 'TOT', 'CAPT', 'ABS', 'FISS', 'NSF',
                                   'NUBAR', 'KAPPA', 'INVV', 'TRANSPXS', 'DIFFCOEF', 'RABSXS',
                                   'REMXS', 'SCATT0', 'SCATT1', 'SCATT2', 'SCATT3', 'SCATT4',
                                   'SCATT5', 'SCATT6', 'SCATT7', 'S0', 'S1', 'S2', 'S3', 'S4',
                                   'S5', 'S6', 'S7', 'CHIT', 'CHIP', 'CHID', 'CMM_TRANSPXS',
                                   'CMM_TRANSPXS_X', 'CMM_TRANSPXS_Y', 'CMM_TRANSPXS_Z',
                                   'CMM_DIFFCOEF', 'CMM_DIFFCOEF_X', 'CMM_DIFFCOEF_Y',
                                   'CMM_DIFFCOEF_Z', 'ANA_KEFF', 'IMP_KEFF', 'COL_KEFF',
                                   'ABS_KEFF', 'ABS_KINF', 'GEOM_ALBEDO', 'BURN_MATERIALS',
                                   'BURN_MODE', 'BURN_STEP', 'BURNUP', 'BURN_DAYS',
                                   'COEF_IDX', 'COEF_BRANCH', 'COEF_BU_STEP'})
        # run-level metadata expected after parsing
        self.expectedMetadata = {'version': 'Serpent 2.1.29',
                                 'compileDate': 'Jan 4 2018 17:22:46',
                                 'debug': [0.],
                                 'title': 'pwr pin',
                                 'confidentialData': [0.],
                                 'inputFileName': 'pwrPin',
                                 'workingDirectory': '/home/ajohnson400/research/gpt-dep/testing/depmtx',
                                 'hostname': 'ME04L0358GRD04',
                                 'cpuType': 'Intel(R) Core(TM) i7-6700T CPU @ 2.80GHz',
                                 'cpuMhz': [194.],
                                 'startDate': 'Mon Feb 19 15:39:23 2018',
                                 'completeDate': 'Mon Feb 19 15:39:53 2018'}
        self.expectedResdata = set(['absKeff', 'absKinf', 'anaKeff', 'burnDays', 'burnMaterials', 'burnMode', 'burnStep',
                                    'burnup', 'colKeff', 'geomAlbedo', 'impKeff', 'nubar'])
        # one [value, uncertainty] row per burnup step
        self.expectedKeff = numpy.array([[9.91938E-01, 0.00145], [1.81729E-01, 0.00240]])
        self.expectedDays = numpy.array([[0.00000E+00], [5.00000E+00]])
        self.expectedInfExp = set(['infAbs', 'infCapt', 'infChid', 'infChip', 'infChit', 'infDiffcoef', 'infFiss', 'infFissFlx',
                                   'infFlx', 'infInvv', 'infKappa', 'infKinf', 'infMicroFlx', 'infNsf', 'infNubar', 'infRabsxs',
                                   'infRemxs', 'infS0', 'infS1', 'infS2', 'infS3', 'infS4', 'infS5', 'infS6', 'infS7',
                                   'infScatt0', 'infScatt1', 'infScatt2', 'infScatt3', 'infScatt4', 'infScatt5', 'infScatt6',
                                   'infScatt7', 'infTot', 'infTranspxs'])
        self.expectedUnivgcData = set(['cmmDiffcoef', 'cmmDiffcoefX', 'cmmDiffcoefY', 'cmmDiffcoefZ', 'cmmTranspxs', 'cmmTranspxsX',
                                       'cmmTranspxsY', 'cmmTranspxsZ'])
        self.expectedCMM = numpy.array([2.23062E-01, 6.55491E-01])
        self.expectedCMMunc = numpy.array([0.00144, 0.03837])
        # 70-group micro energy grid boundaries [MeV]
        self.expectedMicroGroups = numpy.array([1.00000E-11, 5.00000E-09, 1.00000E-08, 1.50000E-08, 2.00000E-08, 2.50000E-08,
                                                3.00000E-08, 3.50000E-08, 4.20000E-08, 5.00000E-08, 5.80000E-08, 6.70000E-08,
                                                8.00000E-08, 1.00000E-07, 1.40000E-07, 1.80000E-07, 2.20000E-07, 2.50000E-07,
                                                2.80000E-07, 3.00000E-07, 3.20000E-07, 3.50000E-07, 4.00000E-07, 5.00000E-07,
                                                6.25000E-07, 7.80000E-07, 8.50000E-07, 9.10000E-07, 9.50000E-07, 9.72000E-07,
                                                9.96000E-07, 1.02000E-06, 1.04500E-06, 1.07100E-06, 1.09700E-06, 1.12300E-06,
                                                1.15000E-06, 1.30000E-06, 1.50000E-06, 1.85500E-06, 2.10000E-06, 2.60000E-06,
                                                3.30000E-06, 4.00000E-06, 9.87700E-06, 1.59680E-05, 2.77000E-05, 4.80520E-05,
                                                7.55014E-05, 1.48728E-04, 3.67262E-04, 9.06898E-04, 1.42510E-03, 2.23945E-03,
                                                3.51910E-03, 5.50000E-03, 9.11800E-03, 1.50300E-02, 2.47800E-02, 4.08500E-02,
                                                6.74300E-02, 1.11000E-01, 1.83000E-01, 3.02500E-01, 5.00000E-01, 8.21000E-01,
                                                1.35300E+00, 2.23100E+00, 3.67900E+00, 6.06550E+00, 2.00000E+01])
        # 2-group macro boundaries
        self.expectedGroups = numpy.array([1.00000E+37, 6.25000E-07, 0.00000E+00])
        self.expectedInfVals = numpy.array([2.46724E+18, 2.98999E+17])
        self.expectedInfUnc = numpy.array([0.00115, 0.00311])
class TestReadAllResults(TesterCommonResultsReader):
    """
    Read the full results file and do NOT filter.

    Note:
        The file was manually filtered to include
        only the variables from 'TestFilterResults' class.
        No settings were defined and hence the reader
        should read everything.

    Expected outcome:
        - Same variables and values as in the 'TestFilterResults' class
        1. test_varsMatchSettings:
            Results read are equal to results set
        2. test_metadata:
            metadata is not filtered
        3. test_resdata:
            resdata is not filtered
        4. test_universes:
            univ is not filtered
    """

    def setUp(self):
        """Read the pre-filtered 'pwr_res_filter.m' without any variable settings."""
        self.file = os.path.join(TEST_ROOT, 'pwr_res_filter.m')
        # universe id, burnup, step, days
        with rc:
            rc['serpentVersion'] = '2.1.29'
            self.expectedStates = (('0', 0.0, 1, 0.0), ('0', 500, 2, 5.0))
            self.reader = ResultsReader(self.file)
            self.reader.read()
        # no filtering configured, so the stored variable set is empty
        self.expVarSettings = set()
        self.expectedMetadata = {'version': 'Serpent 2.1.29',
                                 'compileDate': 'Jan 4 2018 17:22:46',
                                 'debug': [0.],
                                 'title': 'pwr pin',
                                 'confidentialData': [0.],
                                 'inputFileName': 'pwrPin',
                                 'workingDirectory': '/home/ajohnson400/research/gpt-dep/testing/depmtx',
                                 'hostname': 'ME04L0358GRD04',
                                 'cpuType': 'Intel(R) Core(TM) i7-6700T CPU @ 2.80GHz',
                                 'cpuMhz': [194.],
                                 'startDate': 'Mon Feb 19 15:39:23 2018',
                                 'completeDate': 'Mon Feb 19 15:39:53 2018'}
        # extra 'minMacroxs' appears because nothing is filtered out
        self.expectedResdata = set(['absKeff', 'absKinf', 'anaKeff', 'burnDays', 'burnMaterials', 'burnMode', 'burnStep',
                                    'burnup', 'colKeff', 'geomAlbedo', 'impKeff', 'nubar', 'minMacroxs'])
        self.expectedKeff = numpy.array([[9.91938E-01, 0.00145], [1.81729E-01, 0.00240]])
        self.expectedDays = numpy.array([[0.00000E+00], [5.00000E+00]])
        self.expectedInfExp = set(['infAbs', 'infCapt', 'infChid', 'infChip', 'infChit', 'infDiffcoef', 'infFiss', 'infFissFlx',
                                   'infFlx', 'infInvv', 'infKappa', 'infKinf', 'infMicroFlx', 'infNsf', 'infNubar', 'infRabsxs',
                                   'infRemxs', 'infS0', 'infS1', 'infS2', 'infS3', 'infS4', 'infS5', 'infS6', 'infS7',
                                   'infScatt0', 'infScatt1', 'infScatt2', 'infScatt3', 'infScatt4', 'infScatt5', 'infScatt6',
                                   'infScatt7', 'infTot', 'infTranspxs'])
        self.expectedUnivgcData = set(['cmmDiffcoef', 'cmmDiffcoefX', 'cmmDiffcoefY', 'cmmDiffcoefZ', 'cmmTranspxs', 'cmmTranspxsX',
                                       'cmmTranspxsY', 'cmmTranspxsZ'])
        self.expectedCMM = numpy.array([2.23062E-01, 6.55491E-01])
        self.expectedCMMunc = numpy.array([0.00144, 0.03837])
        self.expectedMicroGroups = numpy.array([1.00000E-11, 5.00000E-09, 1.00000E-08, 1.50000E-08, 2.00000E-08, 2.50000E-08,
                                                3.00000E-08, 3.50000E-08, 4.20000E-08, 5.00000E-08, 5.80000E-08, 6.70000E-08,
                                                8.00000E-08, 1.00000E-07, 1.40000E-07, 1.80000E-07, 2.20000E-07, 2.50000E-07,
                                                2.80000E-07, 3.00000E-07, 3.20000E-07, 3.50000E-07, 4.00000E-07, 5.00000E-07,
                                                6.25000E-07, 7.80000E-07, 8.50000E-07, 9.10000E-07, 9.50000E-07, 9.72000E-07,
                                                9.96000E-07, 1.02000E-06, 1.04500E-06, 1.07100E-06, 1.09700E-06, 1.12300E-06,
                                                1.15000E-06, 1.30000E-06, 1.50000E-06, 1.85500E-06, 2.10000E-06, 2.60000E-06,
                                                3.30000E-06, 4.00000E-06, 9.87700E-06, 1.59680E-05, 2.77000E-05, 4.80520E-05,
                                                7.55014E-05, 1.48728E-04, 3.67262E-04, 9.06898E-04, 1.42510E-03, 2.23945E-03,
                                                3.51910E-03, 5.50000E-03, 9.11800E-03, 1.50300E-02, 2.47800E-02, 4.08500E-02,
                                                6.74300E-02, 1.11000E-01, 1.83000E-01, 3.02500E-01, 5.00000E-01, 8.21000E-01,
                                                1.35300E+00, 2.23100E+00, 3.67900E+00, 6.06550E+00, 2.00000E+01])
        self.expectedGroups = numpy.array([1.00000E+37, 6.25000E-07, 0.00000E+00])
        self.expectedInfVals = numpy.array([2.46724E+18, 2.98999E+17])
        self.expectedInfUnc = numpy.array([0.00115, 0.00311])
class TestFilterResultsNoBurnup(TesterCommonResultsReader):
    """
    Test the ability to read a file with no BU steps.

    Expected outcome:
        1. test_varsMatchSettings:
            Results read are equal to results set
        2. test_metadata:
            metadata is filtered
        3. test_resdata:
            resdata is filtered (no burnup entries present)
        4. test_universes:
            univ is filtered
    """

    def setUp(self):
        """Read 'pwr_res_noBU.m' (single state, no burnup) with filtering enabled."""
        self.file = os.path.join(TEST_ROOT, 'pwr_res_noBU.m')
        # universe id, Idx, Idx, Idx (no burnup/days available in this file)
        self.expectedStates = (('0', 1, 1, 1), ('0', 1, 1, 1))
        with rc:
            rc['serpentVersion'] = '2.1.30'
            rc['xs.variableGroups'] = ['versions', 'gc-meta', 'xs',
                                       'diffusion', 'eig', 'burnup-coeff']
            rc['xs.getInfXS'] = True  # only store inf cross sections
            rc['xs.getB1XS'] = False
            self.reader = ResultsReader(self.file)
            self.reader.read()
        # raw SERPENT variable names the variable groups above expand to
        self.expVarSettings = set({'VERSION', 'COMPILE_DATE', 'DEBUG', 'TITLE',
                                   'CONFIDENTIAL_DATA', 'INPUT_FILE_NAME', 'WORKING_DIRECTORY',
                                   'HOSTNAME', 'CPU_TYPE', 'CPU_MHZ', 'START_DATE', 'COMPLETE_DATE',
                                   'GC_UNIVERSE_NAME', 'MICRO_NG', 'MICRO_E', 'MACRO_NG',
                                   'MACRO_E', 'INF_MICRO_FLX', 'INF_KINF', 'INF_FLX',
                                   'INF_FISS_FLX', 'TOT', 'CAPT', 'ABS', 'FISS', 'NSF',
                                   'NUBAR', 'KAPPA', 'INVV', 'TRANSPXS', 'DIFFCOEF', 'RABSXS',
                                   'REMXS', 'SCATT0', 'SCATT1', 'SCATT2', 'SCATT3', 'SCATT4',
                                   'SCATT5', 'SCATT6', 'SCATT7', 'S0', 'S1', 'S2', 'S3', 'S4',
                                   'S5', 'S6', 'S7', 'CHIT', 'CHIP', 'CHID', 'CMM_TRANSPXS',
                                   'CMM_TRANSPXS_X', 'CMM_TRANSPXS_Y', 'CMM_TRANSPXS_Z',
                                   'CMM_DIFFCOEF', 'CMM_DIFFCOEF_X', 'CMM_DIFFCOEF_Y',
                                   'CMM_DIFFCOEF_Z', 'ANA_KEFF', 'IMP_KEFF', 'COL_KEFF',
                                   'ABS_KEFF', 'ABS_KINF', 'GEOM_ALBEDO', 'BURN_MATERIALS',
                                   'BURN_MODE', 'BURN_STEP', 'BURNUP', 'BURN_DAYS',
                                   'COEF_IDX', 'COEF_BRANCH', 'COEF_BU_STEP'})
        self.expectedMetadata = {'version': 'Serpent 2.1.30',
                                 'compileDate': 'Apr 4 2018 08:55:27',
                                 'debug': [0.],
                                 'title': 'UO2 PIN MODEL',
                                 'confidentialData': [0.],
                                 'inputFileName': 'pwr',
                                 'workingDirectory': '/gpfs/pace1/project/me-kotlyar/dkotlyar6/Research/Serpent_test/FP_test',
                                 'hostname': 'rich133-c36-10-l.pace.gatech.edu',
                                 'cpuType': 'Intel(R) Xeon(R) CPU E5-2680 v4 @ 2.40GHz',
                                 'cpuMhz': [184549409.0],
                                 'startDate': 'Mon May 14 11:20:06 2018',
                                 'completeDate': 'Mon May 14 11:20:36 2018'}
        # no burnup run: burnDays/burnup/burnStep/burnMaterials are absent
        self.expectedResdata = set(['absKeff', 'absKinf', 'anaKeff', 'colKeff', 'geomAlbedo', 'impKeff', 'nubar'])
        self.expectedKeff = numpy.array([1.15295E+00, 0.00094])
        self.expectedDays = numpy.array([])
        self.expectedInfExp = set(['infAbs', 'infCapt', 'infChid', 'infChip', 'infChit', 'infDiffcoef', 'infFiss', 'infFissFlx',
                                   'infFlx', 'infInvv', 'infKappa', 'infKinf', 'infMicroFlx', 'infNsf', 'infNubar', 'infRabsxs',
                                   'infRemxs', 'infS0', 'infS1', 'infS2', 'infS3', 'infS4', 'infS5', 'infS6', 'infS7',
                                   'infScatt0', 'infScatt1', 'infScatt2', 'infScatt3', 'infScatt4', 'infScatt5', 'infScatt6',
                                   'infScatt7', 'infTot', 'infTranspxs'])
        self.expectedUnivgcData = set(['cmmDiffcoef', 'cmmDiffcoefX', 'cmmDiffcoefY', 'cmmDiffcoefZ', 'cmmTranspxs', 'cmmTranspxsX',
                                       'cmmTranspxsY', 'cmmTranspxsZ'])
        self.expectedCMM = numpy.array([1.80522E-01, 4.44568E-01])
        self.expectedCMMunc = numpy.array([0.00181, 0.01952])
        self.expectedMicroGroups = numpy.array([1.00000E-11, 5.00000E-09, 1.00000E-08, 1.50000E-08, 2.00000E-08, 2.50000E-08,
                                                3.00000E-08, 3.50000E-08, 4.20000E-08, 5.00000E-08, 5.80000E-08, 6.70000E-08,
                                                8.00000E-08, 1.00000E-07, 1.40000E-07, 1.80000E-07, 2.20000E-07, 2.50000E-07,
                                                2.80000E-07, 3.00000E-07, 3.20000E-07, 3.50000E-07, 4.00000E-07, 5.00000E-07,
                                                6.25000E-07, 7.80000E-07, 8.50000E-07, 9.10000E-07, 9.50000E-07, 9.72000E-07,
                                                9.96000E-07, 1.02000E-06, 1.04500E-06, 1.07100E-06, 1.09700E-06, 1.12300E-06,
                                                1.15000E-06, 1.30000E-06, 1.50000E-06, 1.85500E-06, 2.10000E-06, 2.60000E-06,
                                                3.30000E-06, 4.00000E-06, 9.87700E-06, 1.59680E-05, 2.77000E-05, 4.80520E-05,
                                                7.55014E-05, 1.48728E-04, 3.67262E-04, 9.06898E-04, 1.42510E-03, 2.23945E-03,
                                                3.51910E-03, 5.50000E-03, 9.11800E-03, 1.50300E-02, 2.47800E-02, 4.08500E-02,
                                                6.74300E-02, 1.11000E-01, 1.83000E-01, 3.02500E-01, 5.00000E-01, 8.21000E-01,
                                                1.35300E+00, 2.23100E+00, 3.67900E+00, 6.06550E+00, 2.00000E+01])
        self.expectedGroups = numpy.array([1.00000E+37, 6.25000E-07, 0.00000E+00])
        self.expectedInfVals = numpy.array([8.71807E+14, 4.80974E+13])
        self.expectedInfUnc = numpy.array([0.00097, 0.00121])
# Remove the shared base class so that unittest discovery does not attempt
# to run its tests directly (it has no setUp of its own).
del TesterCommonResultsReader

if __name__ == '__main__':
    unittest.main()
| [
"noreply@github.com"
] | noreply@github.com |
b8d8957ac4a50ca4946cf286cc5e6c799228008c | d4e71e820af69168b57283fa4967c00682457fbc | /router.py | 85210eb55f9907ec69e40877551d003d4fa535df | [] | no_license | bermuda2810/envi_inference | eef436c9af61e40b1243ee36be7ea48dd7af7085 | 053275c14c314d5c15c880fb5a4828dc38b870af | refs/heads/master | 2020-06-07T03:51:29.382855 | 2019-06-20T12:05:40 | 2019-06-20T12:05:40 | 192,878,875 | 0 | 0 | null | 2019-06-20T08:30:35 | 2019-06-20T08:15:55 | Python | UTF-8 | Python | false | false | 1,046 | py | from __future__ import print_function
from future.standard_library import install_aliases
from flask import Flask, request, make_response
from flask_cors import CORS
import json
import os
from bot import translation
# Patch urllib/parse aliases so Python 2-style imports keep working
install_aliases()

app = Flask(__name__)
# allow cross-origin requests from browser clients
cors = CORS(app)
@app.route("/")
def index():
    """Landing page: plain-text greeting, doubles as a health check."""
    greeting = "Welcome to Wisdom Seeker! 19:03"
    return greeting
@app.route('/api/translate', endpoint='translate', methods=['POST'])
def translate():
    """POST endpoint: translate the JSON ``input`` field and return a JSON reply."""
    payload = request.get_json(silent=True, force=True)
    source_text = payload["input"]
    translated = translation(source_text)
    return generate_response(0, translated)
def generate_response(code=0, output=None):
    """Serialize ``code``/``output`` into a Flask response with a JSON body."""
    body = json.dumps({'code': code, 'output': output})
    response = make_response(body)
    response.headers['Content-Type'] = 'application/json'
    return response
if __name__ == '__main__':
    # default to port 5000 unless the environment (e.g. a PaaS) overrides it
    port = int(os.environ.get("PORT", 5000))
    print("Starting app on port %d" % port)
    app.run(threaded=True, debug=False, port=port,host = '0.0.0.0')
"vietbq@nal.vn"
] | vietbq@nal.vn |
01e81a1f99193030b8a12ff979b36ab877ecbdbd | 9dd14d428b2513376f0e1c3ec06a3b06fc60fc73 | /solution/operators/sdi_pandas_0.0.37/sdi_pandas_0.0.36/content/files/vflow/subengines/com/sap/python36/operators/sdi_pandas/sample/sample.py | 41fd2ff0d55d9875b9f252b305861c73eef12369 | [
"MIT"
] | permissive | thhapke/sdi_pandas | 38b1a3a688c62621fb858f03e4ac2f3bcbc20b88 | 7a9108007459260a30ea7ee404a76b42861c81c5 | refs/heads/master | 2020-07-24T10:40:05.643337 | 2020-04-08T06:59:52 | 2020-04-08T06:59:52 | 207,894,698 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 6,795 | py | import sdi_utils.gensolution as gs
import sdi_utils.set_logging as slog
import sdi_utils.textfield_parser as tfp
import sdi_utils.tprogress as tp
import pandas as pd
EXAMPLE_ROWS = 5
try:
    api
except NameError:
    # Running outside SAP Data Intelligence: provide a minimal mock of the
    # operator `api` object so the script can be executed standalone.
    class api:
        class Message:
            def __init__(self, body=None, attributes=""):
                self.body = body
                self.attributes = attributes

        def send(port, msg):
            # print to stdout instead of sending to a real output port
            if isinstance(msg, api.Message):
                print('Port: ', port)
                print('Attributes: ', msg.attributes)
                print('Body: ', str(msg.body))
            else:
                print(str(msg))
            return msg

        def call(config, msg):
            # emulate a single operator invocation with the given config
            api.config = config
            return process(msg)

        def set_port_callback(port, callback):
            # feed one hard-coded test DataFrame into the registered callback
            df = pd.DataFrame(
                {'icol': [1, 2, 3, 4, 5], 'xcol2': ['A', 'A', 'B', 'B', 'C'], \
                 'xcol3': ['K', 'L', 'M', 'N', 'O'], 'xcol4': ['a1', 'a1', 'b1', 'b1', 'b1']})
            default_msg = api.Message(attributes={'format': 'pandas', 'name': 'test'}, body=df)
            callback(default_msg)
class config:
    """Operator configuration: metadata plus the user-settable parameters."""
    ## Meta data
    config_params = dict()
    version = '0.0.17'
    tags = {'pandas': '','sdi_utils':''}
    operator_description = "Sample from Dataframe"
    operator_description_long = "Sampling over a DataFrame but keeps datasets with the same value of the \
defined column as set and not splitting them, e.g. sampling with the invariant_column='date' samples \
but ensures that all datasets of a certain date are taken or none. This leads to the fact that the \
sample_size is only a guiding target. Depending on the size of the datasets with the same value of \
the *invariant_column* compared to the *sample_size* this could deviate a lot. "
    add_readme = dict()
    add_readme["References"] = "[pandas doc: sample](https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.sample.html)"

    # send debug-level information to the log port
    debug_mode = True
    config_params['debug_mode'] = {'title': 'Debug mode',
                                   'description': 'Sending debug level information to log port',
                                   'type': 'boolean'}
    # target sample size: absolute row count (>= 1) or fraction of rows (< 1)
    sample_size = 0.1
    config_params['sample_size'] = {'title': 'Sample size', 'description': 'Sample size', 'type': 'number'}
    # seed forwarded to DataFrame.sample for reproducibility
    random_state = 1
    config_params['random_state'] = {'title': 'Random state', 'description': 'Random state', 'type': 'integer'}
    # column whose equal-valued rows must be kept or dropped together
    invariant_column = ''
    config_params['invariant_column'] = {'title': 'Invariant column', 'description': 'Column where all the same value records should be kept as a whole in a sample', 'type': 'string'}
def process(msg):
    """Sample rows from the DataFrame carried by ``msg``.

    When ``invariant_column`` is configured, whole groups sharing a value of
    that column are kept or dropped together, so the realized sample size is
    only an approximate target.

    :param msg: api.Message whose body is a pandas DataFrame
    :return: tuple (log string, api.Message with the sampled DataFrame)
    :raises TypeError: if the message body is not a DataFrame
    :raises ValueError: if the sampled DataFrame ends up empty
    """
    # attributes attached to the outgoing message; the previous version
    # re-initialized this dict after the type check, silently dropping
    # the 'operator' entry -- the duplicate initialization is removed
    att_dict = dict()
    att_dict['config'] = dict()
    att_dict['operator'] = 'sample'
    if api.config.debug_mode == True:
        logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='DEBUG')
    else:
        logger, log_stream = slog.set_logging(att_dict['operator'], loglevel='INFO')
    logger.info("Process started")
    time_monitor = tp.progress()

    # start custom process definition
    # test if body refers to a DataFrame type
    df = msg.body
    if not isinstance(df, pd.DataFrame):
        logger.error('Message body does not contain a pandas DataFrame')
        raise TypeError('Message body does not contain a pandas DataFrame')

    ###### start calculation
    sample_size = api.config.sample_size
    if sample_size < 1:
        # values below 1 are interpreted as a fraction of the row count
        sample_size = int(sample_size * df.shape[0])
        if sample_size < 1:
            sample_size = 1
            logger.warning("Fraction of sample size too small. Set sample size to 1.")
    elif sample_size > df.shape[0]:
        logger.warning("Sample size larger than number of rows")

    # fixed format string: the original "{}/() ({})" dropped the row count
    logger.debug("Samples_size: {}/{} ({})".format(sample_size, df.shape[0],
                                                   sample_size / df.shape[0]))

    random_state = api.config.random_state
    invariant_column = tfp.read_value(api.config.invariant_column)

    if invariant_column and sample_size < df.shape[0]:
        # get the average number of records for each value of the invariant
        sc_df = df.groupby(invariant_column)[invariant_column].count()
        sample_size_invariant = int(sample_size / sc_df.mean())
        sample_size_invariant = 1 if sample_size_invariant == 0 else sample_size_invariant  # ensure minimum
        sc_df = sc_df.sample(n=sample_size_invariant, random_state=random_state).to_frame()
        sc_df.rename(columns={invariant_column: 'sum'}, inplace=True)
        # sample the df by merging the two frames on the invariant column
        df = pd.merge(df, sc_df, how='inner', right_index=True, left_on=invariant_column)
        df.drop(columns=['sum'], inplace=True)
    else:
        df = df.sample(n=sample_size, random_state=random_state)
    ###### end calculation

    ##############################################
    # final infos to attributes and info message
    ##############################################
    if df.empty:
        raise ValueError('DataFrame is empty')

    logger.info('End of Process: {}'.format(time_monitor.elapsed_time()))
    att_dict['memory'] = df.memory_usage(deep=True).sum() / 1024 ** 2
    att_dict['columns'] = str(list(df.columns))
    att_dict['shape'] = df.shape
    att_dict['id'] = str(id(df))
    logger.debug('Columns: {}'.format(str(df.columns)))
    logger.debug('Shape (#rows - #columns): {} - {}'.format(df.shape[0], df.shape[1]))
    logger.debug('Memory: {} kB'.format(att_dict['memory']))
    example_rows = EXAMPLE_ROWS if df.shape[0] > EXAMPLE_ROWS else df.shape[0]
    for i in range(0, example_rows):
        # fixed-width preview of the first few rows for the debug log
        att_dict['row_' + str(i)] = str([str(v)[:10].ljust(10) for v in df.iloc[i, :].tolist()])
        logger.debug('Head data: {}'.format(att_dict['row_' + str(i)]))
    # end custom process definition

    log = log_stream.getvalue()
    msg = api.Message(attributes=att_dict, body=df)
    return log, msg
# Operator port definitions consumed by the SDI solution generator
inports = [{'name': 'data', 'type': 'message.DataFrame',"description":"Input data"}]
outports = [{'name': 'log', 'type': 'string',"description":"Logging data"}, \
            {'name': 'data', 'type': 'message.DataFrame',"description":"Output data"}]
def call_on_input(msg) :
    """Port callback: run ``process`` and forward log and data to the outports."""
    log, msg = process(msg)
    api.send(outports[0]['name'], log)
    api.send(outports[1]['name'], msg)

# register the callback on the single input port
api.set_port_callback([inports[0]['name']], call_on_input)
def main() :
    """Manual test entry point: re-registers the callback, which (with the
    mock ``api``) immediately feeds the default test DataFrame through."""
    print('Test: Default')
    api.set_port_callback([inports[0]['name']], call_on_input)
| [
"53856509+thhapke@users.noreply.github.com"
] | 53856509+thhapke@users.noreply.github.com |
3c49719a402982f9cd144fe51380b257eeb1c980 | c4f984e22d5d53e4e50db07df8456ffc9f185f5d | /graphs/tests/graph_test.py | 21b9cd7001168581cd7011d2150eb15bad5384d8 | [] | no_license | Bo0mer/laughing-dangerzone | fc140fd785d1428d7a74a251399b6220941d5a8b | 3e80f5debc24a2c4473ea3e34c703fd2e932d4fc | refs/heads/master | 2021-01-17T17:07:34.253857 | 2013-07-05T10:23:26 | 2013-07-05T10:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,850 | py | import unittest
from graphs.graphs import Graph
class BasicGraphTest(unittest.TestCase):
    """Exercise basic Graph operations: membership, edges, size/order/degree."""

    def setUp(self):
        """Build a small undirected graph with mixed-type nodes."""
        self.nodes = [1, 'Sofia', 'Bourgas', 5.6555]
        self.edges = [(1, 'Sofia'), ('Sofia', 'Bourgas')]
        self.graph = Graph()
        for node in self.nodes:
            self.graph.add_node(node)
        for edge in self.edges:
            self.graph.add_edge(*edge)

    def tearDown(self):
        # delete every fixture created in setUp (the original version
        # forgot self.edges)
        del self.nodes
        del self.edges
        del self.graph

    def test_nodes_in_graph(self):
        """Every added node is reported as a member; unknown values are not."""
        for node in self.nodes:
            self.assertTrue(node in self.graph)
        self.assertFalse('NotANode' in self.graph)

    def test_edges_in_graph(self):
        """Edges are stored symmetrically; absent pairs are not adjacent."""
        for edge in self.edges:
            self.assertTrue(edge[0] in self.graph[edge[1]])
            self.assertTrue(edge[1] in self.graph[edge[0]])
        not_an_edge = 1, 'Bourgas'
        self.assertFalse(not_an_edge[0] in self.graph[not_an_edge[1]])
        self.assertFalse(not_an_edge[1] in self.graph[not_an_edge[0]])

    def test_has_edge(self):
        """has_edge matches the set of added edges."""
        for edge in self.edges:
            self.assertTrue(self.graph.has_edge(*edge))
        not_an_edge = 1, 'Bourgas'
        self.assertFalse(self.graph.has_edge(*not_an_edge))

    def test_remove_edge(self):
        """remove_edge undoes add_edge."""
        edge = 1, 'Bourgas'
        self.graph.add_edge(*edge)
        self.graph.remove_edge(*edge)
        self.assertFalse(self.graph.has_edge(*edge))

    def test_size(self):
        """size() is the number of edges."""
        self.assertEqual(self.graph.size(), len(self.edges))

    def test_order(self):
        """order() is the number of nodes."""
        self.assertEqual(self.graph.order(), len(self.nodes))

    def test_degree(self):
        """degree(node) counts edge endpoints touching the node."""
        for node in self.nodes:
            self.assertEqual(self.graph.degree(node),
                             sum([edge.count(node) for edge in self.edges]))

    def test_is_directed(self):
        """The Graph class builds undirected graphs."""
        self.assertFalse(self.graph.is_directed())
"bo0merzzz@gmail.com"
] | bo0merzzz@gmail.com |
da6990b212765548549d6a7ed409b29dfd3ff68a | 758ca5e2bf50016fbac7022ac5f9036aa8aa099b | /LeetCodeWeb.py | 3359b7e6c9cd5b4f92bd6298419aa98886ca70f5 | [] | no_license | zhantong/leetcode-web | 04f17901e4bf5a6065e35dd126dd7bbcc8b1128f | 3f79f5463e77ed7eab8b808a7004eea8c29fc35e | refs/heads/master | 2021-01-02T22:54:54.797228 | 2017-10-19T02:00:48 | 2017-10-19T02:00:48 | 99,420,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,913 | py | from flask import Flask
from flask import render_template
from flask import request
from flask import redirect
from flask import g
import os.path
from pygments import highlight
from pygments.lexers import get_lexer_by_name
from pygments.formatters import HtmlFormatter
import sqlite3
app = Flask(__name__)
ROOT = os.path.realpath(os.path.dirname(__file__))
DATABASE = 'leetcode.db'
def get_db():
    """Return the per-request SQLite connection, creating it on first use."""
    if getattr(g, '_database', None) is None:
        # lazily open the connection and cache it on the app context
        g._database = sqlite3.connect(DATABASE)
    return g._database
@app.route('/')
def hello_world():
    """Root URL simply forwards to the problem list."""
    target = '/problems'
    return redirect(target)
@app.route('/problems')
def show_problem_list():
    """Render the summary page listing all problems."""
    problem_list = get_problem_list()
    return render_template('problems_summary.html', problem_list=problem_list)
@app.route('/problems/<slug>')
def show_problem(slug):
    """Render one problem's description and highlighted solution code.

    An unknown slug now yields a 404 response instead of a TypeError from
    unpacking ``fetchone()``'s ``None`` result.
    """
    c = get_db().cursor()
    c.execute('SELECT id,title FROM problem WHERE slug=?', (slug,))
    row = c.fetchone()
    if row is None:
        return 'Problem not found', 404
    id, title = row
    description_file_name = str(id).zfill(3) + '. ' + title + '.html'
    file_path = os.path.join(ROOT, 'descriptions', description_file_name)
    if os.path.exists(file_path):
        with open(file_path, 'r', encoding='utf-8') as f:
            description = f.read()
    else:
        # no local description stored for paid-only problems
        description = '收费题目'
    codes = get_codes(('python', 'java', 'c++'), id, title)
    title = str(id) + '. ' + title
    # PJAX requests receive only the content fragment, not the full page
    if 'X-PJAX' in request.headers:
        return render_template('problem_description.html', description=description, codes=codes, title=title,
                               id=id)
    return render_template('problem.html', description=description, codes=codes,
                           problem_list=get_problem_list(), title=title, id=id)
@app.teardown_appcontext
def close_connection(exception):
    """Close the request-scoped SQLite connection when the app context ends."""
    db = getattr(g, '_database', None)
    if db is not None:
        db.close()
def get_codes(code_types, id, title):
    """Collect highlighted solution sources for the requested languages.

    Returns a list of ``(display_name, html)`` tuples, skipping languages
    for which no solution file is stored on disk.
    """
    language_table = {
        'java': ('Java', 'java'),
        'python': ('Python', 'py'),
        'c++': ('C++', 'cpp')
    }
    folder = str(id).zfill(3) + '. ' + title
    results = []
    for lang in code_types:
        display_name, extension = language_table[lang]
        source_path = os.path.join(ROOT, 'submissions', folder, display_name,
                                   'Solution.' + extension)
        if os.path.exists(source_path):
            with open(source_path, 'r', encoding='utf-8') as handle:
                html = highlight(handle.read(), get_lexer_by_name(lang), HtmlFormatter())
            results.append((display_name, html))
    return results
def get_problem_list():
    """Build the navigation list of problems from the database.

    Each entry carries 'id', a relative 'url', and a zero-padded 'name'.
    """
    problem_list = []
    c = get_db().cursor()
    for id, title, slug in c.execute('SELECT id,title,slug FROM problem ORDER BY id'):
        problem_list.append({
            'id': id,
            'url': '/problems/' + slug,
            'name': str(id).zfill(3) + '. ' + title
        })
    return problem_list
if __name__ == '__main__':
    # development server only; use a proper WSGI server in production
    app.run()
| [
"zhantong1994@163.com"
] | zhantong1994@163.com |
3f864b3d1d5178eb9a78d7a79925324374c64f2b | a82cfe35fecd002d36e429caef3a11a8a886af44 | /curso-em-video/fibonacci.py | 176ca299ae49cddecbf8c7c92747b7184f8e298d | [] | no_license | na-thy/estudos-python | 3e0ab1f07aa61fe52bad9b2d677a924f2df51e8a | 5be4a76a302e15034f998a3838c77f7a75737e9d | refs/heads/master | 2023-01-22T09:31:21.953739 | 2020-12-03T17:38:04 | 2020-12-03T17:38:04 | 269,774,328 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 458 | py | import time
cache = {}
def fibonacci(n):
global cache
if n in cache:
return cache[n]
if n == 0:
result = 0
elif n == 1:
result = 1
else:
result = fibonacci(n-1) + fibonacci(n-2)
cache[n] = result
return result
# Time how long it takes to compute and print F(0)..F(20)
start = time.time()
for i in range(0, 21):
    result = fibonacci(i)
    print(i, result)
finish = time.time()
duration = finish - start
print('Computed all 20 in', duration, 'seconds')
"nathy.madureira@gmail.com"
] | nathy.madureira@gmail.com |
e6fbae53f15c88252258810835a67effd5b1dedc | 444a9480bce2035565332d4d4654244c0b5cd47b | /research/cv/PyramidBox/src/dataset.py | 1ba7f67436e429a92d5be659dd9f8a94bac85a93 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference",
"LicenseRef-scancode-proprietary-license"
] | permissive | mindspore-ai/models | 7ede9c6454e77e995e674628204e1c6e76bd7b27 | eab643f51336dbf7d711f02d27e6516e5affee59 | refs/heads/master | 2023-07-20T01:49:34.614616 | 2023-07-17T11:43:18 | 2023-07-17T11:43:18 | 417,393,380 | 301 | 92 | Apache-2.0 | 2023-05-17T11:22:28 | 2021-10-15T06:38:37 | Python | UTF-8 | Python | false | false | 6,634 | py | # Copyright 2022 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import random
from PIL import Image
import numpy as np
from mindspore import dataset as ds
from src.augmentations import preprocess
from src.prior_box import PriorBox
from src.bbox_utils import match_ssd
from src.config import cfg
class WIDERDataset:
"""docstring for WIDERDetection"""
def __init__(self, list_file, mode='train'):
    """Parse a WIDER FACE annotation list and precompute SSD prior boxes.

    :param list_file: text file where each line is
        ``image_path num_faces (x y w h c) * num_faces`` in pixel units
    :param mode: 'train' enables training-time augmentation in pull_item
    """
    super(WIDERDataset, self).__init__()
    self.mode = mode
    self.fnames = []   # image paths with at least one valid box
    self.boxes = []    # per-image corner boxes [x1, y1, x2, y2] (pixels)
    self.labels = []   # per-image class labels
    prior_box = PriorBox(cfg)
    self.default_priors = prior_box.forward()
    self.num_priors = self.default_priors.shape[0]
    self.match = match_ssd
    self.threshold = cfg.FACE.OVERLAP_THRESH
    self.variance = cfg.VARIANCE
    with open(list_file) as f:
        lines = f.readlines()
    for line in lines:
        line = line.strip().split()
        num_faces = int(line[1])
        box = []
        label = []
        for i in range(num_faces):
            # annotation fields come in groups of five: x y w h c
            x = float(line[2 + 5 * i])
            y = float(line[3 + 5 * i])
            w = float(line[4 + 5 * i])
            h = float(line[5 + 5 * i])
            c = int(line[6 + 5 * i])
            # skip degenerate boxes with non-positive width or height
            if w <= 0 or h <= 0:
                continue
            # convert (x, y, w, h) to corner format
            box.append([x, y, x + w, y + h])
            label.append(c)
        if box:
            # keep only images that contributed at least one valid box
            self.fnames.append(line[0])
            self.boxes.append(box)
            self.labels.append(label)
    self.num_samples = len(self.boxes)
def __len__(self):
return self.num_samples
def __getitem__(self, index):
img, face_loc, face_conf, head_loc, head_conf = self.pull_item(index)
return img, face_loc, face_conf, head_loc, head_conf
def pull_item(self, index):
while True:
image_path = self.fnames[index]
img = Image.open(image_path)
if img.mode == 'L':
img = img.convert('RGB')
im_width, im_height = img.size
boxes = self.annotransform(np.array(self.boxes[index]), im_width, im_height)
label = np.array(self.labels[index])
bbox_labels = np.hstack((label[:, np.newaxis], boxes)).tolist()
img, sample_labels = preprocess(img, bbox_labels, self.mode)
sample_labels = np.array(sample_labels)
if sample_labels.size > 0:
face_target = np.hstack(
(sample_labels[:, 1:], sample_labels[:, 0][:, np.newaxis]))
assert (face_target[:, 2] > face_target[:, 0]).any()
assert (face_target[:, 3] > face_target[:, 1]).any()
face_box = face_target[:, :-1]
head_box = self.expand_bboxes(face_box)
head_target = np.hstack((head_box, face_target[
:, -1][:, np.newaxis]))
break
else:
index = random.randrange(0, self.num_samples)
face_truth = face_target[:, :-1]
face_label = face_target[:, -1]
face_loc_t, face_conf_t = self.match(self.threshold, face_truth, self.default_priors,
self.variance, face_label)
head_truth = head_target[:, :-1]
head_label = head_target[:, -1]
head_loc_t, head_conf_t = self.match(self.threshold, head_truth, self.default_priors,
self.variance, head_label)
return img, face_loc_t, face_conf_t, head_loc_t, head_conf_t
def annotransform(self, boxes, im_width, im_height):
boxes[:, 0] /= im_width
boxes[:, 1] /= im_height
boxes[:, 2] /= im_width
boxes[:, 3] /= im_height
return boxes
def expand_bboxes(self,
bboxes,
expand_left=2.,
expand_up=2.,
expand_right=2.,
expand_down=2.):
expand_bboxes = []
for bbox in bboxes:
xmin = bbox[0]
ymin = bbox[1]
xmax = bbox[2]
ymax = bbox[3]
w = xmax - xmin
h = ymax - ymin
ex_xmin = max(xmin - w / expand_left, 0.)
ex_ymin = max(ymin - h / expand_up, 0.)
ex_xmax = max(xmax + w / expand_right, 0.)
ex_ymax = max(ymax + h / expand_down, 0.)
expand_bboxes.append([ex_xmin, ex_ymin, ex_xmax, ex_ymax])
expand_bboxes = np.array(expand_bboxes)
return expand_bboxes
def create_val_dataset(mindrecord_file, batch_size, device_num=1, device_id=0, num_workers=8):
    """
    Build the batched MindSpore pipeline over a pre-generated mindrecord file.

    NOTE(review): despite the name this shuffles (shuffle=True), and the
    original docstring said "for training" - confirm whether validation
    data should really be shuffled.
    """
    column_names = ['img', 'face_loc', 'face_conf', 'head_loc', 'head_conf']
    ds.config.set_num_parallel_workers(num_workers)
    ds.config.set_enable_shared_mem(False)
    ds.config.set_prefetch_size(batch_size * 2)
    train_dataset = ds.MindDataset(mindrecord_file, columns_list=column_names, shuffle=True,
                                   shard_id=device_id, num_shards=device_num)
    train_dataset = train_dataset.batch(batch_size=batch_size, drop_remainder=True)
    return train_dataset
def create_train_dataset(cfg_, batch_size, device_num=1, device_id=0, num_workers=8):
    """
    Build the batched MindSpore training pipeline over the WIDER face set.
    """
    ds.config.set_num_parallel_workers(num_workers)
    ds.config.set_enable_shared_mem(False)
    ds.config.set_prefetch_size(batch_size * 2)
    # Wrap the Python dataset in a sharded, shuffled generator source.
    source = WIDERDataset(cfg_.FACE.TRAIN_FILE, mode='train')
    dataset = ds.GeneratorDataset(
        source,
        column_names=['img', 'face_loc', 'face_conf', 'head_loc', 'head_conf'],
        shuffle=True,
        num_shards=device_num,
        shard_id=device_id,
    )
    return dataset.batch(batch_size=batch_size)
| [
"1162447022@qq.com"
] | 1162447022@qq.com |
444db5d3cea29b642c5fbcc9046c5ad11b7835bd | 44f2ec0b954c6444397c5c9fe2e8b11f77096565 | /drive.py | 360690ace39c05fa800ca50ae0e11ae2a4955628 | [] | no_license | aragun/behavioralcloning | 9b2c5c850da1db4b48a35b6bbf7adfae023a6ec7 | 5a6ee9cb89d7151c9c4b092d8b955eb0758f738f | refs/heads/master | 2021-01-12T05:08:19.323829 | 2017-02-05T09:58:07 | 2017-02-05T09:58:07 | 77,865,597 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,311 | py | import argparse
import base64
import json
from scipy.misc import imresize
import numpy as np
import socketio
import eventlet
import eventlet.wsgi
import time
from PIL import Image
from PIL import ImageOps
from flask import Flask, render_template
from io import BytesIO
from keras.models import model_from_json
# Fix error with Keras and TensorFlow
import tensorflow as tf
# Monkey-patch for a Keras/TensorFlow version incompatibility.
tf.python.control_flow_ops = tf
sio = socketio.Server()  # socket.io server the simulator connects to
app = Flask(__name__)
model = None  # Keras model; loaded in __main__ before the server starts
prev_image_array = None
@sio.on('telemetry')
def telemetry(sid, data):
    """Handle one telemetry frame from the simulator.

    Decodes the centre-camera image, runs the model on it and sends the
    predicted steering angle (with a constant throttle) back to the car.
    """
    # The current steering angle of the car
    steering_angle = data["steering_angle"]
    # The current throttle of the car
    throttle = data["throttle"]
    # The current speed of the car (read but currently unused)
    speed = data["speed"]
    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    # Normalise pixels to [-1, 1] and resize to the network's 66x200x3 input.
    image = imresize(np.asarray(image)/127.5-1.0, (66,200,3))
    image = image[None, :, :, :]
    # This model currently assumes that the features of the model are just the images. Feel free to change this.
    # Note: the telemetry values read above are overwritten by the prediction here.
    steering_angle = float(model.predict(image, batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    throttle = 0.1
    print(steering_angle, throttle)
    send_control(steering_angle, throttle)
@sio.on('connect')
def connect(sid, environ):
    """On a new simulator connection, start with neutral controls."""
    print("connect ", sid)
    send_control(0, 0)
def send_control(steering_angle, throttle):
    """Emit a 'steer' command with the given controls to the simulator."""
    payload = {
        'steering_angle': str(steering_angle),
        'throttle': str(throttle),
    }
    sio.emit("steer", data=payload, skip_sid=True)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Remote Driving')
    parser.add_argument('model', type=str,
                        help='Path to model definition json. Model weights should be on the same path.')
    args = parser.parse_args()
    # Rebuild the Keras model from its JSON architecture file.
    with open(args.model, 'r') as jfile:
        model = model_from_json(json.load(jfile))
    model.compile("adam", "mse")
    # Weights live next to the JSON file with an .h5 extension.
    # NOTE(review): str.replace swaps the first 'json' anywhere in the path,
    # not just the extension - confirm paths never contain 'json' elsewhere.
    weights_file = args.model.replace('json', 'h5')
    model.load_weights(weights_file)
    # wrap Flask application with engineio's middleware
    app = socketio.Middleware(sio, app)
    # deploy as an eventlet WSGI server
    eventlet.wsgi.server(eventlet.listen(('', 4567)), app)
"anuragprateek@gmail.com"
] | anuragprateek@gmail.com |
20c8f2ee5b139e07fc2887c5fd8d627d98c47418 | 2b0189b9a2dee259efce1b3a781f0af47709eb42 | /Chapter_8/8-11_Unchanged_Magicians.py | 30020d3763f9dcdb0cad2422d3640557b43404c3 | [] | no_license | rlongo02/Python-Book-Exercises | c4a8503753fe1048503ee8b77a389ea15d1da651 | fec20482069e9b79ba4ab9ac049ec1bac5c8ca85 | refs/heads/master | 2020-06-19T05:51:42.116008 | 2019-07-16T15:24:58 | 2019-07-16T15:24:58 | 196,587,224 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 473 | py | magicians = ['harry', 'ron', 'albus', 'draco']
copy_magicians = magicians[:]
def show_magicians(group):
    """Print every magician's name in title case, one per line."""
    for name in group:
        print(name.title())
def make_great(group):
    """Prefix every name in *group* with 'great ', mutating the list in place.

    The previous implementation popped from the front of the list while
    iterating over it, which only terminated correctly because the length
    happened to stay constant; rebuilding the contents with slice
    assignment is safe for any input and keeps the in-place contract.
    """
    group[:] = ["great " + magician for magician in group]
# Make the copy great, then show both lists to prove the original survived.
make_great(copy_magicians)
print("Great List:")
show_magicians(copy_magicians)
print('')
print('Unchanged List:')
show_magicians(magicians)
| [
"noreply@github.com"
] | noreply@github.com |
6e875096a2480ee4a294737b2fac3a8b6a82e631 | ee30be0c44e8e64e1d947134f35aebfdc98deec6 | /MasteringPythonDataAnalysis/t1.py | d943cdf9415ff6b1fc07b287d12ab60c3ba32c24 | [] | no_license | herofyf/python_examples | facf614fdce795b989316feb41a049161e65674d | 5d06d1713321a8843da71eee79770c784c193480 | refs/heads/master | 2021-06-11T12:02:28.218089 | 2017-02-14T06:42:15 | 2017-02-14T06:42:15 | 81,889,384 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,601 | py | import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from MasteringPythonDataAnalysis.mydespine import despine
# Global CO2 growth-rate table: columns are year, annual growth rate, uncertainty.
co2_gr = pd.read_csv('co2_gr_gl.txt', delim_whitespace=True,
                     skiprows=62, names=['year', 'rate', 'err'])
def showOrigin():
    """Plot the raw CO2 growth-rate series with error bars.

    Reads the module-level *co2_gr* frame; plt.show() is commented out so
    the plot is only rendered when the caller chooses to display it.
    """
    fig, ax = plt.subplots(1,1)
    ax.errorbar(co2_gr['year'],
                co2_gr['rate'],
                yerr= co2_gr['err'],
                ls = 'None',
                elinewidth=1.5,
                capthick=1.5,
                marker = '.',
                ms = 8)
    despine(ax)
    plt.minorticks_on()
    #plt.show()
from sklearn.linear_model import LinearRegression, Lasso
from sklearn import cross_validation
# NOTE(review): sklearn's train_test_split returns the *train* portions
# first, so with this unpacking x_test/y_test actually receive the larger
# (75%) split and x_train/y_train the held-out part - confirm whether the
# swapped names are intentional.
x_test, x_train, y_test, y_train =\
    cross_validation.train_test_split(
        co2_gr['year'], co2_gr['rate'],
        test_size= 0.75, random_state=0
    )
X_train = x_train[:, np.newaxis]  # column vectors, as sklearn expects
X_test = x_test[:, np.newaxis]
line_x = np.array([1955, 2025])  # endpoints for plotting the fitted line
est_lin = LinearRegression()
est_lin.fit(X_train, y_train)
temp = line_x.reshape(-1, 1)
lin_pred = est_lin.predict(temp)
def printStuff(estimator, A, b):
    """Print a short evaluation report for a fitted linear *estimator*.

    Reports the slope and intercept, the mean squared residual of the
    predictions on (A, b), and the estimator's score (R^2 for sklearn
    regressors).
    """
    name = estimator.__str__()
    name = name.split('(')[0]
    print('+'*6, name, '+'* 6)
    print('Slope: {0:.3f} Intercept:{1:.2f} '.format(
        estimator.coef_[0], estimator.intercept_))
    predTest = estimator.predict(A)
    # Bug fix: residuals must be squared *before* averaging; the previous
    # expression `np.mean(predTest - b) ** 2` squared the mean residual,
    # which is ~0 whenever positive and negative errors cancel.
    print("Mean squared residuals: {0:.2f}".format(
        np.mean((predTest - b) ** 2)
    ))
    print("Variance score: {0:.2f}".format(
        estimator.score(A, b)
    ))
# Report the linear fit on the held-out split.
printStuff(est_lin, X_test, y_test)
| [
"herofyf@hotmail.com"
] | herofyf@hotmail.com |
7fec3d04fbd22569461ebff08189a30e332dd449 | a2106b7fe56ccb03ee923ccc53e3f7cdf8e65b62 | /bayes/rssTest.py | 370e109b2d97a0b8fa9ecc8644bc48d5ccd86b54 | [] | no_license | MayYk/MachineLearninginAction | 0a46d9525a6cb59d9be7587ebded86b595eb08d7 | 6ddf7e77cdcf0e0ba99eb963aa8c0751bb59fea2 | refs/heads/master | 2020-03-07T03:32:51.855369 | 2019-07-11T01:17:31 | 2019-07-11T01:17:31 | 127,239,305 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,434 | py | #!user/bin/env python
# _*_ coding:utf-8 _*_
import bayes
from numpy import *
import feedparser
# RSS源分类器及高频词去除函数
def calcMostFreq(vocabList, fullText):
    """Return up to the 30 most frequent vocabulary tokens.

    Counts how often each token of *vocabList* occurs in *fullText* and
    returns (token, count) pairs sorted by descending count; ties keep
    vocabList order, exactly as before.
    """
    import operator
    from collections import Counter
    # One O(len(fullText)) counting pass instead of calling
    # fullText.count(token) per token, which was O(len(vocabList) * len(fullText)).
    counts = Counter(fullText)
    freqDict = {token: counts[token] for token in vocabList}
    sortedFreq = sorted(freqDict.items(), key=operator.itemgetter(1), reverse=True)
    return sortedFreq[:30]
def localWords(feed1, feed0):
    """Train and evaluate a naive-Bayes classifier over two RSS feeds.

    Entries from *feed1* are labelled 1 and entries from *feed0* are
    labelled 0.  The 30 most frequent tokens are removed from the
    vocabulary, 20 random documents are held out as a test set, and the
    hold-out error rate is printed.

    Returns (vocabList, p0V, p1V): the pruned vocabulary and the per-class
    log-probability vectors produced by bayes.trainNB0.
    """
    import feedparser
    docList = [];
    classList = [];
    fullText = []
    minLen = min(len(feed1['entries']),len(feed0['entries']))
    for i in range(minLen):
        # Take one entry from each feed per iteration.
        wordList = bayes.textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = bayes.textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = bayes.createVocabList(docList)
    top30Words = calcMostFreq(vocabList, fullText)
    # Remove the highest-frequency tokens (mostly stop words).
    for pairW in top30Words:
        if pairW[0] in vocabList:
            vocabList.remove(pairW[0])
    trainingSet = list(range(2*minLen))
    testSet = []
    # Hold out 20 random documents for testing.
    for i in range(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del(trainingSet[randIndex])
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(bayes.bagOfWords2VecMN(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V,p1V,pSpam = bayes.trainNB0(array(trainMat), array(trainClasses))
    errorCount = 0
    for docIndex in testSet:
        wordVector = bayes.bagOfWords2VecMN(vocabList, docList[docIndex])
        if bayes.classifyNB(array(wordVector), p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print('the error rate is:', float(errorCount)/len(testSet))
    return vocabList, p0V, p1V
# Display the most characteristic tokens for each feed.
def getTopWords(ny, sf):
    """Train on the two feeds and print, per class, every token whose
    conditional log-probability exceeds -4.5, most probable first."""
    import operator
    vocabList, p0V, p1V = localWords(ny, sf)
    threshold = -4.5
    topSF = [(vocabList[i], p0V[i]) for i in range(len(p0V)) if p0V[i] > threshold]
    topNY = [(vocabList[i], p1V[i]) for i in range(len(p0V)) if p1V[i] > threshold]
    print('SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF**SF')
    for token, _ in sorted(topSF, key=operator.itemgetter(1), reverse=True):
        print(token)
    print('NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY**NY')
    for token, _ in sorted(topNY, key=operator.itemgetter(1), reverse=True):
        print(token)
# The tutorial's original RSS links are no longer available; two workarounds:
# 1) switch the craigslist city feed - RSS how-to: http://brittanyherself.com/cgg/tutorial-how-to-subscribe-to-craigslists-rss-feeds/
# or
# 2) use a different data source site: http://www.cnblogs.com/femaleprogramer/p/3854970.html
# ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
# sf = feedparser.parse('http://sybay.craigslist.org/stp/index.rss')
if __name__ == '__main__':
    # Fetch both feeds and print the most characteristic words of each.
    ny = feedparser.parse('https://newyork.craigslist.org/search/ats?format=rss')
    sf = feedparser.parse('https://syracuse.craigslist.org/search/ats?format=rss')
    # vocabList, pSF, pNY = localWords(ny, sf)
    getTopWords(ny, sf)
| [
"471519146@qq.com"
] | 471519146@qq.com |
f41bb92d7a8588b556a3187e89551e60d327b03e | 6fa7f99d3d3d9b177ef01ebf9a9da4982813b7d4 | /gPJTSqmJ4qQPxRg5a_21.py | 9fc9af14fff56aa6101c0d3a246a2a1ebb7d3158 | [] | no_license | daniel-reich/ubiquitous-fiesta | 26e80f0082f8589e51d359ce7953117a3da7d38c | 9af2700dbe59284f5697e612491499841a6c126f | refs/heads/master | 2023-04-05T06:40:37.328213 | 2021-04-06T20:17:44 | 2021-04-06T20:17:44 | 355,318,759 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 79 | py |
def func(num):
    """Return the sum over the decimal digits of *num* of
    (digit - number_of_digits)."""
    digits = str(num)
    size = len(digits)
    total = 0
    for ch in digits:
        total += int(ch) - size
    return total
| [
"daniel.reich@danielreichs-MacBook-Pro.local"
] | daniel.reich@danielreichs-MacBook-Pro.local |
b1e4cbf0ffddc1664eba106d1db12c5e68f7c59a | c1b901ed1eee4d5dc2ee252cd51b4e3c14f02554 | /Lime/output_extract.py | 22f7cbaed309f1eb2d7d5ed7fcd449a272591465 | [
"MIT"
] | permissive | lengjiayi/SpeakerVerifiaction-pytorch | 70a86c9c9029a214679e636917fb305a85a94182 | 99eb8de3357c85e2b7576da2a742be2ffd773ead | refs/heads/master | 2023-07-09T20:09:07.715305 | 2021-08-19T11:03:28 | 2021-08-19T11:03:28 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 20,168 | py | #!/usr/bin/env python
# encoding: utf-8
"""
@Author: yangwenhao
@Contact: 874681044@qq.com
@Software: PyCharm
@File: output_extract.py
@Time: 2020/3/21 5:57 PM
@Overview:
"""
from __future__ import print_function
import argparse
import json
import os
import pickle
import random
import time
from collections import OrderedDict
import numpy as np
import torch
import torch._utils
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torchvision.transforms as transforms
from kaldi_io import read_mat
from torch.autograd import Variable
from torch.utils.data import DataLoader
from tqdm import tqdm
from Define_Model.SoftmaxLoss import AngleLinear, AdditiveMarginLinear
from Define_Model.model import PairwiseDistance
from Process_Data.Datasets.KaldiDataset import ScriptTrainDataset, \
ScriptTestDataset, ScriptValidDataset
from Process_Data.audio_processing import ConcateOrgInput, mvnormal, ConcateVarInput
from TrainAndTest.common_func import create_model
# Version conflict
# Backfill torch._utils._rebuild_tensor_v2 on older PyTorch releases so
# checkpoints serialized by newer versions can still be unpickled.
try:
    torch._utils._rebuild_tensor_v2
except AttributeError:
    def _rebuild_tensor_v2(storage, storage_offset, size, stride, requires_grad, backward_hooks):
        tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, stride)
        tensor.requires_grad = requires_grad
        tensor._backward_hooks = backward_hooks
        return tensor
    torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2
import warnings
warnings.filterwarnings("ignore")
# Training settings
# NOTE(review): many help strings below are copy-paste leftovers (e.g.
# '--sample-utt' says 'Dimensionality of the embedding'); treat the flag
# names, types and defaults as authoritative, not the help text.
parser = argparse.ArgumentParser(description='PyTorch Speaker Recognition')
# Data options
parser.add_argument('--train-dir', type=str, help='path to dataset')
parser.add_argument('--test-dir', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--train-set-name', type=str, required=True, help='path to voxceleb1 test dataset')
parser.add_argument('--test-set-name', type=str, required=True, help='path to voxceleb1 test dataset')
parser.add_argument('--sitw-dir', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--sample-utt', type=int, default=120, metavar='SU', help='Dimensionality of the embedding')
parser.add_argument('--test-only', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--check-path', help='folder to output model checkpoints')
parser.add_argument('--extract-path', help='folder to output model grads, etc')
parser.add_argument('--start-epochs', type=int, default=36, metavar='E', help='number of epochs to train (default: 10)')
parser.add_argument('--epochs', type=int, default=36, metavar='E', help='number of epochs to train (default: 10)')
# Data options
parser.add_argument('--feat-dim', default=64, type=int, metavar='N', help='acoustic feature dimension')
parser.add_argument('--input-dim', default=257, type=int, metavar='N', help='acoustic feature dimension')
parser.add_argument('--revert', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--input-length', choices=['var', 'fix'], default='var',
                    help='choose the acoustic features type.')
parser.add_argument('--remove-vad', action='store_true', default=False, help='using Cosine similarity')
parser.add_argument('--mvnorm', action='store_true', default=False,
                    help='using Cosine similarity')
# Model options
parser.add_argument('--model', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--resnet-size', default=8, type=int, metavar='RES', help='The channels of convs layers)')
parser.add_argument('--filter', type=str, default='None', help='replace batchnorm with instance norm')
parser.add_argument('--input-norm', type=str, default='Mean', help='batchnorm with instance norm')
parser.add_argument('--vad', action='store_true', default=False, help='vad layers')
parser.add_argument('--inception', action='store_true', default=False, help='multi size conv layer')
parser.add_argument('--inst-norm', action='store_true', default=False, help='batchnorm with instance norm')
parser.add_argument('--mask-layer', type=str, default='None', help='time or freq masking layers')
parser.add_argument('--mask-len', type=int, default=20, help='maximum length of time or freq masking layers')
parser.add_argument('--block-type', type=str, default='None', help='replace batchnorm with instance norm')
parser.add_argument('--relu-type', type=str, default='relu', help='replace batchnorm with instance norm')
parser.add_argument('--encoder-type', type=str, help='path to voxceleb1 test dataset')
parser.add_argument('--transform', type=str, default="None", help='add a transform layer after embedding layer')
parser.add_argument('--channels', default='64,128,256', type=str,
                    metavar='CHA', help='The channels of convs layers)')
parser.add_argument('--fast', action='store_true', default=False, help='max pooling for fast')
parser.add_argument('--kernel-size', default='5,5', type=str, metavar='KE',
                    help='kernel size of conv filters')
parser.add_argument('--padding', default='', type=str, metavar='KE', help='padding size of conv filters')
parser.add_argument('--stride', default='2', type=str, metavar='ST', help='stride size of conv filters')
parser.add_argument('--time-dim', default=1, type=int, metavar='FEAT', help='acoustic feature dimension')
parser.add_argument('--avg-size', type=int, default=4, metavar='ES', help='Dimensionality of the embedding')
parser.add_argument('--loss-type', type=str, default='soft', help='path to voxceleb1 test dataset')
parser.add_argument('--dropout-p', type=float, default=0., metavar='BST',
                    help='input batch size for testing (default: 64)')
# args for additive margin-softmax
parser.add_argument('--margin', type=float, default=0.3, metavar='MARGIN',
                    help='the margin value for the angualr softmax loss function (default: 3.0')
parser.add_argument('--s', type=float, default=15, metavar='S',
                    help='the margin value for the angualr softmax loss function (default: 3.0')
# args for a-softmax
parser.add_argument('--m', type=int, default=3, metavar='M',
                    help='the margin value for the angualr softmax loss function (default: 3.0')
parser.add_argument('--lambda-min', type=int, default=5, metavar='S',
                    help='random seed (default: 0)')
parser.add_argument('--lambda-max', type=float, default=0.05, metavar='S',
                    help='random seed (default: 0)')
parser.add_argument('--alpha', default=12, type=float,
                    metavar='l2 length', help='acoustic feature dimension')
parser.add_argument('--cos-sim', action='store_true', default=True, help='using Cosine similarity')
parser.add_argument('--embedding-size', type=int, metavar='ES', help='Dimensionality of the embedding')
parser.add_argument('--nj', default=12, type=int, metavar='NJOB', help='num of job')
parser.add_argument('--batch-size', type=int, default=1, metavar='BS',
                    help='input batch size for training (default: 128)')
parser.add_argument('--test-batch-size', type=int, default=1, metavar='BST',
                    help='input batch size for testing (default: 64)')
parser.add_argument('--input-per-spks', type=int, default=192, metavar='IPFT',
                    help='input sample per file for testing (default: 8)')
parser.add_argument('--test-input-per-file', type=int, default=1, metavar='IPFT',
                    help='input sample per file for testing (default: 8)')
# Device options
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='enables CUDA training')
parser.add_argument('--gpu-id', default='1', type=str,
                    help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--seed', type=int, default=123456, metavar='S',
                    help='random seed (default: 0)')
parser.add_argument('--log-interval', type=int, default=1, metavar='LI',
                    help='how many batches to wait before logging training status')
parser.add_argument('--acoustic-feature', choices=['fbank', 'spectrogram', 'mfcc'], default='fbank',
                    help='choose the acoustic features type.')
parser.add_argument('--makemfb', action='store_true', default=False,
                    help='need to make mfb file')
parser.add_argument('--makespec', action='store_true', default=False,
                    help='need to make spectrograms file')
args = parser.parse_args()
# Set the device to use by setting CUDA_VISIBLE_DEVICES env variable in
# order to prevent any memory allocation on unused GPUs
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_id
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Seed every RNG for reproducible sampling below.
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.multiprocessing.set_sharing_strategy('file_system')
if args.cuda:
    cudnn.benchmark = True
# Define visulaize SummaryWriter instance
kwargs = {'num_workers': args.nj, 'pin_memory': False} if args.cuda else {}
# Pair scoring: cosine similarity by default, L2 distance otherwise.
l2_dist = nn.CosineSimilarity(dim=1, eps=1e-6) if args.cos_sim else PairwiseDistance(2)
# NOTE(review): transform and transform_T are built identically in both
# branches, and neither is defined if --input-length is ever extended
# beyond {'var', 'fix'} - confirm this is intended.
if args.input_length == 'var':
    transform = transforms.Compose([
        ConcateOrgInput(remove_vad=args.remove_vad),
    ])
    transform_T = transforms.Compose([
        ConcateOrgInput(remove_vad=args.remove_vad),
    ])
elif args.input_length == 'fix':
    transform = transforms.Compose([
        ConcateVarInput(remove_vad=args.remove_vad),
    ])
    transform_T = transforms.Compose([
        ConcateVarInput(remove_vad=args.remove_vad),
    ])
if args.mvnorm:
    transform.transforms.append(mvnormal())
    transform_T.transforms.append(mvnormal())
file_loader = read_mat
# Training set, sub-sampled to --sample-utt random utterances.
train_dir = ScriptTrainDataset(dir=args.train_dir, samples_per_speaker=args.input_per_spks,
                               loader=file_loader, transform=transform, return_uid=True)
indices = list(range(len(train_dir)))
random.shuffle(indices)
indices = indices[:args.sample_utt]
train_part = torch.utils.data.Subset(train_dir, indices)
# Verification trials drawn from the training-domain data.
veri_dir = ScriptTestDataset(dir=args.train_dir, loader=file_loader, transform=transform_T, return_uid=True)
veri_dir.partition(args.sample_utt)
test_dir = ScriptTestDataset(dir=args.test_dir, loader=file_loader, transform=transform_T, return_uid=True)
test_dir.partition(args.sample_utt)
# Validation set reuses the train set's held-out utterances.
valid_dir = ScriptValidDataset(valid_set=train_dir.valid_set, spk_to_idx=train_dir.spk_to_idx,
                               valid_uid2feat=train_dir.valid_uid2feat, valid_utt2spk_dict=train_dir.valid_utt2spk_dict,
                               loader=file_loader, transform=transform, return_uid=True)
indices = list(range(len(valid_dir)))
random.shuffle(indices)
indices = indices[:args.sample_utt]
valid_part = torch.utils.data.Subset(valid_dir, indices)
def train_extract(train_loader, model, file_dir, set_name, save_per_num=2500):
    """Extract (input, gradient) pairs for every training utterance.

    For each sample, the gradient of the ground-truth speaker's logit with
    respect to the input features is computed.  Pairs are pickled to
    *file_dir* in chunks of *save_per_num*, with the matching utterance
    ids stored as JSON alongside each chunk.
    """
    # switch to evaluate mode
    model.eval()
    input_grads = []
    inputs_uids = []
    pbar = tqdm(enumerate(train_loader))
    for batch_idx, (data, label, uid) in pbar:
        data = Variable(data.cuda(), requires_grad=True)
        logit, _ = model(data)
        if args.loss_type == 'asoft':
            # A-softmax heads return (cos_theta, phi_theta); score with the first.
            classifed, _ = logit
        else:
            classifed = logit
        # Backpropagate from the logit of the ground-truth speaker only.
        classifed[0][label.long()].backward()
        grad = data.grad.cpu().numpy().squeeze().astype(np.float32)
        data = data.data.cpu().numpy().squeeze().astype(np.float32)
        if args.revert:
            grad = grad.transpose()
            data = data.transpose()
        input_grads.append([data, grad])
        inputs_uids.append(uid)
        model.zero_grad()
        if batch_idx % args.log_interval == 0:
            pbar.set_description('Saving {} : [{:8d}/{:8d} ({:3.0f}%)] '.format(
                uid,
                batch_idx + 1,
                len(train_loader.dataset),
                100. * batch_idx / len(train_loader)))
        if (batch_idx + 1) % save_per_num == 0 or (batch_idx + 1) == len(train_loader.dataset):
            # Bug fix: the previous ternary
            #   num = batch_idx // save_per_num if batch_idx + 1 % save_per_num == 0 else ...
            # parsed as `batch_idx + (1 % save_per_num) == 0`, which is never
            # true, so the else branch (the correct 1-based chunk index) was
            # always taken.  Compute that index directly.
            num = batch_idx // save_per_num + 1
            # checkpoint_dir / extract / < dataset > / < set >.*.bin
            filename = file_dir + '/%s.%d.bin' % (set_name, num)
            with open(filename, 'wb') as f:
                pickle.dump(input_grads, f)
            with open(file_dir + '/inputs.%s.%d.json' % (set_name, num), 'w') as f:
                json.dump(inputs_uids, f)
            input_grads = []
            inputs_uids = []
    print('Saving pairs in %s.\n' % file_dir)
    torch.cuda.empty_cache()
def test_extract(test_loader, model, file_dir, set_name, save_per_num=1500):
    """Extract (input, gradient) tuples for verification trials.

    For each (utterance_a, utterance_b, label) trial, the gradient of the
    embedding similarity with respect to both inputs is computed;
    (label, grad_a, grad_b, data_a, data_b) tuples are pickled to
    *file_dir* in chunks of *save_per_num*, with uid pairs stored as JSON.
    """
    # switch to evaluate mode
    model.eval()
    input_grads = []
    inputs_uids = []
    pbar = tqdm(enumerate(test_loader))
    for batch_idx, (data_a, data_b, label, uid_a, uid_b) in pbar:
        data_a = Variable(data_a.cuda(), requires_grad=True)
        data_b = Variable(data_b.cuda(), requires_grad=True)
        _, feat_a = model(data_a)
        _, feat_b = model(data_b)
        # Backpropagate from the trial's similarity score.
        cos_sim = l2_dist(feat_a, feat_b)
        cos_sim[0].backward()
        grad_a = data_a.grad.cpu().numpy().squeeze().astype(np.float32)
        grad_b = data_b.grad.cpu().numpy().squeeze().astype(np.float32)
        data_a = data_a.data.cpu().numpy().squeeze().astype(np.float32)
        data_b = data_b.data.cpu().numpy().squeeze().astype(np.float32)
        if args.revert:
            grad_a = grad_a.transpose()
            data_a = data_a.transpose()
            grad_b = grad_b.transpose()
            data_b = data_b.transpose()
        input_grads.append((label, grad_a, grad_b, data_a, data_b))
        inputs_uids.append([uid_a, uid_b])
        model.zero_grad()
        if batch_idx % args.log_interval == 0:
            pbar.set_description('Saving pair [{:8d}/{:8d} ({:3.0f}%)] '.format(
                batch_idx + 1,
                len(test_loader),
                100. * batch_idx / len(test_loader)))
        if (batch_idx + 1) % save_per_num == 0 or (batch_idx + 1) == len(test_loader.dataset):
            # Bug fix: the previous ternary compared `batch_idx + (1 % save_per_num)`
            # with 0 (operator precedence), which is never true; the else
            # branch - the correct 1-based chunk index - was always taken.
            num = batch_idx // save_per_num + 1
            # checkpoint_dir / extract / < dataset > / < set >.*.bin
            filename = file_dir + '/%s.%d.bin' % (set_name, num)
            with open(filename, 'wb') as f:
                pickle.dump(input_grads, f)
            with open(file_dir + '/inputs.%s.%d.json' % (set_name, num), 'w') as f:
                json.dump(inputs_uids, f)
            input_grads = []
            inputs_uids = []
    print('Saving pairs into %s.\n' % file_dir)
    torch.cuda.empty_cache()
def main():
    """Load checkpoints for each requested epoch and dump input gradients.

    For every epoch in [--start-epochs, --epochs], the matching checkpoint
    is loaded (handling DataParallel 'module.' prefixes), and gradient
    extraction is run over the train/valid/verification/test loaders.
    """
    print('\nNumber of Speakers: {}.'.format(train_dir.num_spks))
    # print the experiment configuration
    print('Current time is \33[91m{}\33[0m.'.format(str(time.asctime())))
    print('Parsed options: {}'.format(vars(args)))
    # instantiate model and initialize weights
    kernel_size = args.kernel_size.split(',')
    kernel_size = [int(x) for x in kernel_size]
    if args.padding == '':
        # Default to 'same'-style padding derived from the kernel size.
        padding = [int((x - 1) / 2) for x in kernel_size]
    else:
        padding = args.padding.split(',')
        padding = [int(x) for x in padding]
    kernel_size = tuple(kernel_size)
    padding = tuple(padding)
    stride = args.stride.split(',')
    stride = [int(x) for x in stride]
    channels = args.channels.split(',')
    channels = [int(x) for x in channels]
    model_kwargs = {'input_dim': args.input_dim, 'feat_dim': args.feat_dim, 'kernel_size': kernel_size,
                    'mask': args.mask_layer, 'mask_len': args.mask_len, 'block_type': args.block_type,
                    'filter': args.filter, 'inst_norm': args.inst_norm, 'input_norm': args.input_norm,
                    'stride': stride, 'fast': args.fast, 'avg_size': args.avg_size, 'time_dim': args.time_dim,
                    'padding': padding, 'encoder_type': args.encoder_type, 'vad': args.vad,
                    'transform': args.transform, 'embedding_size': args.embedding_size, 'ince': args.inception,
                    'resnet_size': args.resnet_size, 'num_classes': train_dir.num_spks,
                    'channels': channels, 'alpha': args.alpha, 'dropout_p': args.dropout_p}
    print('Model options: {}'.format(model_kwargs))
    model = create_model(args.model, **model_kwargs)
    # Swap in the classifier head matching the training loss.
    if args.loss_type == 'asoft':
        model.classifier = AngleLinear(in_features=args.embedding_size, out_features=train_dir.num_spks, m=args.m)
    elif args.loss_type == 'amsoft' or args.loss_type == 'arcsoft':
        model.classifier = AdditiveMarginLinear(feat_dim=args.embedding_size, n_classes=train_dir.num_spks)
    train_loader = DataLoader(train_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    veri_loader = DataLoader(veri_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
    valid_loader = DataLoader(valid_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    test_loader = DataLoader(test_dir, batch_size=args.batch_size, shuffle=False, **kwargs)
    # sitw_test_loader = DataLoader(sitw_test_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    # sitw_dev_loader = DataLoader(sitw_dev_part, batch_size=args.batch_size, shuffle=False, **kwargs)
    resume_path = args.check_path + '/checkpoint_{}.pth'
    print('=> Saving output in {}\n'.format(args.extract_path))
    epochs = np.arange(args.start_epochs, args.epochs + 1)
    for e in epochs:
        # Load model from Checkpoint file
        if os.path.isfile(resume_path.format(e)):
            print('=> loading checkpoint {}'.format(resume_path.format(e)))
            checkpoint = torch.load(resume_path.format(e))
            checkpoint_state_dict = checkpoint['state_dict']
            if isinstance(checkpoint_state_dict, tuple):
                checkpoint_state_dict = checkpoint_state_dict[0]
            # epoch = checkpoint['epoch']
            # if e == 0:
            #     filtered = checkpoint.state_dict()
            # else:
            # Drop BatchNorm bookkeeping entries that older torch versions lack.
            filtered = {k: v for k, v in checkpoint_state_dict.items() if 'num_batches_tracked' not in k}
            if list(filtered.keys())[0].startswith('module'):
                new_state_dict = OrderedDict()
                for k, v in filtered.items():
                    name = k[7:]  # strip the leading 'module.' prefix added by DataParallel
                    new_state_dict[name] = v  # keep the tensor unchanged under the stripped key
                model.load_state_dict(new_state_dict)
            else:
                model_dict = model.state_dict()
                model_dict.update(filtered)
                model.load_state_dict(model_dict)
        else:
            print('=> no checkpoint found at %s' % resume_path.format(e))
            continue
        model.cuda()
        file_dir = args.extract_path + '/epoch_%d' % e
        if not os.path.exists(file_dir):
            os.makedirs(file_dir)
        if not args.test_only:
            # if args.cuda:
            #     model_conv1 = model.conv1.weight.cpu().detach().numpy()
            #     np.save(file_dir + '/model.conv1.npy', model_conv1)
            train_extract(train_loader, model, file_dir, '%s_train'%args.train_set_name)
            train_extract(valid_loader, model, file_dir, '%s_valid'%args.train_set_name)
            test_extract(veri_loader, model, file_dir, '%s_veri'%args.train_set_name)
        test_extract(test_loader, model, file_dir, '%s_test'%args.test_set_name)
# Script entry point.
if __name__ == '__main__':
    main()
| [
"874681044@qq.com"
] | 874681044@qq.com |
fce51080a2168f4847bf2527f71160a3f4db1567 | 46e652b02bcc7c7fd4dedbf08ac9addd2bf92200 | /tests/gcloud_credentials.py | 9a0e1ce0e0870a735f8d2519be68fdbc563d5559 | [
"Apache-2.0"
] | permissive | rotemvil1/ndb-orm | f61a84ca2360f45e0fc35959e7241582402e64b0 | 54880d535d2853b6e657f60fb205f9f145f4a5cc | refs/heads/master | 2020-07-04T08:52:57.850460 | 2019-08-13T22:14:52 | 2019-08-13T22:14:52 | 202,229,900 | 0 | 0 | Apache-2.0 | 2019-08-13T22:02:25 | 2019-08-13T22:02:25 | null | UTF-8 | Python | false | false | 833 | py | # coding: utf-8
# vim: sts=2:ts=2:sw=2
import google.auth.credentials
# from https://github.com/GoogleCloudPlatform/google-cloud-python/blob/master/test_utils/test_utils/system.py
class EmulatorCredentials(google.auth.credentials.Credentials):
"""A mock credential object.
Used to avoid unnecessary token refreshing or reliance on the network
while an emulator is running.
"""
def __init__(self): # pylint: disable=super-init-not-called
self.token = b'seekrit'
self.expiry = None
@property
def valid(self):
"""Would-be validity check of the credentials.
Always is :data:`True`.
"""
return True
def refresh(self, _unused_request): # pylint: disable=unused-argument,no-self-use
"""Off-limits implementation for abstract method."""
raise RuntimeError('Should never be refreshed.')
| [
"elastic.code@gmail.com"
] | elastic.code@gmail.com |
3a07068cceab13f0b25a268e962e98b265899191 | 9d086255b9fc36a4b91a90a944f81a5f0c517fe1 | /evaluation/python_scripts/convert_cm_to_metis.py | 94ba60070f0df7c7a2f916375664041e2ce0bdfb | [] | no_license | AlleHop/qtm-weighted-evaluation | f5be33b2bf8bfe6c744924e55d929014c87334c0 | 4b4152fc51e9acef140a2238d05fdedc93b8a968 | refs/heads/main | 2023-06-06T13:57:50.194361 | 2021-06-22T16:48:03 | 2021-06-22T16:48:03 | 313,912,220 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,395 | py | #!/usr/bin/env python3
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Convert protein .cm files to metis graph files')
parser.add_argument('--threshold', help='The threshold', default=0, type=float)
parser.add_argument('input', help='The input file')
parser.add_argument('output', help='The output file')
args = parser.parse_args()
n = m = 0
with open(args.input, 'r') as input_file:
for ln, line in enumerate(input_file):
if ln == 0:
n = int(line)
neighbors = [[] for i in range(n)]
elif ln <= n:
continue
elif line.rstrip():
u = ln - n
expected_neighbors = n - u
all_neighbors = line.split('\t')
assert(len(all_neighbors) == expected_neighbors)
for i, weight in enumerate(map(float, all_neighbors)):
v = u + i + 1
if weight >= args.threshold:
neighbors[u-1].append(v)
neighbors[v-1].append(u)
m += 1
with open(args.output, 'w') as output_file:
print("{} {} 0".format(n, m), file=output_file)
for neigh in neighbors:
print("{}".format(" ".join(map(str, neigh))), file=output_file)
| [
"ujeyh@student.kit.edu"
] | ujeyh@student.kit.edu |
298867573fe842d683f4131744c0ce53363adb83 | 83498441b790a14e62b70c2cf374e828f75e8801 | /flaskapplication/actions/mltasks/variables.py | 22de10cf4af6928fb75e582181c2bf4e84eb8856 | [] | no_license | amruthsagarkuppili/MLtimeseriesthesis | 3f9c2cc0887c72e5ebdd576fcedc2ea4cc8cc902 | d6039f3bfd9803d7cd51364058f6b0bcc7716a76 | refs/heads/master | 2023-07-14T15:25:38.898261 | 2021-08-30T20:11:27 | 2021-08-30T20:11:27 | 391,723,566 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,960 | py |
directoryloc = 'flaskapplication/actions/generatedcsv/'
historypredloc= '/Users/amruthkuppili/Desktop/proj/SMAFlaskOrganized/flaskapplication/actions/generatedcsv/history_predictions.csv'
predloc = '/Users/amruthkuppili/Desktop/proj/SMAFlaskOrganized/flaskapplication/actions/generatedcsv/predictions.csv'
scalermodel = '/Users/amruthkuppili/Desktop/proj/SMAFlaskOrganized/flaskapplication/actions/scalermodelrepo/'
ECCClink = 'http://www.meds-sdmm.dfo-mpo.gc.ca/alphapro/wave/waveshare/csvData/c44258_csv.zip'
SMAlink = 'https://www.smartatlantic.ca/erddap/tabledap/SMA_halifax.csv?station_name%2Ctime%2Clongitude%2Clatitude%2Cwind_spd_avg%2Cwind_spd_max%2Cwind_dir_avg%2Cair_temp_avg%2Cair_pressure_avg%2Csurface_temp_avg%2Cwave_ht_max%2Cwave_ht_sig%2Cwave_dir_avg%2Cwave_spread_avg%2Cwave_period_max%2Ccurr_spd_avg%2Ccurr_dir_avg%2Ccurr_spd2_avg%2Ccurr_dir2_avg%2Ccurr_spd3_avg%2Ccurr_dir3_avg%2Ccurr_spd4_avg%2Ccurr_dir4_avg%2Ccurr_spd5_avg%2Ccurr_dir5_avg%2Ccurr_spd6_avg%2Ccurr_dir6_avg%2Ccurr_spd7_avg%2Ccurr_dir7_avg%2Ccurr_spd8_avg%2Ccurr_dir8_avg%2Ccurr_spd9_avg%2Ccurr_dir9_avg%2Ccurr_spd10_avg%2Ccurr_dir10_avg%2Ccurr_spd11_avg%2Ccurr_dir11_avg%2Ccurr_spd12_avg%2Ccurr_dir12_avg%2Ccurr_spd13_avg%2Ccurr_dir13_avg%2Ccurr_spd14_avg%2Ccurr_dir14_avg%2Ccurr_spd15_avg%2Ccurr_dir15_avg%2Ccurr_spd16_avg%2Ccurr_dir16_avg%2Ccurr_spd17_avg%2Ccurr_dir17_avg%2Ccurr_spd18_avg%2Ccurr_dir18_avg%2Ccurr_spd19_avg%2Ccurr_dir19_avg%2Ccurr_spd20_avg%2Ccurr_dir20_avg&time%3E=2013-11-07T16%3A23%3A01Z&time%3C=2020-09-07T18%3A53%3A01Z'
thresholds = {
"sig_wv_ht_threshold_safe" : 1.25,
"sig_wv_ht_threshold_moderate" : 2,
"mx_wv_ht_threshold_safe" : 5,
"mx_wv_ht_threshold_moderate" : 6,
"wv_prd_threshold_safe" : 6,
"wv_prd_threshold_moderate" : 7,
"wnd_spd_threshold_safe" : 10,
"wnd_spd_ht_threshold_moderate" : 15
}
timestepsmodel = 3
anomalytimestepsmodel = 1
featurecolumns = 6
targetcolumns = 4
confidence = 95 | [
"amruth.sagar.kuppili@outlook.com"
] | amruth.sagar.kuppili@outlook.com |
063c1d4ad0571db3958a3dc5ba8a0060c1892f19 | 5c9d0f7a0b74613dc633004dcaa000f36cdd6096 | /tests/test_workspace.py | 0ea2e3c65e25ce71f80a652a0c8e9cadabbd6da7 | [
"Apache-2.0"
] | permissive | mayani-nv/model_navigator | 05291ed5f2fea7fd286da38f231cf3e391d2f82a | 925255bbeb9be7ac6f35407267e87a29a33087ab | refs/heads/main | 2023-07-17T02:24:13.432380 | 2021-08-17T15:24:15 | 2021-08-17T15:24:15 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,108 | py | # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
from pathlib import Path
from model_navigator.utils.workspace import Workspace
def test_workspace_exists():
"""Workspace path exists - is created"""
with tempfile.TemporaryDirectory() as temp_dir:
workspace = Workspace(temp_dir)
assert workspace.path == Path(temp_dir)
assert workspace.path.exists()
assert workspace.exists()
dummy_workspace_path = Path(temp_dir) / "dummy/workspace"
workspace = Workspace(dummy_workspace_path)
assert workspace.path == dummy_workspace_path
assert workspace.path.exists()
assert workspace.exists()
def test_workspace_empty():
"""Verifying workspace empty method"""
with tempfile.TemporaryDirectory() as temp_dir:
workspace = Workspace(temp_dir)
assert workspace.path == Path(temp_dir)
assert workspace.empty()
_create_dummy_file(workspace)
assert not workspace.empty()
def test_workspace_cleaning():
"""Test cleaning of workspace"""
with tempfile.TemporaryDirectory() as temp_dir:
workspace = Workspace(temp_dir)
_create_dummy_file(workspace)
assert not workspace.empty()
workspace.clean()
assert workspace.exists()
assert workspace.empty()
def _create_dummy_file(workspace):
dummy_path = workspace.path / "foo/bar.txt"
dummy_path.parent.mkdir(parents=True)
with dummy_path.open("w") as dummy_file:
dummy_file.write("foo bar")
| [
"pziecina@nvidia.com"
] | pziecina@nvidia.com |
a48d018ca8646335dbdf1231fa79901aa250f7d4 | f2b3b709a4e85266331967f958bd517caa7fb8d2 | /stocks/prophet_example.py | 0fe2fe7b292e4981592f7987d9598eda921efcd9 | [] | no_license | XiuqiXi/stock_US | 7f863b2f7ee3427d0a874a1f563bfae2ee497514 | 549ef3db14c0450fc5b4f84f1528956283974248 | refs/heads/master | 2023-06-29T11:37:09.212536 | 2021-08-01T03:04:26 | 2021-08-01T03:04:26 | 381,985,503 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,864 | py | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 8 16:19:24 2021
@author: xixiu
"""
import datetime
import time
import pandas as pd
from pandas import Series,DataFrame
from Api import download_data
from prophet import Prophet
import matplotlib.pyplot as plt
config = {
"alpha_vantage": {
"function":"TIME_SERIES_INTRADAY",
"key": "PR3XLLYLAN8V9CBY", # Claim your free API key here: https://www.alphavantage.co/support/#api-key
"symbol": "AMZN",
"outputsize": "full",
"interval": "1min",
"key_close": "4. close",
},
"data": {
"window_size": 3,
"train_split_size": 0.80,
},
"plots": {
"xticks_interval": 10, # show a date every 90 days
"color_actual": "#001f3f",
"color_train": "#3D9970",
"color_val": "#0074D9",
"color_pred_train": "#3D9970",
"color_pred_val": "#0074D9",
"color_pred_test": "#FF4136",
},
"model": {
"input_size": 1, # since we are only using 1 feature, close price
"num_lstm_layers": 2,
"lstm_size": 32,
"dropout": 0.2,
},
"training": {
"device": "cpu", # "cuda" or "cpu"
"batch_size": 64,
"num_epoch": 100,
"learning_rate": 0.01,
"scheduler_step_size": 40,
}
}
data_date, data_close_price, num_data_points, display_date_range = download_data(config)
df = {'ds': data_date,
'y': data_close_price}
df = DataFrame(df)
df['y'] = (df['y'] - df['y'].mean()) / (df['y'].std())
m = Prophet()
m.fit(df)
future = m.make_future_dataframe(periods=8000, freq='min')
future.tail()
forecast = m.predict(future)
m.plot(forecast)
# m.plot_components(forecast)
x1 = forecast['ds']
y1 = forecast['yhat']
y2 = forecast['yhat_lower']
y3 = forecast['yhat_upper']
plt.plot(x1,y1)
plt.plot(x1,y2)
plt.plot(x1,y3)
plt.show()
| [
"xixiuqi@outlook.com"
] | xixiuqi@outlook.com |
9b99c2c6ce68f75017d2b6b6789bddaa64d07d51 | 5c44b0d9c0253e266704d50b4a96a4302f2e3886 | /test_e2e/test_app.py | 50b1052f8fcda88b64d8d5218ae14483ccb9283a | [] | no_license | arjpaiva/DevOps-Course-Starter | 2c2e83b1d8ff28a31c90cf6c2aee1abae8940478 | ce2efc384d15d1f218b37c2fb5ca9059a12100e3 | refs/heads/master | 2023-07-10T13:07:16.153098 | 2021-05-06T10:19:13 | 2021-05-06T10:19:13 | 280,157,553 | 0 | 0 | null | 2021-05-06T10:19:14 | 2020-07-16T13:11:46 | Python | UTF-8 | Python | false | false | 2,517 | py | import os
import app
import pytest
import trello_service
from card import Status
from dotenv import load_dotenv, find_dotenv
from selenium import webdriver
from threading import Thread
@pytest.fixture(scope="module")
def driver():
opts = webdriver.ChromeOptions()
opts.add_argument('--headless')
opts.add_argument('--no-sandbox')
with webdriver.Chrome("./chromedriver", options=opts) as driver:
yield driver
@pytest.fixture(scope='module')
def new_board():
# Load env properties
file_path = find_dotenv('.env')
load_dotenv(file_path, override=True)
# Create the new board & update the board id environment variable
board_id = trello_service.create_board('selenium')
os.environ['BOARD_ID'] = board_id
lists = trello_service.get_lists_per_board()
if lists is None:
return
os.environ['TODO_LIST_ID'] = lists[Status.TODO]
os.environ['DOING_LIST_ID'] = lists[Status.DOING]
os.environ['DONE_LIST_ID'] = lists[Status.DONE]
# construct the new application
application = app.create_app()
# start the app in its own thread.
thread = Thread(target=lambda: application.run(use_reloader=False))
thread.daemon = True
thread.start()
yield app
# Tear Down
thread.join(1)
trello_service.delete_board()
def test_item_journey(driver, new_board):
driver.get('http://localhost:5000/')
assert driver.title == 'To-Do App'
todo_empty_list = driver.find_element_by_id('no-todo-items-message')
assert 'No items found' in str(todo_empty_list.text)
doing_empty_list = driver.find_element_by_id('no-doing-items-message')
assert 'No items found' in str(doing_empty_list.text)
done_empty_list = driver.find_element_by_id('no-done-items-message')
assert 'No items found' in str(done_empty_list.text)
driver.find_element_by_id('add-item').click()
driver.implicitly_wait(5)
new_title_input = driver.find_element_by_id('title')
new_title_input.send_keys('test-name')
new_title_input.submit()
driver.implicitly_wait(5)
todo_list = driver.find_element_by_id('todo-item-title')
assert 'test-name' in str(todo_list.text)
driver.find_element_by_id('move_item_to_doing').click()
doing_list = driver.find_element_by_id('doing-item-title')
assert 'test-name' in str(doing_list.text)
driver.find_element_by_id('move_item_to_done').click()
done_list = driver.find_element_by_id('done-item-title')
assert 'test-name' in str(done_list.text)
| [
"anarita.jegundodepaiva@aexp.com"
] | anarita.jegundodepaiva@aexp.com |
409113163ba422295c60390e520c2e239aed1d2a | 373aca822ab5068a4e4184821b72614cf45ebb89 | /app/dashboard/io_adafruit.py | b0c0b2cde21f4f4735d501d3bbfeac4cfe40dd2e | [] | no_license | ajinathkumbhar/iot-rasp-phms | 7c641ebd816f44dcf3f4ea1c34b924f1b48251cc | 6ed5988909961af23b1e604a0f2e827c41603aec | refs/heads/stable | 2020-04-07T03:17:21.338962 | 2019-03-07T17:17:23 | 2019-03-07T17:17:23 | 158,010,337 | 1 | 0 | null | 2019-03-07T17:17:24 | 2018-11-17T17:56:37 | Python | UTF-8 | Python | false | false | 3,465 | py | # Example of using the MQTT client class to subscribe to and publish feed values.
# Import standard python modules.
import random
import sys
import time
from app.other import utils
from app.sensors.accevents import AccEvents
import os
import datetime
# Import Adafruit IO MQTT client.
from Adafruit_IO import MQTTClient
from app.reports.reportmail import Pimail
import Queue
TAG = os.path.basename(__file__)
mEmail = Pimail()
qSens = Queue.Queue(maxsize=1)
mAccEvents = AccEvents()
#----------------------------------------
feedDeviceID = 'phmsdeviceid'
feedTemp = 'phmstempstatus'
feedHumi = 'phmshumistatus'
feedPulse = 'phmspulsestatus'
feedLastOnline = 'phmsstatus'
feedAccEventName = 'phmseventname'
feedAccEventTime = 'phmseventtime'
feedreport = 'phmsreport'
# Set to your Adafruit IO key.
# Remember, your key is a secret,
# so make sure not to publish it when you publish this code!
ADAFRUIT_IO_KEY = 'ce57f54de4464c2e8b2d2cccb2968072'
# Set to your Adafruit IO username.
# (go to https://accounts.adafruit.com to find your username)
ADAFRUIT_IO_USERNAME = 'ajinathkumbhar'
isClientConnected = False
mLast_sens_data = None
# Define callback functions which will be called when certain events happen.
def connected(client):
utils.PLOGD(TAG,'Connected to Adafruit IO! Listening for DemoFeed changes...')
# Subscribe to changes on a feed named DemoFeed.
client.subscribe(feedreport)
def disconnected(client):
# Disconnected function will be called when the mClient disconnects.
utils.PLOGD(TAG,'Disconnected from Adafruit IO!')
sys.exit(1)
def message(client, feed_id, payload):
utils.PLOGD(TAG,'Feed {0} received new value: {1}'.format(feed_id, payload))
if not qSens.empty() and int(payload):
utils.PLOGD(TAG,'------ send report ---------------')
sens = qSens.get()
mEmail.send(sens)
class ioAdafruitDash():
def __init__(self):
self.mClient = MQTTClient(ADAFRUIT_IO_USERNAME, ADAFRUIT_IO_KEY)
def setupClient(self):
# Setup the callback functions defined above.
self.mClient.on_connect = connected
self.mClient.on_disconnect = disconnected
self.mClient.on_message = message
# Connect to the Adafruit IO server.
self.mClient.connect()
# The first option is to run a thread in the background so you can continue
# doing things in your program.
self.mClient.loop_background()
print 'Connecting.',
while not self.mClient.is_connected():
print '.',
time.sleep(.5)
def update(self,sd):
if not self.mClient.is_connected():
utils.PLOGE(TAG,'Client not connected ... Check setupClient')
return
utils.PLOGD(TAG,"Update dashboard for : " + sd.device_id)
self.mClient.publish(feedDeviceID, str(sd.device_id))
self.mClient.publish(feedTemp, sd.temp)
self.mClient.publish(feedHumi, sd.humi)
self.mClient.publish(feedPulse, sd.hbeat)
self.mClient.publish(feedAccEventTime, sd.acc_event[0])
self.mClient.publish(feedAccEventName, mAccEvents.get_event_str(sd.acc_event[1]))
self.mClient.publish(feedLastOnline, datetime.datetime.now().strftime("%Y-%B-%d %H:%M:%S"))
if not qSens.empty():
sens = qSens.get()
utils.PLOGD(TAG,str(sens.temp))
if not qSens.full():
qSens.put(sd)
| [
"ajinathkumbhar@gmail.com"
] | ajinathkumbhar@gmail.com |
0260a70149d8404d3ddc7b54e1d98b54c8c83f32 | 466fe2851d646f9a048206c4cf2e8681a438fc13 | /w3resource_strings/remove_all_consecutive_duplicates.py | 28bb59f63a576dcf658f9392df95c6b9451a936e | [] | no_license | curious2015/30-days-of-code-python | e7b9de00585854849f847a2c44636af4b48c070a | d60bd480a2019e5b22d6339ea12537e6638195c8 | refs/heads/master | 2023-03-16T17:29:51.674695 | 2020-07-07T00:44:01 | 2020-07-07T00:44:01 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 575 | py | """
Remove all consecutive duplicates of a given string
"""
import itertools
str1 = "xxxxyyyyyyxxxxxxyyyyyyyy"
grouped = itertools.groupby(str1)
for group in grouped:
print(group)
for grouper in group[1]:
print(grouper)
result = ""
for i in grouped:
result += i[0]
print(result)
result = "".join(i for i, _ in itertools.groupby(str1))
print(result)
str1 = "xxxxyyyyyyxxxxxxyyyyyyyy"
lst = list(str1)
prev = str1[0]
i = 1
while i < len(lst):
if prev == lst[i]:
del lst[i]
else:
i += 1
prev = lst[i]
print("".join(lst))
| [
"korchak.tetiana@gmail.com"
] | korchak.tetiana@gmail.com |
e80525c8787bc3f08f9191d08d0fd04eff310d11 | fea0183acd5cfdbed49a1b4fd401187e07e496de | /userbot/plugins/afk.py | 14fb61295876666d487e598cd71acf3b2ec0ff30 | [
"MIT"
] | permissive | newtoworld/HardcoreUserbot | cc8979692f420f8d81828849bc14c804a6a03c76 | fcf2b5c5496a8f992e940fdc28f35ae5052a0c3e | refs/heads/master | 2021-02-08T21:16:13.595912 | 2020-03-01T18:30:00 | 2020-03-01T18:30:00 | 244,198,113 | 0 | 0 | null | 2020-03-01T18:02:21 | 2020-03-01T18:02:20 | null | UTF-8 | Python | false | false | 5,494 | py | """AFK Plugin for @UniBorg
Syntax: .afk REASON"""
import asyncio
import datetime
from telethon import events
from telethon.tl import functions, types
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
USER_AFK = {}
afk_time = None
last_afk_message = {}
@borg.on(events.NewMessage(outgoing=True)) # pylint:disable=E0602
async def set_not_afk(event):
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
current_message = event.message.message
if ".afk" not in current_message and "yes" in USER_AFK: # pylint:disable=E0602
try:
await borg.send_message( # pylint:disable=E0602
Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
"Set AFK mode to False"
)
except Exception as e: # pylint:disable=C0103,W0703
await borg.send_message( # pylint:disable=E0602
event.chat_id,
"Please set `PRIVATE_GROUP_BOT_API_ID` " + \
"for the proper functioning of afk functionality " + \
"in @xtratgbot\nCheck pinned message for more info.\n\n `{}`".format(str(e)),
reply_to=event.message.id,
silent=True
)
USER_AFK = {} # pylint:disable=E0602
afk_time = None # pylint:disable=E0602
@borg.on(events.NewMessage(pattern=r"\.afk ?(.*)", outgoing=True)) # pylint:disable=E0602
async def _(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
global reason
USER_AFK = {}
afk_time = None
last_afk_message = {}
reason = event.pattern_match.group(1)
if not USER_AFK: # pylint:disable=E0602
last_seen_status = await borg( # pylint:disable=E0602
functions.account.GetPrivacyRequest(
types.InputPrivacyKeyStatusTimestamp()
)
)
if isinstance(last_seen_status.rules, types.PrivacyValueAllowAll):
afk_time = datetime.datetime.now() # pylint:disable=E0602
USER_AFK = f"yes: {reason}" # pylint:disable=E0602
if reason:
await event.edit(f"Set AFK mode to True, and Reason is {reason}")
else:
await event.edit(f"Set AFK mode to True")
await asyncio.sleep(5)
await event.delete()
try:
await borg.send_message( # pylint:disable=E0602
Config.PRIVATE_GROUP_BOT_API_ID, # pylint:disable=E0602
f"Set AFK mode to True, and Reason is {reason}"
)
except Exception as e: # pylint:disable=C0103,W0703
logger.warn(str(e)) # pylint:disable=E0602
@borg.on(events.NewMessage( # pylint:disable=E0602
incoming=True,
func=lambda e: bool(e.mentioned or e.is_private)
))
async def on_afk(event):
if event.fwd_from:
return
global USER_AFK # pylint:disable=E0602
global afk_time # pylint:disable=E0602
global last_afk_message # pylint:disable=E0602
afk_since = "**a while ago**"
current_message_text = event.message.message.lower()
if "afk" in current_message_text:
# userbot's should not reply to other userbot's
# https://core.telegram.org/bots/faq#why-doesn-39t-my-bot-see-messages-from-other-bots
return False
if USER_AFK and not (await event.get_sender()).bot: # pylint:disable=E0602
if afk_time: # pylint:disable=E0602
now = datetime.datetime.now()
datime_since_afk = now - afk_time # pylint:disable=E0602
time = float(datime_since_afk.seconds)
days = time // (24 * 3600)
time = time % (24 * 3600)
hours = time // 3600
time %= 3600
minutes = time // 60
time %= 60
seconds = time
if days == 1:
afk_since = "**Yesterday**"
elif days > 1:
if days > 6:
date = now + \
datetime.timedelta(
days=-days, hours=-hours, minutes=-minutes)
afk_since = date.strftime("%A, %Y %B %m, %H:%I")
else:
wday = now + datetime.timedelta(days=-days)
afk_since = wday.strftime('%A')
elif hours > 1:
afk_since = f"`{int(hours)}h{int(minutes)}m` **ago**"
elif minutes > 0:
afk_since = f"`{int(minutes)}m{int(seconds)}s` **ago**"
else:
afk_since = f"`{int(seconds)}s` **ago**"
msg = None
message_to_reply = f"My Master Has Been Gone For {afk_since}\nWhere He Is: ONLY GOD KNOWS " + \
f"\n\n__I promise I'll back in a few hours__\n**REASON**: {reason}" \
if reason \
else f"**HOLA NOOBS 😏**\n\n[Roses are red,\nViolets are blue,\nLeave me a message,\nAnd I'll get back to you...](https://telegra.ph/file/a42399b3c33aecb8d794c.jpg) "
msg = await event.reply(message_to_reply)
await asyncio.sleep(5)
if event.chat_id in last_afk_message: # pylint:disable=E0602
await last_afk_message[event.chat_id].delete() # pylint:disable=E0602
last_afk_message[event.chat_id] = msg # pylint:disable=E0602
| [
"noreply@github.com"
] | noreply@github.com |
75e15e4e4ea0b893f51a120fa48a56982b457760 | 8523d65983af15b637c0c205edfe2c0e06a5a259 | /coffea/processor/servicex/__init__.py | 0b5c5727df7b686bcf972657b798b931cdcc8cee | [
"BSD-3-Clause"
] | permissive | kmohrman/coffea | c77d723a4458aec6103da1d4f1465dc1dc0e8650 | 1963fc9371552b348a15084f5bde9390be1e6e1c | refs/heads/master | 2023-05-09T10:07:47.962120 | 2021-06-01T23:32:34 | 2021-06-01T23:32:34 | 373,205,081 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,757 | py | # Copyright (c) 2019, IRIS-HEP
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from .data_source import *
from .sx_qastle import *
from .analysis import *
from .local_executor import *
__all__ = [
"DataSource",
"FuncAdlDataset",
"sx_event_stream",
"Analysis",
"LocalExecutor",
]
| [
"ben@peartreestudio.net"
] | ben@peartreestudio.net |
d02675af36e561effeb7be12519b74aee750f8f3 | 66dc96d4ef8b7edcdfbb2eacdd95d6044974458f | /myvote/tests/views/test_myvote_explore.py | 9cdbf71b0952d7b46ac63f52ea267ba5ad71aa85 | [] | no_license | jessereitz/myvote | 26d627f3b501fe7c041c39f18cc4252178c3e672 | 63262c4b93d1298b7c0519da5fdc365f62eea13e | refs/heads/master | 2021-09-07T09:34:04.288183 | 2018-02-21T00:44:19 | 2018-02-21T00:44:19 | 118,203,451 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,521 | py | from django.contrib.auth.models import User
from django.urls import reverse, resolve
from django.test import TestCase
from myvote.models import Poll, Option, Vote
from tests.testing_helpers import create_test_user, create_polls
# CONSTANTS
POLLS_PER_PAGE = 10
USERNAME = 'testuser'
PASSWORD = 'testpassword12'
class ExploreTests(TestCase):
def setUp(self):
self.url = reverse('explore polls')
self.user = create_test_user(username=USERNAME, password=PASSWORD)
create_polls(self.user, amount=10)
def login_helper(self):
return self.client.login(username=USERNAME, password=PASSWORD)
def test_logged_out_view(self):
"""
Get request to explore view should return 200 and a list of ten
polls.
"""
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), POLLS_PER_PAGE)
def test_logged_in_view(self):
"""
Should return the same as test_logged_out_view.
"""
login = self.login_helper()
self.assertTrue(login)
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), POLLS_PER_PAGE)
def test_pagination_links_and_poll_lists(self):
"""
Tests for next/previous page links.
"""
create_polls(self.user, start_num=10, amount=30)
get_response = self.client.get(self.url)
# view should still return 10 polls in get request
self.assertEqual(len(get_response.context.get('polls')), POLLS_PER_PAGE)
# Get page 1
self.assertContains(get_response, "?page=2")
self.assertNotContains(get_response, "?page=1")
self.assertNotContains(get_response, "?page=3")
self.assertEqual(len(get_response.context.get('polls')), POLLS_PER_PAGE)
# page 2
get_page_2 = self.client.get(self.url + "?page=2")
self.assertContains(get_page_2, "?page=1")
self.assertContains(get_page_2, "?page=3")
self.assertNotContains(get_page_2, "?page=2")
self.assertEqual(len(get_response.context.get('polls')), POLLS_PER_PAGE)
# page 3
get_page_3 = self.client.get(self.url + "?page=3")
self.assertNotContains(get_page_3, "?page=1")
self.assertContains(get_page_3, "?page=2")
self.assertNotContains(get_page_3, "?page=3")
self.assertEqual(len(get_response.context.get('polls')), POLLS_PER_PAGE)
class ExploreRecentTests(TestCase):
def setUp(self):
self.user = create_test_user(username=USERNAME, password=PASSWORD)
# url to view self.user recent polls
self.url = reverse('explore recent polls', kwargs={'user_id':1})
def login_helper(self):
return self.client.login(username=USERNAME, password=PASSWORD)
def test_logged_out_view_less_than_10(self):
"""
Should display the most recent polls from user.
"""
create_polls(self.user, amount=3)
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), 3)
def test_logged_out_view_exactly_10(self):
"""
Should display the 10 most recent polls from user.
"""
create_polls(self.user, amount=10)
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), 10)
self.assertNotContains(get_response, '?page=2')
def test_logged_out_view_more_than_10(self):
"""
Should display the 10 most recent polls from user with pagination.
"""
create_polls(self.user, amount=20)
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), 10)
self.assertContains(get_response, '?page=2')
def test_logged_in_view_less_than_10(self):
"""
Should display the most recent polls from user.
"""
login = self.login_helper()
self.assertTrue(login)
create_polls(self.user, amount=3)
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), 3)
def test_logged_in_view_exactly_10(self):
"""
Should display the 10 most recent polls from user.
"""
login = self.login_helper()
self.assertTrue(login)
create_polls(self.user, amount=10)
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), 10)
self.assertNotContains(get_response, '?page=2')
def test_logged_in_view_more_than_10(self):
"""
Should display the 10 most recent polls from user with pagination.
"""
login = self.login_helper()
self.assertTrue(login)
create_polls(self.user, amount=20)
get_response = self.client.get(self.url)
self.assertEqual(get_response.status_code, 200)
self.assertEqual(len(get_response.context.get('polls')), 10)
self.assertContains(get_response, '?page=2')
| [
"jessereitz1@gmail.com"
] | jessereitz1@gmail.com |
4c01dcd4b766e3193db66c69655f283ea5b409d4 | 3283190a5998be4d9adda81671920de2cb9880b3 | /code/temple.py | 61749da68364939f2b932b8c25380e4f128aec01 | [] | no_license | tritechsc/mcpi-python | 0c114471376d80294be98aa839da0026926f58db | a49e95172796cfc78736ee102c5b4fcdd7f28a5c | refs/heads/master | 2023-06-03T09:03:08.940250 | 2021-06-24T21:34:59 | 2021-06-24T21:34:59 | 124,266,394 | 3 | 2 | null | null | null | null | UTF-8 | Python | false | false | 7,287 | py | #Juan Barraza
mc = Minecraft.create("10.183.0.2", 4711)
from mcpi.minecraft import Minecraft
from mcpi import block
mc = Minecraft.create()
x, y, z = mc.player.getPos()
zz = z + 1
# Build the temple out of block id 7 (BEDROCK, per the block-ID table at the
# end of this file).  Footprint is 9 wide (x-4..x+4) by 7 deep (zz..zz+6).
# Front edge of the ground-level slab (y):
mc.setBlock(x,y, zz, 7)
mc.setBlock(x+1,y,zz, 7)
mc.setBlock(x+2,y,zz, 7)
mc.setBlock(x+3,y,zz, 7)
mc.setBlock(x+4,y,zz, 7)
mc.setBlock(x-1,y,zz, 7)
mc.setBlock(x-2,y,zz, 7)
mc.setBlock(x-3,y,zz, 7)
mc.setBlock(x-4,y,zz, 7)
# Left (x-4) and right (x+4) edges of the slab:
mc.setBlock(x-4,y,zz+1, 7)
mc.setBlock(x-4,y,zz+2, 7)
mc.setBlock(x-4,y,zz+3, 7)
mc.setBlock(x-4,y,zz+4, 7)
mc.setBlock(x-4,y,zz+5, 7)
mc.setBlock(x-4,y,zz+6, 7)
mc.setBlock(x+4,y,zz+1, 7)
mc.setBlock(x+4,y,zz+2, 7)
mc.setBlock(x+4,y,zz+3, 7)
mc.setBlock(x+4,y,zz+4, 7)
mc.setBlock(x+4,y,zz+5, 7)
mc.setBlock(x+4,y,zz+6, 7)
mc.setBlock(x,y,zz+6, 7)
#Side Walls
# Back edge (zz+6) of the slab.  NOTE(review): a few of these repeat blocks
# already placed above (e.g. x+4/x-4 at zz+6) -- harmless but redundant.
mc.setBlock(x+1,y,zz+6, 7)
mc.setBlock(x+2,y,zz+6, 7)
mc.setBlock(x+3,y,zz+6, 7)
mc.setBlock(x+4,y,zz+6, 7)
mc.setBlock(x-1,y,zz+6, 7)
mc.setBlock(x-2,y,zz+6, 7)
mc.setBlock(x-3,y,zz+6, 7)
mc.setBlock(x-4,y,zz+6, 7)
# Fill the slab interior column by column (x-3 .. x+3, zz+1 .. zz+5):
mc.setBlock(x-3,y,zz+1, 7)
mc.setBlock(x-3,y,zz+2, 7)
mc.setBlock(x-3,y,zz+3, 7)
mc.setBlock(x-3,y,zz+4, 7)
mc.setBlock(x-3,y,zz+5, 7)
mc.setBlock(x-2,y,zz+1, 7)
mc.setBlock(x-2,y,zz+2, 7)
mc.setBlock(x-2,y,zz+3, 7)
mc.setBlock(x-2,y,zz+4, 7)
mc.setBlock(x-2,y,zz+5, 7)
mc.setBlock(x-1,y,zz+1, 7)
mc.setBlock(x-1,y,zz+2, 7)
mc.setBlock(x-1,y,zz+3, 7)
mc.setBlock(x-1,y,zz+4, 7)
mc.setBlock(x-1,y,zz+5, 7)
mc.setBlock(x,y,zz+1, 7)
mc.setBlock(x,y,zz+2, 7)
mc.setBlock(x,y,zz+3, 7)
mc.setBlock(x,y,zz+4, 7)
mc.setBlock(x,y,zz+5, 7)
mc.setBlock(x+1,y,zz+1, 7)
mc.setBlock(x+1,y,zz+2, 7)
mc.setBlock(x+1,y,zz+3, 7)
mc.setBlock(x+1,y,zz+4, 7)
mc.setBlock(x+1,y,zz+5, 7)
mc.setBlock(x+2,y,zz+1, 7)
mc.setBlock(x+2,y,zz+2, 7)
mc.setBlock(x+2,y,zz+3, 7)
mc.setBlock(x+2,y,zz+4, 7)
mc.setBlock(x+2,y,zz+5, 7)
mc.setBlock(x+3,y,zz+1, 7)
mc.setBlock(x+3,y,zz+2, 7)
mc.setBlock(x+3,y,zz+3, 7)
mc.setBlock(x+3,y,zz+4, 7)
mc.setBlock(x+3,y,zz+5, 7)
#Corner Towers
# Vertical columns at the four corners, 5 blocks tall (y+1 .. y+5).
# NOTE(review): the front-right tower skips y+4 here; that block is placed
# later in the "tower connecters" section below.
mc.setBlock(x-4, y+1,zz, 7)
mc.setBlock(x-4, y+2,zz, 7)
mc.setBlock(x-4, y+3,zz, 7)
mc.setBlock(x-4, y+4,zz, 7)
mc.setBlock(x-4, y+5,zz, 7)
mc.setBlock(x+4, y+1,zz, 7)
mc.setBlock(x+4, y+2,zz, 7)
mc.setBlock(x+4, y+3,zz, 7)
mc.setBlock(x+4, y+5,zz, 7)
# First course of the side walls (y+1) along both sides:
mc.setBlock(x-4, y+1,zz+1, 7)
mc.setBlock(x-4, y+1,zz+2, 7)
mc.setBlock(x-4, y+1,zz+3, 7)
mc.setBlock(x-4, y+1,zz+4, 7)
mc.setBlock(x-4, y+1,zz+5, 7)
mc.setBlock(x-4, y+1,zz+6, 7)
mc.setBlock(x+4, y+1,zz+1, 7)
mc.setBlock(x+4, y+1,zz+2, 7)
mc.setBlock(x+4, y+1,zz+3, 7)
mc.setBlock(x+4, y+1,zz+4, 7)
mc.setBlock(x+4, y+1,zz+5, 7)
mc.setBlock(x+4, y+1,zz+6, 7)
# Rear corner towers (y+2 .. y+5):
mc.setBlock(x+4, y+2,zz+6, 7)
mc.setBlock(x+4, y+3,zz+6, 7)
mc.setBlock(x+4, y+4,zz+6, 7)
mc.setBlock(x+4, y+5,zz+6, 7)
mc.setBlock(x-4, y+2,zz+6, 7)
mc.setBlock(x-4, y+3,zz+6, 7)
mc.setBlock(x-4, y+4,zz+6, 7)
mc.setBlock(x-4, y+5,zz+6, 7)
#tower connecters
# Horizontal ring at y+4 linking all four towers (back, sides, front):
mc.setBlock(x-3, y+4,zz+6, 7)
mc.setBlock(x-2, y+4,zz+6, 7)
mc.setBlock(x-1, y+4,zz+6, 7)
mc.setBlock(x, y+4,zz+6, 7)
mc.setBlock(x+3, y+4,zz+6, 7)
mc.setBlock(x+2, y+4,zz+6, 7)
mc.setBlock(x+1, y+4,zz+6, 7)
mc.setBlock(x+4, y+4,zz+1, 7)
mc.setBlock(x+4, y+4,zz+2, 7)
mc.setBlock(x+4, y+4,zz+3, 7)
mc.setBlock(x+4, y+4,zz+4, 7)
mc.setBlock(x+4, y+4,zz+5, 7)
mc.setBlock(x-4, y+4,zz+1, 7)
mc.setBlock(x-4, y+4,zz+2, 7)
mc.setBlock(x-4, y+4,zz+3, 7)
mc.setBlock(x-4, y+4,zz+4, 7)
mc.setBlock(x-4, y+4,zz+5, 7)
mc.setBlock(x+4, y+4,zz, 7)
mc.setBlock(x+3, y+4,zz, 7)
mc.setBlock(x+2, y+4,zz, 7)
mc.setBlock(x+1, y+4,zz, 7)
mc.setBlock(x, y+4,zz, 7)
mc.setBlock(x-4, y+4,zz, 7)
mc.setBlock(x-3, y+4,zz, 7)
mc.setBlock(x-2, y+4,zz, 7)
mc.setBlock(x-1, y+4,zz, 7)
#Walls and Windows
# Front wall pieces flanking the doorway gap around x, x+-1:
mc.setBlock(x+2, y+1, zz, 7)
mc.setBlock(x+3, y+1, zz, 7)
mc.setBlock(x+2, y+2, zz, 7)
mc.setBlock(x+2, y+3, zz, 7)
mc.setBlock(x-2, y+1, zz, 7)
mc.setBlock(x-3, y+1, zz, 7)
mc.setBlock(x-2, y+2, zz, 7)
mc.setBlock(x-2, y+3, zz, 7)
# Back wall first course (y+1) across the full width:
mc.setBlock(x, y+1, zz+6, 7)
mc.setBlock(x+1, y+1, zz+6, 7)
mc.setBlock(x+2, y+1, zz+6, 7)
mc.setBlock(x+3, y+1, zz+6, 7)
mc.setBlock(x+4, y+1, zz+6, 7)
mc.setBlock(x-1, y+1, zz+6, 7)
mc.setBlock(x-2, y+1, zz+6, 7)
mc.setBlock(x-3, y+1, zz+6, 7)
mc.setBlock(x-4, y+1, zz+6, 7)
mc.setBlock(x-4, y+1, zz+6, 7)
# Side-wall window mullions (gaps between these form the windows):
mc.setBlock(x-4, y+2, zz+2, 7)
mc.setBlock(x-4, y+3, zz+2, 7)
mc.setBlock(x-4, y+2, zz+4, 7)
mc.setBlock(x-4, y+3, zz+4, 7)
mc.setBlock(x+4, y+2, zz+2, 7)
mc.setBlock(x+4, y+3, zz+2, 7)
mc.setBlock(x+4, y+2, zz+4, 7)
mc.setBlock(x+4, y+3, zz+4, 7)
mc.setBlock(x-2, y+2, zz+6, 7)
mc.setBlock(x-2, y+3, zz+6, 7)
mc.setBlock(x+4, y+2, zz+6, 7)
mc.setBlock(x+4, y+3, zz+6, 7)
mc.setBlock(x+2, y+2, zz+6, 7)
mc.setBlock(x+2, y+3, zz+6, 7)
#API Blocks
#====================
# NOTE(review): this section contained an unresolved git merge conflict
# (<<<<<<< HEAD / ======= / >>>>>>> markers), which made the whole file a
# syntax error.  Both sides were copies of the same block-ID reference table
# -- one as a docstring, one as comments -- so the more complete docstring
# version is kept once below.
"""
AIR 0
STONE 1
GRASS 2
DIRT 3
COBBLESTONE 4
WOOD_PLANKS 5
SAPLING 6
BEDROCK 7
WATER_FLOWING 8
WATER 8
WATER_STATIONARY 9
LAVA_FLOWING 10
LAVA 10
LAVA_STATIONARY 11
SAND 12
GRAVEL 13
GOLD_ORE 14
IRON_ORE 15
COAL_ORE 16
WOOD 17
LEAVES 18
GLASS 20
LAPIS_LAZULI_ORE 21
LAPIS_LAZULI_BLOCK 22
SANDSTONE 24
BED 26
COBWEB 30
GRASS_TALL 31
WOOL 35
FLOWER_YELLOW 37
FLOWER_CYAN 38
MUSHROOM_BROWN 39
MUSHROOM_RED 40
GOLD_BLOCK 41
IRON_BLOCK 42
STONE_SLAB_DOUBLE 43
STONE_SLAB 44
BRICK_BLOCK 45
TNT 46
BOOKSHELF 47
MOSS_STONE 48
OBSIDIAN 49
TORCH 50
FIRE 51
STAIRS_WOOD 53
CHEST 54
DIAMOND_ORE 56
DIAMOND_BLOCK 57
CRAFTING_TABLE 58
FARMLAND 60
FURNACE_INACTIVE 61
FURNACE_ACTIVE 62
DOOR_WOOD 64
LADDER 65
STAIRS_COBBLESTONE 67
DOOR_IRON 71
REDSTONE_ORE 73
SNOW 78
ICE 79
SNOW_BLOCK 80
CACTUS 81
CLAY 82
SUGAR_CANE 83
FENCE 85
GLOWSTONE_BLOCK 89
BEDROCK_INVISIBLE 95
STONE_BRICK 98
GLASS_PANE 102
MELON 103
FENCE_GATE 107
GLOWING_OBSIDIAN 246
NETHER_REACTOR_CORE 247
"""
| [
"sift@owt.com"
] | sift@owt.com |
42d1fff690b31833db3a4e0505896168d8e455d6 | 2bb9ed1174287932f180f7e52dba4847499ecb67 | /treetest2/abstract_base.py | 93e92e93d0f8cb17fab10d5a71df3d51e0c6c94a | [
"MIT"
] | permissive | Christian-B/my_spinnaker | 1727dcfd965d4f724adcfc896b05e75cea8e1aed | fc0e52b90ec3ecc16d721abffb569e334fea885c | refs/heads/master | 2023-07-21T18:21:12.906743 | 2023-07-20T10:11:04 | 2023-07-20T10:11:04 | 83,024,419 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,555 | py | # Trimmed down version of abc.py
# If using #@add_metaclass, you need: from six import add_metaclass
def abstractmethod(funcobj):
    """Flag *funcobj* as abstract and return it unchanged.

    Works together with the AbstractBase metaclass (or one derived from
    it), which collects every attribute carrying this flag into the
    class's ``__abstractmethods__`` set; a class with a non-empty set
    cannot be instantiated until all of its abstract methods are
    overridden.  The abstract method itself remains callable through the
    normal ``super`` mechanisms.

    Usage::

        #@add_metaclass(AbstractBase)
        class C:
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    funcobj.__isabstractmethod__ = True
    return funcobj
class abstractproperty(property):
    """A ``property`` subclass that marks its attribute as abstract.

    For use in classes whose metaclass is AbstractBase (or derived from
    it): the metaclass records abstract properties in
    ``__abstractmethods__``, so the class cannot be instantiated until
    every abstract property is overridden.

    Read-only usage::

        #@add_metaclass(AbstractBase)
        class C:
            @abstractproperty
            def my_abstract_property(self):
                ...

    A read-write abstract property uses the 'long' form of property
    declaration::

        #@add_metaclass(AbstractBase)
        class C:
            def getx(self): ...
            def setx(self, value): ...
            x = abstractproperty(getx, setx)
    """

    # Same flag abstractmethod() sets; AbstractBase.__new__ looks for it.
    __isabstractmethod__ = True
class AbstractBase(type):
    """Metaclass for defining abstract base classes.

    A trimmed-down version of ``abc.ABCMeta``: it computes the
    ``__abstractmethods__`` frozenset for every class it creates (which
    is what blocks instantiation while abstract methods remain), but --
    unlike ABCMeta -- it does not support register()-ing unrelated
    concrete classes.  Classes using this metaclass can be subclassed
    directly and act as mix-ins.
    """

    def __new__(mcls, name, bases, namespace):
        cls = super(AbstractBase, mcls).__new__(mcls, name, bases, namespace)
        # Attributes declared abstract directly in this class body.
        pending = set(attr for attr, value in namespace.items()
                      if getattr(value, "__isabstractmethod__", False))
        # Inherited abstract methods that this class still leaves abstract.
        for base in bases:
            for attr in getattr(base, "__abstractmethods__", set()):
                resolved = getattr(cls, attr, None)
                if getattr(resolved, "__isabstractmethod__", False):
                    pending.add(attr)
        cls.__abstractmethods__ = frozenset(pending)
        return cls
| [
"brenninc@cs.man.ac.uk"
] | brenninc@cs.man.ac.uk |
330058010818406687c80f7723e26b445b282e69 | 5be2fc94724cc05d2dc449e0f5b40d9fb07edd51 | /tests/test_biosample.py | 4e618528f17766b407935e78b014a86d7a17a3b8 | [
"MIT"
] | permissive | LucaCappelletti94/encodeproject | b84614683c8652f812f2c01b0002903d849a080b | a2bcae8cfbb505a978ecea95c3a007f65625c57a | refs/heads/master | 2022-05-07T13:16:58.774258 | 2022-04-27T07:51:22 | 2022-04-27T07:51:22 | 216,822,791 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from encodeproject import biosample, biosamples
def test_biosample():
    # Smoke test: fetch one biosample by ENCODE accession, then again with the
    # second positional argument set to False (presumably a download/verbose
    # flag -- TODO confirm against the encodeproject API).  Requires network
    # access to the ENCODE project servers.
    biosample("ENCSR000EDP")
    biosample("ENCSR000EDP", False)
def test_biosamples():
    # Smoke test for the batch variant: a list of accessions, and a
    # single-element list with the boolean flag disabled.  Network-dependent,
    # like test_biosample above.
    biosamples(["ENCFF454HMH", "ENCFF663AYS"])
    biosamples(["ENCSR000EDP"], False)
"cappelletti.luca94@gmail.com"
] | cappelletti.luca94@gmail.com |
9a8deb9549b70baf68049a803169f4d62d7914ea | 4369ee62d49399c7214389d0fc255b32fd302f1e | /train/tf_fix.py | 39f6f563a77216514d533048ca18955ada7f3cd9 | [] | no_license | BLANK2077/OpenVPU | 52846fa44140ac3c4897be5e1382189f405afcd0 | 93fd6d59d718b2088607c2f55c1ec7947ab38073 | refs/heads/master | 2020-06-11T17:26:48.636272 | 2019-05-24T08:06:26 | 2019-05-24T08:06:26 | 194,036,245 | 2 | 0 | null | 2019-06-27T06:31:40 | 2019-06-27T06:31:40 | null | UTF-8 | Python | false | false | 7,745 | py | # -*- coding: utf-8 -*-
import input_data
import tensorflow as tf
import numpy as np
import math
import struct
# Accelerator tiling / on-chip memory configuration constants.
Tc = 32             # input-channel tile width (see Map_Weight_Data's l-loop)
Tk = 16             # output-channel tile width (see Map_Weight_Data's k-loop)
Tp = 16             # NOTE(review): unused in this file -- confirm purpose
Logic_MEM_DEP = 256  # depth (entries) of each on-chip logic memory bank
Logic_MEM_NUM = 16   # number of on-chip logic memory banks
BIT_WIDTH = 16       # fixed-point word width in bits (stray ';' removed)
def Get_FeatureLength(H, W, CH, tile=None):
    """Return the padded feature-buffer length for an H x W x CH feature map.

    The channel dimension is rounded up to a whole number of tiles, so the
    result is tile * H * W * ceil(CH / tile).

    Args:
        H, W: spatial height and width.
        CH: number of channels.
        tile: channel tile width; defaults to the module-level Tk
            (generalized from the previously hard-coded constant).
    """
    if tile is None:
        tile = Tk
    return (tile * H * W * math.floor((CH + tile - 1) / tile))
def Get_WeightLength(Ky, Kx, CHin, CHout, tile=None):
    """Return the padded weight-buffer length for a Ky x Kx x CHin x CHout kernel.

    The input-channel dimension is rounded up to a whole number of tiles, so
    the result is tile * Kx * Ky * CHout * ceil(CHin / tile).

    Args:
        Ky, Kx: kernel height and width.
        CHin, CHout: input / output channel counts.
        tile: input-channel tile width; defaults to the module-level Tc
            (generalized from the previously hard-coded constant).
    """
    if tile is None:
        tile = Tc
    return (tile * Kx * Ky * CHout * math.floor((CHin + tile - 1) / tile))
def To_Fixed(tensor, bitwidth):
    """Quantize a tensor's current value to signed fixed point.

    Evaluates the tensor (requires an active TF default session, or any
    object exposing eval()), picks the integer bit count from the largest
    magnitude plus one sign bit, and returns
    (rounded_scaled_array, fraction_bits).

    NOTE(review): an all-zero tensor makes math.log raise ValueError.
    """
    array = tensor.eval()
    # Renamed from 'range', which shadowed the builtin.
    abs_max = max(np.max(array), -np.min(array))
    int_part = max(math.ceil(math.log(abs_max, 2) + 0.000001), 0) + 1  # 1 bit for sign
    fraction_part = bitwidth - int_part
    return (np.round(array * pow(2, fraction_part)), fraction_part)  # /pow(2,fraction_part)
def Feature_To_Fixed(tensor, bitwidth, feed_dict):
    """Quantize a feature tensor to signed fixed point, same as To_Fixed but
    evaluating with a feed_dict (for placeholder-driven graphs).

    Returns (rounded_scaled_array, fraction_bits).
    NOTE(review): an all-zero tensor makes math.log raise ValueError.
    """
    array = tensor.eval(feed_dict=feed_dict)
    # Renamed from 'range', which shadowed the builtin.
    abs_max = max(np.max(array), -np.min(array))
    # print abs_max
    int_part = max(math.ceil(math.log(abs_max, 2) + 0.000001), 0) + 1  # 1 bit for sign
    fraction_part = bitwidth - int_part
    return (np.round(array * pow(2, fraction_part)), fraction_part)  # /pow(2,fraction_part)
def Map_Weight_Data(kernel, mem, Ky, Kx, in_ch, out_ch, tk=None, tc=None):
    """Reorder a Ky x Kx x in_ch x out_ch kernel into the accelerator's
    tiled memory layout.

    Output channels are tiled by `tk` and input channels by `tc`;
    input-channel positions beyond in_ch are zero-padded so every tile is
    full.  `mem` must be at least tc*Kx*Ky*out_ch*ceil(in_ch/tc) entries
    long (see Get_WeightLength).  tk/tc default to the module-level Tk/Tc
    (generalized from the previously hard-coded constants).

    NOTE(review): indentation reconstructed from a whitespace-stripped
    source; nesting follows the original debug trace comment.
    """
    if tk is None:
        tk = Tk
    if tc is None:
        tc = Tc
    addr = 0
    for k in range(0, out_ch, tk):            # output-channel tile
        for l in range(0, in_ch, tc):         # input-channel tile
            for i in range(Ky):
                for j in range(Kx):
                    for kk in range(k, k + tk):
                        if kk < out_ch:
                            for ll in range(l, l + tc, tk):
                                # Gather one tk-wide input-channel group,
                                # zero-padded past in_ch.
                                tp = []
                                for lll in range(ll, ll + tk):
                                    if lll < in_ch:
                                        tp.append(kernel[i][j][lll][kk])
                                    else:
                                        tp.append(0)
                                for cp in range(tk):
                                    mem[addr + cp] = tp[cp]
                                addr = addr + tk
def Map_Bias_Data(dat, mem, channel, tile=None):
    """Copy `channel` bias values into `mem`, zero-padding to a whole tile.

    `mem` must be at least tile*ceil(channel/tile) entries long (see
    Get_FeatureLength(1, 1, channel)).  `tile` defaults to the module-level
    Tk (generalized from the previously hard-coded constant).
    """
    if tile is None:
        tile = Tk
    for i in range(0, channel, tile):
        for ii in range(i, i + tile):
            if ii < channel:
                mem[ii] = dat[ii]
            else:
                mem[ii] = 0
def Get_Feature_Fraction_Part(tensor, name, feed_dict, file):
    # Evaluate the tensor, derive its fixed-point fraction bit count, and emit
    # a C '#define PTR_<NAME> <bits>' line into the generated header `file`.
    # The quantized values themselves are discarded here.
    (array,fraction_part)=Feature_To_Fixed(tensor,BIT_WIDTH,feed_dict);
    file.write("#define %s %d\n" % ("PTR_"+name.upper(),int(fraction_part)) );
    #print(name+' fraction_part: ' + str(int(fraction_part)));
def Record_Weight(tensor, name, file):
    # Quantize a weight tensor, emit its '#define PTR_<NAME>' fraction-bits
    # line into `file`, remap the values into the accelerator's tiled layout,
    # write them as little int16s to ./record/<name>.bin, and print the C
    # loader snippet for the generated firmware.
    # NOTE(review): assumes a 4-D Ky x Kx x CHin x CHout weight tensor and
    # that ./record/ already exists -- confirm with callers.
    (array,fraction_part)=To_Fixed(tensor,BIT_WIDTH);
    file.write("#define %s %d\n" % ("PTR_"+name.upper(),int(fraction_part)) );
    #print(name+' fraction_part: ' + str(fraction_part));
    OneD_array_size=Get_WeightLength(np.shape(array)[0],np.shape(array)[1],np.shape(array)[2],np.shape(array)[3]);
    OneD_array=[0]*OneD_array_size;
    Map_Weight_Data(array,OneD_array,np.shape(array)[0],np.shape(array)[1],np.shape(array)[2],np.shape(array)[3]);
    print("struct Mapped_Weight *%s=Malloc_Weight(%d,%d,%d,%d,%s);" % (name,np.shape(array)[0],np.shape(array)[1],np.shape(array)[2],np.shape(array)[3],"PTR_"+name.upper()) )
    print("Load_Weight_From_File(%s,\"%s\");\n" % (name,name+'.bin') );
    # 'h' = signed 16-bit; matches BIT_WIDTH.
    with open('./record/'+name+'.bin', 'wb') as fp:
        for i in range(OneD_array_size):
            a=struct.pack('h',int(OneD_array[i]))
            fp.write(a)
def Record_Bias(tensor, name, file):
    # Quantize a 1-D bias tensor, emit its '#define PTR_<NAME>' fraction-bits
    # line into `file`, pad/remap it into tile order, write it as int16s to
    # ./record/<name>.bin, and print the C loader snippet (a bias is treated
    # as a 1 x 1 x CH feature).
    (array,fraction_part)=To_Fixed(tensor,BIT_WIDTH);
    file.write("#define %s %d\n" % ("PTR_"+name.upper(),int(fraction_part)) );
    #print(name+' fraction_part: ' + str(fraction_part));
    OneD_array_size=Get_FeatureLength(1,1,np.shape(array)[0]);
    OneD_array=[0]*OneD_array_size;
    Map_Bias_Data(array,OneD_array,np.shape(array)[0]);
    print("struct Mapped_Feature *%s=Malloc_Feature(1,1,%d,%s,0,-1,-1);" % (name,np.shape(array)[0],"PTR_"+name.upper()) )
    print("Load_Feature_From_File(%s,\"%s\");\n" % (name,name+'.bin') );
    # 'h' = signed 16-bit; matches BIT_WIDTH.
    with open('./record/'+name+'.bin', 'wb') as fp:
        for i in range(OneD_array_size):
            a=struct.pack('h',int(OneD_array[i]))
            fp.write(a)
def Record_Conv_Cfg(Hin, Win, CHin, CHout, Kx, Ky, Sx, Sy, pad_left, pad_right, pad_up, pad_down, layername, file):
    """Choose a data/weight bank split for one conv layer and emit its C config.

    Searches every feasible split of the Logic_MEM_NUM on-chip banks between
    feature data and weights, picking the one that minimizes total external
    bandwidth (trying both loop orders: height-chunks-outer 'K_to_N' and
    channel-slices-outer 'N_to_K'), then writes a 'struct Conv_Cfg' initializer
    line for the winner into `file`.

    BUGFIX(review): the error path used printf(), which does not exist in
    Python (NameError at runtime) -- changed to print().
    NOTE(review): if the search loop's range is empty, best_dat_banks is never
    assigned and the code below raises NameError; also in_height_last is
    computed from in_height_first where in_height_middle may have been
    intended -- both preserved, confirm against the hardware model.
    """
    mininum_bw = 0
    out_width = (math.floor((Win + pad_left + pad_right - Kx) / Sx) + 1)
    out_height = (math.floor((Hin + pad_up + pad_down - Ky) / Sy) + 1)
    overlap = Ky - Sy  # rows shared between consecutive height chunks
    entries_per_line = Win * math.floor((CHin + Tc - 1) / Tc)
    # Minimum banks needed to hold Ky input lines / one weight tile.
    dat_banks_restrict = math.floor((entries_per_line * Ky + Logic_MEM_DEP - 1) / Logic_MEM_DEP)
    wt_banks_restrict = math.floor((Kx * Ky * Tk * math.floor((CHin + Tc - 1) / Tc) + Logic_MEM_DEP - 1) / Logic_MEM_DEP)
    if ((dat_banks_restrict + wt_banks_restrict) > Logic_MEM_NUM):
        print("Error: CBUF entries not enough, you should split your " + layername + " into at least " + str((dat_banks_restrict + wt_banks_restrict) / Logic_MEM_NUM) + " pieces!!!\n")
        return
    for dat_buf_num in range(int(dat_banks_restrict), int(Logic_MEM_NUM - wt_banks_restrict)):
        wt_banks = Logic_MEM_NUM - dat_buf_num
        # Output channels per pass, rounded down to whole Tk tiles.
        out_ch_slice = math.floor((Logic_MEM_DEP * wt_banks) / (Kx * Ky * Tk * math.floor((CHin + Tc - 1) / Tc))) * Tk
        if (out_ch_slice >= CHout):
            out_ch_slice = CHout
            N = 1
        else:
            N = math.floor((CHout + out_ch_slice - 1) / out_ch_slice)
        if (CHout % out_ch_slice == 0):
            out_ch_slice_last = out_ch_slice
        else:
            out_ch_slice_last = CHout % out_ch_slice
        # Height chunking: first chunk absorbs the top padding, middle chunks
        # reuse `overlap` rows, last chunk takes the remainder.
        out_height_first = math.floor((math.floor((Logic_MEM_DEP * dat_buf_num) / entries_per_line) + pad_up - Ky) / Sy) + 1
        in_height_first = (out_height_first - 1) * Sy + Ky - pad_up
        out_height_middle = math.floor((math.floor((Logic_MEM_DEP * dat_buf_num) / entries_per_line) - Ky) / Sy) + 1
        in_height_middle = (out_height_middle - 1) * Sy + Ky
        if (out_height_first >= out_height):
            out_height_first = out_height
            in_height_first = Hin
        if ((out_height - out_height_first) % out_height_middle == 0):
            K = math.floor((out_height - out_height_first) / out_height_middle) + 1
            out_height_last = out_height_middle
        else:
            K = math.floor((out_height - out_height_first) / out_height_middle) + 2
            out_height_last = (out_height - out_height_first) % out_height_middle
        in_height_last = Hin - in_height_first + overlap - (K - 2) * (in_height_first - overlap)
        # Candidate total external bandwidths for the two traversal orders.
        total_bw_K_to_N = (entries_per_line * Hin + entries_per_line * overlap * (K - 1)) * N + Kx * Ky * CHout * math.floor((CHin + Tc - 1) / Tc)
        total_bw_N_to_K = K * Kx * Ky * CHout * math.floor((CHin + Tc - 1) / Tc) + entries_per_line * Hin + entries_per_line * overlap * (K - 1)
        if ((mininum_bw == 0) or (total_bw_K_to_N < mininum_bw)):
            best_dat_banks = dat_buf_num
            mininum_bw = total_bw_K_to_N
            best_method = 0
        if ((mininum_bw == 0) or (total_bw_N_to_K < mininum_bw)):
            best_dat_banks = dat_buf_num
            mininum_bw = total_bw_N_to_K
            best_method = 1
    # Recompute all derived quantities for the winning bank split.
    dat_buf_num = best_dat_banks
    wt_banks = Logic_MEM_NUM - dat_buf_num
    out_ch_slice = math.floor((Logic_MEM_DEP * wt_banks) / (Kx * Ky * Tk * math.floor((CHin + Tc - 1) / Tc))) * Tk
    if (out_ch_slice >= CHout):
        out_ch_slice = CHout
        N = 1
    else:
        N = math.floor((CHout + out_ch_slice - 1) / out_ch_slice)
    if (CHout % out_ch_slice == 0):
        out_ch_slice_last = out_ch_slice
    else:
        out_ch_slice_last = CHout % out_ch_slice
    out_height_first = math.floor((math.floor((Logic_MEM_DEP * dat_buf_num) / entries_per_line) + pad_up - Ky) / Sy) + 1
    in_height_first = (out_height_first - 1) * Sy + Ky - pad_up
    out_height_middle = math.floor((math.floor((Logic_MEM_DEP * dat_buf_num) / entries_per_line) - Ky) / Sy) + 1
    in_height_middle = (out_height_middle - 1) * Sy + Ky
    if (out_height_first >= out_height):
        out_height_first = out_height
        in_height_first = Hin
    if ((out_height - out_height_first) % out_height_middle == 0):
        K = math.floor((out_height - out_height_first) / out_height_middle) + 1
        out_height_last = out_height_middle
    else:
        K = math.floor((out_height - out_height_first) / out_height_middle) + 2
        out_height_last = (out_height - out_height_first) % out_height_middle
    in_height_last = Hin - in_height_first + overlap - (K - 2) * (in_height_first - overlap)
    file.write("struct Conv_Cfg %s={%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d};\n" % (layername+"_cfg",
        CHin,Win,CHout,
        overlap,Kx,Ky,Sx,Sy,pad_left,pad_up,
        best_dat_banks,best_method,
        out_width,out_height,
        entries_per_line,(Tc*2*Kx*Ky*CHout*math.floor((CHin+Tc-1)/Tc)),
        K,
        in_height_first,in_height_middle,in_height_last,
        out_height_first,out_height_middle,out_height_last,
        N,
        out_ch_slice,
        out_ch_slice_last))
| [
"16210720048@fudan.edu.cn"
] | 16210720048@fudan.edu.cn |
010c5b1c21a0180900f7f5bfc873f53bdb929512 | 20c3fe5d914b190693aacca5640dcb5a9162f47a | /info/models.py | 1a215b7bbad59a0e80df9f62522d71bb323ebaee | [] | no_license | Chandole/django-project | 3fde1d5985295b3f24771efead10fdd0da6c5a1c | 8051f975c2775089576f19db8df47b569eb97f1a | refs/heads/main | 2023-06-07T15:08:21.643999 | 2021-07-04T07:42:19 | 2021-07-04T07:42:19 | 382,791,977 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 346 | py | from django.db import models
# Create your models here.
class Register(models.Model):
    # Simple user-registration record mapped to the existing 'register' table.
    name=models.CharField(max_length=100)
    email=models.CharField(max_length=100)
    # Mobile number stored as free-form text (no validation here).
    mob=models.CharField(max_length=100)
    branch=models.CharField(max_length=100)
    # NOTE(review): plain CharField -- the password is stored in clear text
    # unless hashed by the caller; consider Django's auth machinery.
    password=models.CharField(max_length=100)
    class Meta:
        db_table='register'
"shubham@gmail.com"
] | shubham@gmail.com |
59fb611140555a4354a5f36aca5c99ad5070dc22 | bbb765f1f14c08f119e58a034ec69e1720e3b5fc | /network_server.py | b7922e6c2a491af708463de8f328e08c154e6f9b | [] | no_license | mainakch/mobilefs | 19d6c7f16f65c1008d6ef2868aa216f9c50eb2fc | 153f7b59167e2e7e1f4afd30a7a4a6e90b5f30da | refs/heads/master | 2021-06-01T16:49:01.226410 | 2019-12-07T17:06:11 | 2019-12-07T17:06:11 | 31,227,493 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,564 | py | #!/usr/bin/env python2
'''
network_server.py - Executes remote requests and send responses back
'''
from constants import *
log = logging.getLogger('network_server')
log.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
ch.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
log.addHandler(ch)
class Networkserver():
    """UDP server side of the mobile filesystem.

    Receives chunked, pickled filesystem requests from the network client,
    executes them locally via ``execute_message``, and streams chunked
    responses back under a simple windowed ACK protocol with timeout-based
    retransmission.

    NOTE(review): leading whitespace was lost in the archived copy of this
    file; the indentation below is a reconstruction -- verify the nesting of
    ``handle_remote_request`` against the upstream repository.
    """

    def __init__(self, server_address, port):
        # Congestion window: max number of unacknowledged datagrams in flight.
        self.window = WINDOW
        self.lastsent = 0
        self.lastreceived = 0
        self.unacknowledged_packets = {} #this stores the keys of packets in flight and timestamp when sent
        self.time_sleep = 0.0000000000000000003
        #socket address
        self.public_address = (server_address, port)
        #list of sockets
        self.network_server = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        #initialize the sockets
        try:
            self.network_server.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.network_server.bind(self.public_address)
        except Exception as ex:
            #pass
            log.debug(ex)
        self.inputs = [self.network_server]
        self.outputs = [self.network_server]
        #queues
        #key=remote taskid, value = list of chunks received so far
        self.recv_list_of_chunks = {}
        #key = taskid, value = request
        self.transmit_queue = {}
        #key = original_taskid, value = response
        self.receive_queue = {}
        #key = (priority, taskid, original_taskid, chunknum, chunktotalnum, timestamp when added to queue/transmitted), value = chunkof request
        self.chunk_queue = {}
        #key = (taskid, original_taskid, chunknum, chunktotalnum, timestamp when received), value=chunkofresponse
        self.receive_chunk_queue = {}
        #(received taskid, timestamp)
        self.completed_tasks = {}
        #timestamp of last transmission
        self.timestamp_transmission = 0
        #mapping from socket object to taskid
        self.order_of_keys_in_chunk_queue = []
        # Random starting task id for responses sent by this server.
        self.taskid = randint(0, 1002039)
        self.packets_in_flight = 0

    def execute_message(self, taskstring):
        """Unpickle one filesystem request tuple and execute it locally.

        Returns a pickled ('res', value) on success, ('err', errno) on
        OSError, or ('non', None) when the operation produced no result.
        """
        log.debug('inside execute_message: %s' % taskstring)
        args = pickle.loads(taskstring)
        response = None
        try:
            if args[0] == 'chmod':
                os.chmod(args[1], args[2])
            if args[0] == 'setattr':
                # Apply whichever stat fields are present on the attr object.
                pathname = args[1]
                attr = args[2]
                if attr.st_mode is not None:
                    os.chmod(pathname, attr.st_mode)
                if attr.st_size is not None:
                    try:
                        fd = os.open(pathname, os.O_RDWR)
                        os.ftruncate(fd, attr.st_size)
                        os.close(fd)
                    except Exception as exc:
                        log.debug('Error in truncate %s' % repr(exc))
                        pass
                if (attr.st_atime is not None) or (attr.st_mtime is not None):
                    os.utime(pathname, (attr.st_atime, attr.st_mtime))
                if (attr.st_gid is not None) or (attr.st_uid is not None):
                    os.lchown(pathname, attr.st_uid, attr.st_gid)
            if args[0] == 'close':
                os.close(args[1])
            if args[0] == 'link':
                os.link(args[1], args[2])
            if args[0] == 'listdir':
                # Return (name, attributes, fullpath) for each non-symlink entry.
                list_of_dirs = os.listdir(args[1])
                response = []
                for name in list_of_dirs:
                    fullname = os.path.join(args[1], name)
                    if not os.path.islink(fullname):
                        stat = os.lstat(fullname)
                        entry = Entryattributes(stat)
                        response.append((name, entry, fullname))
            if args[0] == 'lseekread':
                # Read payloads are base64-encoded for safe pickling/transport.
                os.lseek(args[1], args[2], 0)
                response = b64encode(os.read(args[1], args[3]))
            if args[0] == 'lseekwrite':
                os.lseek(args[1], args[2], 0)
                response = os.write(args[1], b64decode(args[3]))
            if args[0] == 'lstat':
                response = os.lstat(args[1])
            if args[0] == 'mkdir':
                os.mkdir(args[1], args[2])
            if args[0] == 'mknod':
                os.mknod(args[1], args[2], args[3])
            if args[0] == 'open':
                response = os.open(args[1], args[2])
            if args[0] == 'readlink':
                response = os.readlink(args[1])
            if args[0] == 'rename':
                response = os.rename(args[1], args[2])
            if args[0] == 'rmdir':
                response = os.rmdir(args[1])
            if args[0] == 'statvfs':
                response = os.statvfs(args[1])
            if args[0] == 'symlink':
                response = os.symlink(args[1], args[2])
            if args[0] == 'unlink':
                response = os.unlink(args[1])
            if args[0] == 'access':
                response = os.access(args[1], args[2])
        except OSError as exc:
            # Ship the errno back to the client instead of crashing.
            response = exc
        if response is None:
            return pickle.dumps(('non', response))
        if isinstance(response, Exception):
            return pickle.dumps(('err', response.errno))
        else:
            return pickle.dumps(('res', response))

    def handle_remote_request(self, s):
        """Process one incoming datagram: heartbeat, ACK, or request chunk."""
        #log.debug('Received request from network_client')
        try:
            #s is a network client connection
            data, self.network_client_address = s.recvfrom(DATAGRAM_SIZE)
            obj = pickle.loads(data)
            self.lastreceived = time.time()
            if obj[2] == 'hrt':
                log.debug('Heartbeat received')
                log.debug('Client address: %s' % repr(self.network_client_address))
            if obj[2] == 'ack':
                log.debug('ack')
                #find out key info
                candidate_list = [ctr for ctr in self.order_of_keys_in_chunk_queue if ctr[1] == obj[1][0] and ctr[3] == obj[1][2]]
                #remove from chunk_queue
                if len(candidate_list)>0:
                    key = candidate_list[0]
                    if key in self.unacknowledged_packets: del self.unacknowledged_packets[key]
                    self.order_of_keys_in_chunk_queue.remove(key)
                    del self.chunk_queue[key]
            if obj[2] == 'pac':# and obj[0][0] not in self.completed_tasks:
                log.debug('pac')
                #add to receive chunk queue queue
                key = self.augment_timestamp_info_key(obj[0])
                val = obj[1]
                #add packet to receive chunk if not in self.completed_tasks
                if obj[0][1] not in self.completed_tasks:
                    if key[0] not in self.recv_list_of_chunks: self.recv_list_of_chunks[key[0]] = []
                    if key[2] not in self.recv_list_of_chunks[key[0]]:
                        self.recv_list_of_chunks[key[0]].append(key[2])
                        self.receive_chunk_queue[key] = val
                #send ack
                # (always ACK, even duplicates of completed tasks, so the
                # client stops retransmitting)
                s.sendto(pickle.dumps([0, obj[0], 'ack']), self.network_client_address)
                #check if all packets have been received for the same taskid
                if key[0] in self.recv_list_of_chunks and len(self.recv_list_of_chunks[key[0]]) == key[3]:
                    list_of_recv_chunks = [ctr for ctr in self.receive_chunk_queue.keys() if ctr[0] == key[0]]
                    list_of_recv_chunks.sort(key = lambda x: x[2])
                    #all chunks have been received
                    #transfer to receive_queue
                    self.receive_queue[key[0]] = ''.join([self.receive_chunk_queue.pop(ctr) for ctr in list_of_recv_chunks])
                    #mark timestamp in completed queue
                    self.completed_tasks[key[0]] = time.time()
                    #remove list of received chunk indices
                    del self.recv_list_of_chunks[key[0]]
                    #execute action
                    string_response = self.execute_message(self.receive_queue.pop(key[0]))
                    #log.debug(string_response)
                    #now send response
                    self.taskid += 1
                    #add message to the chunk_queue
                    (keys, chunks) = self.split_task(self.taskid, key[0], string_response)
                    #add keys to order_of_keys_in_chunk_queue
                    self.order_of_keys_in_chunk_queue.extend(keys)
                    #sort by priority
                    self.order_of_keys_in_chunk_queue.sort(key = lambda x: x[0])
                    #add entries to chunk_queue
                    for (key, val) in zip(keys, chunks):
                        self.chunk_queue[key] = val
        except Exception as exc:
            log.debug(repr(exc))

    def send_remote_response(self, s):
        """Transmit queued response chunks within the congestion window,
        retransmitting (and shrinking the window) on ACK timeouts."""
        if len(self.order_of_keys_in_chunk_queue)>0:
            # Additive increase while there is traffic to send.
            self.window = next_window(self.window, False)
            list_of_keys_with_timeout = [ctr for ctr in self.unacknowledged_packets.keys() if self.unacknowledged_packets[ctr]<time.time()-RETRANSMISSION_TIMEOUT]
            if len(list_of_keys_with_timeout)>0:
                #assume packet is lost/network is congested
                self.window = next_window(self.window, True)
                for key in list_of_keys_with_timeout:
                    if key in self.unacknowledged_packets: del self.unacknowledged_packets[key]
            if len(self.unacknowledged_packets.keys())<self.window:
                #log.debug('send packets to remote filesystem')
                numkeys = max(self.window - len(self.unacknowledged_packets.keys()), 0)
                #find out keys which are not in transit
                keys = []
                ctr = 0
                while len(keys)<numkeys and ctr < len(self.order_of_keys_in_chunk_queue):
                    if self.order_of_keys_in_chunk_queue[ctr] not in self.unacknowledged_packets:
                        keys.append(self.order_of_keys_in_chunk_queue[ctr])
                    ctr += 1
                for key in keys:
                    self.unacknowledged_packets[key] = time.time()
                    self.lastsent = time.time()
                    string_to_be_sent = pickle.dumps([self.remove_priority_timestamp_info_from_key(key), self.chunk_queue[key], 'pac'])
                    log.debug('Length of datagram %d' % len(string_to_be_sent))
                    # A chunk that does not fit in one datagram is fatal.
                    if len(string_to_be_sent)>DATAGRAM_SIZE: sys.exit(1)
                    s.sendto(string_to_be_sent, self.network_client_address)

    def split_task(self, taskid, original_taskid, taskstring):
        """Split a response string into CHUNK_SIZE chunks plus their keys."""
        #this splits up the taskstring into a list of chunks
        startpt = range(0, len(taskstring), CHUNK_SIZE)
        chunks = [taskstring[pt:pt + CHUNK_SIZE] for pt in startpt[:-1]]
        chunks.append(taskstring[startpt[-1]:len(taskstring)])
        #smaller the task higher the priority
        ids = [(len(taskstring), taskid, original_taskid, ctr, len(chunks), time.time()) for ctr in range(len(chunks))]
        return (ids, chunks)

    def remove_priority_timestamp_info_from_key(self, key):
        # Strip the priority (key[0]) and timestamp (key[5]) before sending.
        return (key[1], key[2], key[3], key[4])

    def augment_timestamp_info_key(self, key):
        # Append the local receive timestamp to a wire-format chunk key.
        return (key[0], key[1], key[2], key[3], time.time())

    def main_loop(self):
        """select()-driven event loop; sleeps briefly per iteration to avoid
        burning CPU when idle (shorter sleep while chunks are pending)."""
        while self.inputs:
            readable, writable, exceptional = select.select(self.inputs, self.outputs, self.inputs)
            #prevent cpu burn
            if len(self.receive_chunk_queue.keys()) > 0 or len(self.chunk_queue.keys()) > 0:
                self.time_sleep = 0.0000000000000000000000003
            else:
                self.time_sleep = 0.003
            for s in readable:
                self.handle_remote_request(s)
            for s in writable:
                self.send_remote_response(s)
            for s in exceptional:
                self.inputs.remove(s)
                if s in self.outputs:
                    self.outputs.remove(s)
            time.sleep(self.time_sleep)
if __name__=='__main__':
    # Usage: ./network_server.py <hostname> <port>
    if len(sys.argv)<3:
        sys.stderr.write('Usage: ./network_server.py <hostname> <port>')
        sys.exit(1)
    # Bind to the given address and serve forever.
    network_server = Networkserver(sys.argv[1], int(sys.argv[2]))
    network_server.main_loop()
| [
"mainakch@gmail.com"
] | mainakch@gmail.com |
82205644412d3f2657ffc1c27b62955a90efb2e8 | 9609c73cf0e877c5d76610bc284f103e401bd042 | /ResNet18.py | 7fa060b604a57ee4e470fb5b02ee11a80d5d8419 | [] | no_license | Amadeus-Winarto/model_zoo | 8968953a8b29be754cc9e0bb04347a172570b2cc | 22082089b5ee0b0b7f2c1c3a7f12a843759f7d3e | refs/heads/master | 2020-03-25T19:58:20.684591 | 2018-11-27T06:41:10 | 2018-11-27T06:41:10 | 144,109,315 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,418 | py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 27 14:21:34 2018
@author: valaxw
"""
from keras.models import Model
from keras.layers import Conv2D, Dense, Activation, BatchNormalization, Concatenate, Dropout, Add
from keras.layers import AveragePooling2D, GlobalAveragePooling2D, MaxPooling2D, GlobalMaxPooling2D
from keras.layers import ReLU, ELU, LeakyReLU
from keras.regularizers import l2
from ResNet import layers
def ResNetv2(img_input, ratio = 1, num_A = 2, num_B = 2, num_C = 2, num_D = 2, activation_type = 'relu', pool = 'max', num_classes = 1000, dropout = 0.5):
    """Assemble a pre-activation ResNet (ResNet-v2) classification model.

    Stem: strided 7x7 conv + 3x3 max pool.  Four stages follow: stage A is
    num_A identity blocks at 64//ratio filters; stages B, C and D each double
    the filter count, open with a strided conv_block and append identity
    blocks.  The head is a global pool, dropout and a dense classifier
    (single sigmoid unit for binary problems, softmax otherwise).

    Note: activation_type is accepted for signature compatibility but is not
    forwarded to the blocks here.
    """
    stem = Conv2D(64 // ratio, (7, 7), padding = 'same', strides = 2)(img_input)
    x = MaxPooling2D((3, 3), strides=(2, 2), padding = 'same')(stem)

    filters = 64 // ratio
    # Stage A: identity blocks only, no downsampling.
    for _ in range(num_A):
        x = resnetv2.identity_block(x, filters = filters)

    # Stages B, C, D: double filters, downsample once, then identity blocks.
    for stage_depth in (num_B, num_C, num_D):
        filters *= 2
        x = resnetv2.conv_block(x, filters = filters)
        for _ in range(stage_depth - 1):
            x = resnetv2.identity_block(x, filters = filters)

    if pool == 'avg':
        x = GlobalAveragePooling2D(name='avg_pool')(x)
    else:
        x = GlobalMaxPooling2D(name='max_pool')(x)
    x = Dropout(dropout)(x)

    if num_classes == 2:
        x = Dense(1, activation = 'sigmoid', name = 'fc')(x)
    else:
        x = Dense(num_classes, activation='softmax', name='fc')(x)
    return Model(img_input, x)
class resnetv2:
    # Building blocks for ResNet-v2 (pre-activation residual units, see
    # https://arxiv.org/pdf/1603.05027.pdf).  Both helpers delegate the
    # convolution ordering to layers.convV2 from the project's ResNet module.
    # NOTE(review): these are plain functions (no self / @staticmethod); they
    # are only callable as resnetv2.conv_block(...) on the class itself.
    def conv_block(block_input, filters, filter_size = (3, 3), strides = 2,
            kernel_initializer='he_normal', kernel_regularizer = l2(1e-4), activation_type = 'relu'):
        # Downsampling residual unit: two convs (first strided) plus a strided
        # 1x1 projection shortcut so shapes match before the Add.
        x = layers.convV2(block_input, filter_num = filters, filter_size = filter_size, strides = strides, kernel_initializer = kernel_initializer, kernel_regularizer = kernel_regularizer, activation_type = activation_type)
        x = layers.convV2(x, filter_num = filters, filter_size = filter_size, kernel_initializer = kernel_initializer, kernel_regularizer = kernel_regularizer, activation_type = activation_type)
        shortcut = layers.convV2(block_input, filter_num = filters, filter_size = (1, 1), strides = strides, kernel_initializer = kernel_initializer, kernel_regularizer = kernel_regularizer, activation_type = 'none')
        x = Add()([x, shortcut])
        return x
    def identity_block(block_input, filters, filter_size = (3, 3),
            kernel_initializer = 'he_normal', kernel_regularizer = l2(1e-4), activation_type = 'relu'): #https://arxiv.org/pdf/1603.05027.pdf
        # Identity residual unit: two convs, input added back unchanged.
        x = layers.convV2(block_input, filter_num = filters, filter_size = filter_size, kernel_initializer = kernel_initializer, kernel_regularizer = kernel_regularizer, activation_type = activation_type)
        x = layers.convV2(x, filter_num = filters, filter_size = filter_size, kernel_initializer = kernel_initializer, kernel_regularizer = kernel_regularizer, activation_type = activation_type)
        x = Add()([x, block_input])
        return x
"noreply@github.com"
] | noreply@github.com |
6a01250803f6a63b6c7b195961c298df47b54af1 | 77bb7e8232362bb2b0b104e93c4b127174f462e5 | /3_accept_offer_response.py | ff86fcb3976f4b73642ea125c92977944d29f4f1 | [
"MIT"
] | permissive | bushido/cate | 2dfac48d4d3b9fd1cd297005b16357af342d405c | 4c43b449d8ccb0e5907117de320cdc9b1ab7e6f0 | refs/heads/master | 2020-12-29T00:42:49.543569 | 2015-01-22T17:36:06 | 2015-01-22T17:36:06 | 29,178,396 | 0 | 0 | null | 2015-01-22T17:22:54 | 2015-01-13T07:38:11 | Python | UTF-8 | Python | false | false | 4,033 | py | import praw
import calendar
import datetime
import json
import os.path
import socket
import sys
from altcoin import SelectParams
import altcoin.rpc
import bitcoin.core.key
from cate import *
from cate.error import ConfigurationError, MessageError, TradeError
from cate.fees import CFeeRate
from cate.tx import *
def assert_acceptance_valid(acceptance):
    """Validate the structure of a peer's offer-acceptance message.

    Verifies the trade ID is present and cannot escape into the filesystem,
    that all other required fields exist, and that the secret hash has the
    expected hex length.  Raises MessageError on the first problem found;
    returns None when the message is well-formed.
    """
    if 'trade_id' not in acceptance:
        raise MessageError("Missing trade ID from accepted offer.")
    if os.path.sep in acceptance['trade_id']:
        raise MessageError("Invalid trade ID received; trade must not contain path separators")
    required_fields = (
        ('secret_hash', "Missing hash of secret value from accepted offer."),
        ('tx2', "Missing TX2 refund transaction from accepted offer."),
        ('public_key_a', "Missing peer public key from accepted offer."),
    )
    for field, complaint in required_fields:
        if field not in acceptance:
            raise MessageError(complaint)
    # 64 hex characters == 32-byte hash.
    if len(acceptance['secret_hash']) != 64:
        raise MessageError("Hash of secret is the wrong length.")
def process_offer_accepted(acceptance, audit):
    """Handle a peer's acceptance of our trade offer (step 3 of the protocol).

    Signs the peer's refund transaction (TX2), builds our own send/refund
    pair (TX3/TX4) on the offered currency's chain, archives TX3 in the
    audit directory, and returns the confirmation message to send back.

    acceptance -- validated dict parsed from the peer's reddit message
    audit      -- TradeDao used to read step-1 state and persist new state
    Returns a dict with 'trade_id', 'tx2_sig' (hex) and 'tx4' (hex-serialized).
    """
    trade_id = acceptance['trade_id']
    secret_hash = x(acceptance['secret_hash'])
    peer_refund_tx = CTransaction.deserialize(x(acceptance['tx2']))
    # Load the offer sent in step 1 so we know which currency/amount we offered
    offer = audit.load_json('1_offer.json')
    offer_currency_code = NETWORK_CODES[offer['offer_currency_hash']]
    offer_currency_quantity = offer['offer_currency_quantity']
    # Connect to the daemon for the offered currency
    # TODO: Check the configuration exists
    altcoin.SelectParams(offer['offer_currency_hash'])
    proxy = altcoin.rpc.AltcoinProxy(service_port=config['daemons'][offer_currency_code]['port'], btc_conf_file=config['daemons'][offer_currency_code]['config'])
    fee_rate = CFeeRate(config['daemons'][offer_currency_code]['fee_per_kb'])
    # Key material: 'a' is the peer, 'b' is us (generated in step 1)
    public_key_a = bitcoin.core.key.CPubKey(x(acceptance['public_key_a']))
    private_key_b = audit.load_private_key('1_private_key.txt')
    public_key_b = bitcoin.core.key.CPubKey(x(offer['public_key_b']))
    # Verify the peer's refund pays the amount we asked for, then sign it
    assert_refund_tx_valid(peer_refund_tx, int(offer['ask_currency_quantity']))
    peer_refund_tx_sig = get_recovery_tx_sig(peer_refund_tx, private_key_b, public_key_a, public_key_b, secret_hash)
    # Generate TX3 & TX4, which are essentially the same as TX1 & TX2 except
    # that ask/offer details are reversed. 48h locktime gives time to refund.
    lock_datetime = datetime.datetime.utcnow() + datetime.timedelta(hours=48)
    nLockTime = calendar.timegm(lock_datetime.timetuple())
    own_address = proxy.getnewaddress("CATE refund " + trade_id)
    tx3 = build_send_transaction(proxy, offer_currency_quantity, public_key_b, public_key_a, secret_hash, fee_rate)
    own_refund_tx = build_unsigned_refund_tx(proxy, tx3, own_address, nLockTime, fee_rate)
    # Write TX3 to audit directory as we don't send it yet
    audit.save_tx('3_tx3.txt', tx3)
    return {
        'trade_id': trade_id,
        'tx2_sig': b2x(peer_refund_tx_sig),
        'tx4': b2x(own_refund_tx.serialize())
    }
# Script entry point: poll reddit for step-2 acceptance messages and answer
# each one with a step-3 confirmation. Python 2 syntax (print statements).
try:
    config = load_configuration("config.yml")
except ConfigurationError as e:
    print e
    sys.exit(0)
r = praw.Reddit(user_agent = USER_AGENT)
try:
    reddit_login(r, config)
except ConfigurationError as e:
    print e
    sys.exit(0)
# Scan inbox for acceptance messages from step 2 of the protocol
for message in r.get_messages():
    if message.subject != "CATE transaction accepted (2)":
        continue
    acceptance = json.loads(message.body)
    try:
        assert_acceptance_valid(acceptance)
    except MessageError as err:
        print("Received invalid trade from " + message.author.name)
        continue
    trade_id = acceptance['trade_id']
    audit = TradeDao(trade_id)
    # Idempotency: skip acceptances we have already recorded
    if audit.file_exists('3_acceptance.json'):
        print "Offer acceptance " + trade_id + " already received, ignoring offer"
        continue
    audit.save_json('3_acceptance.json', acceptance)
    try:
        response = process_offer_accepted(acceptance, audit)
    except socket.error as err:
        print "Could not connect to wallet."
        sys.exit(1)
    if not response:
        break
    audit.save_json('3_confirmation.json', response)
    r.send_message(message.author, 'CATE transaction confirmed (3)', json.dumps(response))
| [
"jrn@jrn.me.uk"
] | jrn@jrn.me.uk |
95207d33f1d2faac4a06d1c11f383bb85168aea9 | 4e56685a0674b2210e855c4560d29735a8a6a540 | /contrib/macdeploy/macdeployqtplus | dc73914c024966071bb70bfa73defbbe8433a82b | [
"GPL-3.0-only",
"MIT"
] | permissive | clickgem/cgm | 467e16debf0b9a9d7a9ee6ef1e78f488b7c71443 | 3b46475f67a8c7884902ea818aba501e138da1a8 | refs/heads/master | 2020-03-15T11:39:12.518930 | 2019-01-18T03:40:37 | 2019-01-18T03:40:37 | 132,125,065 | 0 | 1 | MIT | 2019-01-21T10:19:24 | 2018-05-04T10:20:23 | C++ | UTF-8 | Python | false | false | 32,450 | #!/usr/bin/env python
#
# Copyright (C) 2011 Patrick "p2k" Schneider <me@p2k-network.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import subprocess, sys, re, os, shutil, stat, os.path
from string import Template
from time import sleep
from argparse import ArgumentParser
# This is ported from the original macdeployqt with modifications
class FrameworkInfo(object):
    """Parsed description of one framework/dylib dependency of a binary.

    Instances are produced from `otool -L` output lines and carry both the
    source location of the library and where it should live inside the
    deployed .app bundle.
    """
    def __init__(self):
        # All fields are plain strings, filled in by fromOtoolLibraryLine().
        self.frameworkDirectory = ""
        self.frameworkName = ""
        self.frameworkPath = ""
        self.binaryDirectory = ""
        self.binaryName = ""
        self.binaryPath = ""
        self.version = ""
        self.installName = ""
        self.deployedInstallName = ""
        self.sourceFilePath = ""
        self.destinationDirectory = ""
        self.sourceResourcesDirectory = ""
        self.destinationResourcesDirectory = ""
    def __eq__(self, other):
        # Value equality over all fields; used to avoid queueing duplicates.
        if self.__class__ == other.__class__:
            return self.__dict__ == other.__dict__
        else:
            return False
    def __str__(self):
        return """ Framework name: %s
 Framework directory: %s
 Framework path: %s
 Binary name: %s
 Binary directory: %s
 Binary path: %s
 Version: %s
 Install name: %s
 Deployed install name: %s
 Source file Path: %s
 Deployed Directory (relative to bundle): %s
""" % (self.frameworkName,
       self.frameworkDirectory,
       self.frameworkPath,
       self.binaryName,
       self.binaryDirectory,
       self.binaryPath,
       self.version,
       self.installName,
       self.deployedInstallName,
       self.sourceFilePath,
       self.destinationDirectory)
    def isDylib(self):
        """True if this dependency is a bare .dylib rather than a .framework."""
        return self.frameworkName.endswith(".dylib")
    def isQtFramework(self):
        """True if this dependency is part of Qt (framework or dylib layout)."""
        if self.isDylib():
            return self.frameworkName.startswith("libQt")
        else:
            return self.frameworkName.startswith("Qt")
    # Matches one dependency line of `otool -L` output, capturing the path.
    reOLine = re.compile(r'^(.+) \(compatibility version [0-9.]+, current version [0-9.]+\)$')
    bundleFrameworkDirectory = "Contents/Frameworks"
    bundleBinaryDirectory = "Contents/MacOS"
    @classmethod
    def fromOtoolLibraryLine(cls, line):
        """Parse one trimmed `otool -L` dependency line into a FrameworkInfo.

        Returns None for empty lines and for system libraries that should not
        be bundled; raises RuntimeError on unparseable lines.
        """
        # Note: line must be trimmed
        if line == "":
            return None
        # Don't deploy system libraries (exception for libQtuitools and libQtlucene).
        if line.startswith("/System/Library/") or line.startswith("@executable_path") or (line.startswith("/usr/lib/") and "libQt" not in line):
            return None
        m = cls.reOLine.match(line)
        if m is None:
            raise RuntimeError("otool line could not be parsed: " + line)
        path = m.group(1)
        info = cls()
        info.sourceFilePath = path
        info.installName = path
        if path.endswith(".dylib"):
            # Dylib layout: the file itself is the whole "framework".
            dirname, filename = os.path.split(path)
            info.frameworkName = filename
            info.frameworkDirectory = dirname
            info.frameworkPath = path
            info.binaryDirectory = dirname
            info.binaryName = filename
            info.binaryPath = path
            info.version = "-"
            info.installName = path
            info.deployedInstallName = "@executable_path/../Frameworks/" + info.binaryName
            info.sourceFilePath = path
            info.destinationDirectory = cls.bundleFrameworkDirectory
        else:
            parts = path.split("/")
            i = 0
            # Search for the .framework directory
            for part in parts:
                if part.endswith(".framework"):
                    break
                i += 1
            if i == len(parts):
                raise RuntimeError("Could not find .framework or .dylib in otool line: " + line)
            info.frameworkName = parts[i]
            info.frameworkDirectory = "/".join(parts[:i])
            info.frameworkPath = os.path.join(info.frameworkDirectory, info.frameworkName)
            # Framework layout is Name.framework/Versions/<ver>/<binary>
            info.binaryName = parts[i+3]
            info.binaryDirectory = "/".join(parts[i+1:i+3])
            info.binaryPath = os.path.join(info.binaryDirectory, info.binaryName)
            info.version = parts[i+2]
            info.deployedInstallName = "@executable_path/../Frameworks/" + os.path.join(info.frameworkName, info.binaryPath)
            info.destinationDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory)
            info.sourceResourcesDirectory = os.path.join(info.frameworkPath, "Resources")
            info.destinationResourcesDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, "Resources")
        return info
class ApplicationBundleInfo(object):
    """Locations of interest inside a macOS .app bundle.

    Raises RuntimeError if the bundle's main executable is missing.
    """
    def __init__(self, path):
        self.path = path
        # The main binary is Contents/MacOS/<AppName> (bundle name sans ".app").
        app_name = os.path.splitext(os.path.basename(path))[0]
        contents_dir = os.path.join(path, "Contents")
        self.binaryPath = os.path.join(contents_dir, "MacOS", app_name)
        if not os.path.exists(self.binaryPath):
            raise RuntimeError("Could not find bundle binary for " + path)
        self.resourcesPath = os.path.join(contents_dir, "Resources")
        self.pluginPath = os.path.join(contents_dir, "PlugIns")
class DeploymentInfo(object):
    """State accumulated during deployment: Qt location and deployed frameworks."""

    def __init__(self):
        self.qtPath = None
        self.pluginPath = None
        self.deployedFrameworks = []

    def detectQtPath(self, frameworkDirectory):
        """Guess the Qt installation root from a framework's directory.

        Tries the classic layout, the MacPorts layouts, then the QTDIR
        environment variable; also records the plugins directory if present.
        """
        parent_dir = os.path.dirname(frameworkDirectory)
        # Candidate Qt roots, checked by the presence of a "translations" dir.
        candidates = [
            parent_dir,                                                    # classic, e.g. /usr/local/Trolltech/Qt-4.x.x
            os.path.join(parent_dir, "share", "qt4"),                      # MacPorts, e.g. /opt/local/share/qt4
            os.path.join(os.path.dirname(parent_dir), "share", "qt4"),     # newer MacPorts
        ]
        for candidate in candidates:
            if os.path.exists(os.path.join(candidate, "translations")):
                self.qtPath = candidate
                break
        else:
            self.qtPath = os.getenv("QTDIR", None)
        if self.qtPath is not None:
            plugin_candidate = os.path.join(self.qtPath, "plugins")
            if os.path.exists(plugin_candidate):
                self.pluginPath = plugin_candidate

    def usesFramework(self, name):
        """True if a deployed framework matches *name* ("X." or "libX." prefix)."""
        framework_prefix = "%s." % name
        dylib_prefix = "lib%s." % name
        for deployed in self.deployedFrameworks:
            if deployed.endswith(".framework") and deployed.startswith(framework_prefix):
                return True
            if deployed.endswith(".dylib") and deployed.startswith(dylib_prefix):
                return True
        return False
def getFrameworks(binaryPath, verbose):
    """Run `otool -L` on *binaryPath* and return its non-system dependencies.

    Returns a list of FrameworkInfo objects; raises RuntimeError if otool
    fails. *verbose* controls diagnostic output (0-3).
    """
    if verbose >= 3:
        print "Inspecting with otool: " + binaryPath
    otool = subprocess.Popen(["otool", "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    o_stdout, o_stderr = otool.communicate()
    if otool.returncode != 0:
        if verbose >= 1:
            sys.stderr.write(o_stderr)
            sys.stderr.flush()
        raise RuntimeError("otool failed with return code %d" % otool.returncode)
    otoolLines = o_stdout.split("\n")
    otoolLines.pop(0) # First line is the inspected binary
    if ".framework" in binaryPath or binaryPath.endswith(".dylib"):
        otoolLines.pop(0) # Frameworks and dylibs list themselves as a dependency.
    libraries = []
    for line in otoolLines:
        info = FrameworkInfo.fromOtoolLibraryLine(line.strip())
        if info is not None:
            if verbose >= 3:
                print "Found framework:"
                print info
            libraries.append(info)
    return libraries
def runInstallNameTool(action, *args):
    """Invoke install_name_tool with the given action flag (e.g. "change", "id")."""
    command = ["install_name_tool", "-%s" % action]
    command.extend(args)
    subprocess.check_call(command)
def changeInstallName(oldName, newName, binaryPath, verbose):
    """Rewrite one dependency reference inside *binaryPath* via install_name_tool."""
    if verbose >= 3:
        print "Using install_name_tool:"
        print " in", binaryPath
        print " change reference", oldName
        print " to", newName
    runInstallNameTool("change", oldName, newName, binaryPath)
def changeIdentification(id, binaryPath, verbose):
    """Set the install-name id of the library at *binaryPath* to *id*."""
    if verbose >= 3:
        print "Using install_name_tool:"
        print " change identification in", binaryPath
        print " to", id
    runInstallNameTool("id", id, binaryPath)
def runStrip(binaryPath, verbose):
    """Strip non-global symbols (`strip -x`) from the binary in place."""
    if verbose >= 3:
        print "Using strip:"
        print " stripped", binaryPath
    subprocess.check_call(["strip", "-x", binaryPath])
def copyFramework(framework, path, verbose):
    """Copy a framework/dylib into the bundle rooted at *path*.

    Returns the destination binary path, or None if it was already present.
    Also copies framework Resources, or qt_menu.nib for the dylib layout of
    QtGui. Raises RuntimeError if the source file does not exist.
    """
    if framework.sourceFilePath.startswith("Qt"):
        #standard place for Nokia Qt installer's frameworks
        fromPath = "/Library/Frameworks/" + framework.sourceFilePath
    else:
        fromPath = framework.sourceFilePath
    toDir = os.path.join(path, framework.destinationDirectory)
    toPath = os.path.join(toDir, framework.binaryName)
    if not os.path.exists(fromPath):
        raise RuntimeError("No file at " + fromPath)
    if os.path.exists(toPath):
        return None # Already there
    if not os.path.exists(toDir):
        os.makedirs(toDir)
    shutil.copy2(fromPath, toPath)
    if verbose >= 3:
        print "Copied:", fromPath
        print " to:", toPath
    # Make sure the copy is writable so install_name_tool can edit it later.
    permissions = os.stat(toPath)
    if not permissions.st_mode & stat.S_IWRITE:
        os.chmod(toPath, permissions.st_mode | stat.S_IWRITE)
    if not framework.isDylib(): # Copy resources for real frameworks
        fromResourcesDir = framework.sourceResourcesDirectory
        if os.path.exists(fromResourcesDir):
            toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory)
            shutil.copytree(fromResourcesDir, toResourcesDir)
            if verbose >= 3:
                print "Copied resources:", fromResourcesDir
                print " to:", toResourcesDir
    elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout)
        qtMenuNibSourcePath = os.path.join(framework.frameworkDirectory, "Resources", "qt_menu.nib")
        qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib")
        if os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath):
            shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath)
            if verbose >= 3:
                print "Copied for libQtGui:", qtMenuNibSourcePath
                print " to:", qtMenuNibDestinationPath
    return toPath
def deployFrameworks(frameworks, bundlePath, binaryPath, strip, verbose, deploymentInfo=None):
    """Copy each framework into the bundle and fix up install names.

    Processes *frameworks* as a work queue, appending newly discovered
    transitive dependencies, and returns the (possibly shared) DeploymentInfo.
    """
    if deploymentInfo is None:
        deploymentInfo = DeploymentInfo()
    while len(frameworks) > 0:
        framework = frameworks.pop(0)
        deploymentInfo.deployedFrameworks.append(framework.frameworkName)
        if verbose >= 2:
            print "Processing", framework.frameworkName, "..."
        # Get the Qt path from one of the Qt frameworks
        if deploymentInfo.qtPath is None and framework.isQtFramework():
            deploymentInfo.detectQtPath(framework.frameworkDirectory)
        if framework.installName.startswith("@executable_path"):
            if verbose >= 2:
                print framework.frameworkName, "already deployed, skipping."
            continue
        # install_name_tool the new id into the binary
        changeInstallName(framework.installName, framework.deployedInstallName, binaryPath, verbose)
        # Copy framework to app bundle.
        deployedBinaryPath = copyFramework(framework, bundlePath, verbose)
        # Skip the rest if already was deployed.
        if deployedBinaryPath is None:
            continue
        if strip:
            runStrip(deployedBinaryPath, verbose)
        # install_name_tool it a new id.
        changeIdentification(framework.deployedInstallName, deployedBinaryPath, verbose)
        # Check for framework dependencies
        dependencies = getFrameworks(deployedBinaryPath, verbose)
        for dependency in dependencies:
            changeInstallName(dependency.installName, dependency.deployedInstallName, deployedBinaryPath, verbose)
            # Deploy framework if necessary.
            if dependency.frameworkName not in deploymentInfo.deployedFrameworks and dependency not in frameworks:
                frameworks.append(dependency)
    return deploymentInfo
def deployFrameworksForAppBundle(applicationBundle, strip, verbose):
    """Deploy all external frameworks of the bundle's main binary.

    NOTE(review): the early return only triggers when verbose >= 1; with
    verbose == 0 an empty framework list falls through to deployFrameworks,
    which is a no-op and also returns a fresh DeploymentInfo, so the result
    is the same either way.
    """
    frameworks = getFrameworks(applicationBundle.binaryPath, verbose)
    if len(frameworks) == 0 and verbose >= 1:
        print "Warning: Could not find any external frameworks to deploy in %s." % (applicationBundle.path)
        return DeploymentInfo()
    else:
        return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose)
def deployPlugins(appBundleInfo, deploymentInfo, strip, verbose):
    """Copy the needed Qt plugins into the bundle's PlugIns directory.

    Walks deploymentInfo.pluginPath, skipping plugin categories whose owning
    Qt module is not among the deployed frameworks, then copies each selected
    plugin and fixes up its dependency install names.
    """
    # Lookup available plugins, exclude unneeded
    plugins = []
    for dirpath, dirnames, filenames in os.walk(deploymentInfo.pluginPath):
        pluginDirectory = os.path.relpath(dirpath, deploymentInfo.pluginPath)
        if pluginDirectory == "designer":
            # Skip designer plugins
            continue
        elif pluginDirectory == "phonon" or pluginDirectory == "phonon_backend":
            # Deploy the phonon plugins only if phonon is in use
            if not deploymentInfo.usesFramework("phonon"):
                continue
        elif pluginDirectory == "sqldrivers":
            # Deploy the sql plugins only if QtSql is in use
            if not deploymentInfo.usesFramework("QtSql"):
                continue
        elif pluginDirectory == "script":
            # Deploy the script plugins only if QtScript is in use
            if not deploymentInfo.usesFramework("QtScript"):
                continue
        elif pluginDirectory == "qmltooling":
            # Deploy the qml plugins only if QtDeclarative is in use
            if not deploymentInfo.usesFramework("QtDeclarative"):
                continue
        elif pluginDirectory == "bearer":
            # Deploy the bearer plugins only if QtNetwork is in use
            if not deploymentInfo.usesFramework("QtNetwork"):
                continue
        for pluginName in filenames:
            pluginPath = os.path.join(pluginDirectory, pluginName)
            if pluginName.endswith("_debug.dylib"):
                # Skip debug plugins
                continue
            elif pluginPath == "imageformats/libqsvg.dylib" or pluginPath == "iconengines/libqsvgicon.dylib":
                # Deploy the svg plugins only if QtSvg is in use
                if not deploymentInfo.usesFramework("QtSvg"):
                    continue
            elif pluginPath == "accessible/libqtaccessiblecompatwidgets.dylib":
                # Deploy accessibility for Qt3Support only if the Qt3Support is in use
                if not deploymentInfo.usesFramework("Qt3Support"):
                    continue
            elif pluginPath == "graphicssystems/libqglgraphicssystem.dylib":
                # Deploy the opengl graphicssystem plugin only if QtOpenGL is in use
                if not deploymentInfo.usesFramework("QtOpenGL"):
                    continue
            plugins.append((pluginDirectory, pluginName))
    for pluginDirectory, pluginName in plugins:
        if verbose >= 2:
            print "Processing plugin", os.path.join(pluginDirectory, pluginName), "..."
        sourcePath = os.path.join(deploymentInfo.pluginPath, pluginDirectory, pluginName)
        destinationDirectory = os.path.join(appBundleInfo.pluginPath, pluginDirectory)
        if not os.path.exists(destinationDirectory):
            os.makedirs(destinationDirectory)
        destinationPath = os.path.join(destinationDirectory, pluginName)
        shutil.copy2(sourcePath, destinationPath)
        if verbose >= 3:
            print "Copied:", sourcePath
            print " to:", destinationPath
        if strip:
            runStrip(destinationPath, verbose)
        # Rewrite the plugin's own dependency references to the bundled copies.
        dependencies = getFrameworks(destinationPath, verbose)
        for dependency in dependencies:
            changeInstallName(dependency.installName, dependency.deployedInstallName, destinationPath, verbose)
            # Deploy framework if necessary.
            if dependency.frameworkName not in deploymentInfo.deployedFrameworks:
                deployFrameworks([dependency], appBundleInfo.path, destinationPath, strip, verbose, deploymentInfo)
# qt.conf written into the bundle so Qt looks for translations/plugins inside it.
qt_conf="""[Paths]
translations=Resources
plugins=PlugIns
"""
# Command-line interface. Python 2 script (print statements below).
ap = ArgumentParser(description="""Improved version of macdeployqt.
Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file.
Note, that the "dist" folder will be deleted before deploying on each run.
Optionally, Qt translation files (.qm) and additional resources can be added to the bundle.
Also optionally signs the .app bundle; set the CODESIGNARGS environment variable to pass arguments
to the codesign tool.
E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""")
ap.add_argument("app_bundle", nargs=1, metavar="app-bundle", help="application bundle to be deployed")
ap.add_argument("-verbose", type=int, nargs=1, default=[1], metavar="<0-3>", help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug")
ap.add_argument("-no-plugins", dest="plugins", action="store_false", default=True, help="skip plugin deployment")
ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, help="don't run 'strip' on the binaries")
ap.add_argument("-sign", dest="sign", action="store_true", default=False, help="sign .app bundle with codesign tool")
ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used")
ap.add_argument("-fancy", nargs=1, metavar="plist", default=[], help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work")
ap.add_argument("-add-qt-tr", nargs=1, metavar="languages", default=[], help="add Qt translation files to the bundle's ressources; the language list must be separated with commas, not with whitespace")
ap.add_argument("-add-resources", nargs="+", metavar="path", default=[], help="list of additional files or folders to be copied into the bundle's resources; must be the last argument")
config = ap.parse_args()
verbose = config.verbose[0]
# ------------------------------------------------
# Validate the app bundle argument before doing any work.
app_bundle = config.app_bundle[0]
if not os.path.exists(app_bundle):
    if verbose >= 1:
        sys.stderr.write("Error: Could not find app bundle \"%s\"\n" % (app_bundle))
    sys.exit(1)
app_bundle_name = os.path.splitext(os.path.basename(app_bundle))[0]
# ------------------------------------------------
# Verify every extra resource exists before we start mutating anything.
for p in config.add_resources:
    if verbose >= 3:
        print "Checking for \"%s\"..." % p
    if not os.path.exists(p):
        if verbose >= 1:
            sys.stderr.write("Error: Could not find additional resource file \"%s\"\n" % (p))
        sys.exit(1)
# ------------------------------------------------
# Load and validate the "fancy" disk-image plist (optional -fancy argument).
if len(config.fancy) == 1:
    if verbose >= 3:
        print "Fancy: Importing plistlib..."
    try:
        import plistlib
    except ImportError:
        if verbose >= 1:
            sys.stderr.write("Error: Could not import plistlib which is required for fancy disk images.\n")
        sys.exit(1)
    if verbose >= 3:
        print "Fancy: Importing appscript..."
    try:
        import appscript
    except ImportError:
        if verbose >= 1:
            sys.stderr.write("Error: Could not import appscript which is required for fancy disk images.\n")
            sys.stderr.write("Please install it e.g. with \"sudo easy_install appscript\".\n")
        sys.exit(1)
    p = config.fancy[0]
    if verbose >= 3:
        print "Fancy: Loading \"%s\"..." % p
    if not os.path.exists(p):
        if verbose >= 1:
            sys.stderr.write("Error: Could not find fancy disk image plist at \"%s\"\n" % (p))
        sys.exit(1)
    try:
        fancy = plistlib.readPlist(p)
    except:
        if verbose >= 1:
            sys.stderr.write("Error: Could not parse fancy disk image plist at \"%s\"\n" % (p))
        sys.exit(1)
    # Sanity-check the plist's shape; any failed assert means a bad format.
    try:
        assert not fancy.has_key("window_bounds") or (isinstance(fancy["window_bounds"], list) and len(fancy["window_bounds"]) == 4)
        assert not fancy.has_key("background_picture") or isinstance(fancy["background_picture"], str)
        assert not fancy.has_key("icon_size") or isinstance(fancy["icon_size"], int)
        assert not fancy.has_key("applications_symlink") or isinstance(fancy["applications_symlink"], bool)
        if fancy.has_key("items_position"):
            assert isinstance(fancy["items_position"], dict)
            for key, value in fancy["items_position"].iteritems():
                assert isinstance(value, list) and len(value) == 2 and isinstance(value[0], int) and isinstance(value[1], int)
    except:
        if verbose >= 1:
            sys.stderr.write("Error: Bad format of fancy disk image plist at \"%s\"\n" % (p))
        sys.exit(1)
    # Resolve the background picture path (absolute, or relative to the plist).
    if fancy.has_key("background_picture"):
        bp = fancy["background_picture"]
        if verbose >= 3:
            print "Fancy: Resolving background picture \"%s\"..." % bp
        if not os.path.exists(bp):
            bp = os.path.join(os.path.dirname(p), bp)
            if not os.path.exists(bp):
                if verbose >= 1:
                    sys.stderr.write("Error: Could not find background picture at \"%s\" or \"%s\"\n" % (fancy["background_picture"], bp))
                sys.exit(1)
            else:
                fancy["background_picture"] = bp
else:
    fancy = None
# ------------------------------------------------
# Start from a clean "dist" folder on every run.
if os.path.exists("dist"):
    if verbose >= 2:
        print "+ Removing old dist folder +"
    shutil.rmtree("dist")
# ------------------------------------------------
# Copy the source bundle into dist/ and wrap it for path lookups.
target = os.path.join("dist", app_bundle)
if verbose >= 2:
    print "+ Copying source bundle +"
if verbose >= 3:
    print app_bundle, "->", target
os.mkdir("dist")
shutil.copytree(app_bundle, target)
applicationBundle = ApplicationBundleInfo(target)
# ------------------------------------------------
if verbose >= 2:
print "+ Deploying frameworks +"
try:
deploymentInfo = deployFrameworksForAppBundle(applicationBundle, config.strip, verbose)
if deploymentInfo.qtPath is None:
deploymentInfo.qtPath = os.getenv("QTDIR", None)
if deploymentInfo.qtPath is None:
if verbose >= 1:
sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n")
config.plugins = False
except RuntimeError as e:
if verbose >= 1:
sys.stderr.write("Error: %s\n" % str(e))
sys.exit(ret)
# ------------------------------------------------
if config.plugins:
if verbose >= 2:
print "+ Deploying plugins +"
try:
deployPlugins(applicationBundle, deploymentInfo, config.strip, verbose)
except RuntimeError as e:
if verbose >= 1:
sys.stderr.write("Error: %s\n" % str(e))
sys.exit(ret)
# ------------------------------------------------
# Resolve the requested Qt translation files and verify they exist.
if len(config.add_qt_tr) == 0:
    add_qt_tr = []
else:
    qt_tr_dir = os.path.join(deploymentInfo.qtPath, "translations")
    add_qt_tr = ["qt_%s.qm" % lng for lng in config.add_qt_tr[0].split(",")]
    for lng_file in add_qt_tr:
        p = os.path.join(qt_tr_dir, lng_file)
        if verbose >= 3:
            print "Checking for \"%s\"..." % p
        if not os.path.exists(p):
            if verbose >= 1:
                sys.stderr.write("Error: Could not find Qt translation file \"%s\"\n" % (lng_file))
            sys.exit(1)
# ------------------------------------------------
# Write qt.conf so the deployed app finds its bundled plugins/translations.
if verbose >= 2:
    print "+ Installing qt.conf +"
f = open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb")
f.write(qt_conf)
f.close()
# ------------------------------------------------
# Copy the Qt translation files into the bundle's Resources.
if len(add_qt_tr) > 0 and verbose >= 2:
    print "+ Adding Qt translations +"
for lng_file in add_qt_tr:
    if verbose >= 3:
        print os.path.join(qt_tr_dir, lng_file), "->", os.path.join(applicationBundle.resourcesPath, lng_file)
    shutil.copy2(os.path.join(qt_tr_dir, lng_file), os.path.join(applicationBundle.resourcesPath, lng_file))
# ------------------------------------------------
# Copy any additional user-supplied resources (files or directories).
if len(config.add_resources) > 0 and verbose >= 2:
    print "+ Adding additional resources +"
for p in config.add_resources:
    t = os.path.join(applicationBundle.resourcesPath, os.path.basename(p))
    if verbose >= 3:
        print p, "->", t
    if os.path.isdir(p):
        shutil.copytree(p, t)
    else:
        shutil.copy2(p, t)
# ------------------------------------------------
# Optionally codesign the bundle; CODESIGNARGS supplies identity/keychain.
if config.sign and 'CODESIGNARGS' not in os.environ:
    print "You must set the CODESIGNARGS environment variable. Skipping signing."
elif config.sign:
    if verbose >= 1:
        print "Code-signing app bundle %s"%(target,)
    subprocess.check_call("codesign --force %s %s"%(os.environ['CODESIGNARGS'], target), shell=True)
# ------------------------------------------------
# Disk-image creation (-dmg). Plain images are created directly; "fancy"
# images are created read-write, mounted, decorated, then converted.
if config.dmg is not None:
    def runHDIUtil(verb, image_basename, **kwargs):
        """Run `hdiutil <verb> <image_basename>.dmg` with kwargs as flags.

        A truthy 'capture_stdout' kwarg switches to check_output so the
        caller can parse hdiutil's output (used for 'attach').
        """
        hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"]
        if kwargs.has_key("capture_stdout"):
            del kwargs["capture_stdout"]
            run = subprocess.check_output
        else:
            if verbose < 2:
                hdiutil_args.append("-quiet")
            elif verbose >= 3:
                hdiutil_args.append("-verbose")
            run = subprocess.check_call
        for key, value in kwargs.iteritems():
            hdiutil_args.append("-" + key)
            if not value is True:
                hdiutil_args.append(str(value))
        return run(hdiutil_args)
    if verbose >= 2:
        if fancy is None:
            print "+ Creating .dmg disk image +"
        else:
            print "+ Preparing .dmg disk image +"
    # Image basename: explicit -dmg value, or CamelCased app name.
    if config.dmg != "":
        dmg_name = config.dmg
    else:
        spl = app_bundle_name.split(" ")
        dmg_name = spl[0] + "".join(p.capitalize() for p in spl[1:])
    if fancy is None:
        try:
            runHDIUtil("create", dmg_name, srcfolder="dist", format="UDBZ", volname=app_bundle_name, ov=True)
        except subprocess.CalledProcessError as e:
            sys.exit(e.returncode)
    else:
        if verbose >= 3:
            print "Determining size of \"dist\"..."
        # Size the temp image as dist's content plus 10% headroom.
        size = 0
        for path, dirs, files in os.walk("dist"):
            for file in files:
                size += os.path.getsize(os.path.join(path, file))
        size += int(size * 0.1)
        if verbose >= 3:
            print "Creating temp image for modification..."
        try:
            runHDIUtil("create", dmg_name + ".temp", srcfolder="dist", format="UDRW", size=size, volname=app_bundle_name, ov=True)
        except subprocess.CalledProcessError as e:
            sys.exit(e.returncode)
        if verbose >= 3:
            print "Attaching temp image..."
        try:
            output = runHDIUtil("attach", dmg_name + ".temp", readwrite=True, noverify=True, noautoopen=True, capture_stdout=True)
        except subprocess.CalledProcessError as e:
            sys.exit(e.returncode)
        # Parse the mount point (/Volumes/<name>) out of hdiutil's output.
        m = re.search("/Volumes/(.+$)", output)
        disk_root = m.group(0)
        disk_name = m.group(1)
        if verbose >= 2:
            print "+ Applying fancy settings +"
        if fancy.has_key("background_picture"):
            bg_path = os.path.join(disk_root, os.path.basename(fancy["background_picture"]))
            if verbose >= 3:
                print fancy["background_picture"], "->", bg_path
            shutil.copy2(fancy["background_picture"], bg_path)
        else:
            bg_path = None
        if fancy.get("applications_symlink", False):
            os.symlink("/Applications", os.path.join(disk_root, "Applications"))
        # The Python appscript package broke with OSX 10.8 and isn't being fixed.
        # So we now build up an AppleScript string and use the osascript command
        # to make the .dmg file pretty:
        appscript = Template( """
        on run argv
           tell application "Finder"
             tell disk "$disk"
                   open
                   set current view of container window to icon view
                   set toolbar visible of container window to false
                   set statusbar visible of container window to false
                   set the bounds of container window to {$window_bounds}
                   set theViewOptions to the icon view options of container window
                   set arrangement of theViewOptions to not arranged
                   set icon size of theViewOptions to $icon_size
                   $background_commands
                   $items_positions
                   close -- close/reopen works around a bug...
                   open
                   update without registering applications
                   delay 5
                   eject
             end tell
           end tell
        end run
        """)
        itemscript = Template('set position of item "${item}" of container window to {${position}}')
        items_positions = []
        if fancy.has_key("items_position"):
            for name, position in fancy["items_position"].iteritems():
                params = { "item" : name, "position" : ",".join([str(p) for p in position]) }
                items_positions.append(itemscript.substitute(params))
params = {
"disk" : "Clickgem-Qt",
"window_bounds" : "300,300,800,620",
"icon_size" : "96",
"background_commands" : "",
"items_positions" : "\n ".join(items_positions)
}
if fancy.has_key("window_bounds"):
params["window.bounds"] = ",".join([str(p) for p in fancy["window_bounds"]])
if fancy.has_key("icon_size"):
params["icon_size"] = str(fancy["icon_size"])
if bg_path is not None:
# Set background file, then call SetFile to make it invisible.
# (note: making it invisible first makes set background picture fail)
bgscript = Template("""set background picture of theViewOptions to file "$bgpic"
do shell script "SetFile -a V /Volumes/$disk/$bgpic" """)
params["background_commands"] = bgscript.substitute({"bgpic" : os.path.basename(bg_path), "disk" : params["disk"]})
        # Run the generated AppleScript through osascript to decorate the image.
        s = appscript.substitute(params)
        if verbose >= 2:
            print("Running AppleScript:")
            print(s)
        p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE)
        p.communicate(input=s)
        if p.returncode:
            print("Error running osascript.")
        # Convert the decorated read-write image to compressed UDBZ and clean up.
        if verbose >= 2:
            print "+ Finalizing .dmg disk image +"
        try:
            runHDIUtil("convert", dmg_name + ".temp", format="UDBZ", o=dmg_name + ".dmg", ov=True)
        except subprocess.CalledProcessError as e:
            sys.exit(e.returncode)
        os.unlink(dmg_name + ".temp.dmg")
# ------------------------------------------------
if verbose >= 2:
    print "+ Done +"
sys.exit(0)
| [
"noreply@github.com"
] | noreply@github.com | |
45788bd433d9d1bb00ef60a33aba8c4313a3f17c | a08d885cb9150d7e84f5ffbf0c9734893105a898 | /2020/Day 07/handy_haversacks.py | 955330377eb8b2f3782915f6829a6cbbf904ca5f | [] | no_license | vhsw/Advent-of-Code | ab422c389340a1caf2ec17c5db4981add6433fbe | 3c1dac27667472202ab15098c48efaac19348edf | refs/heads/master | 2022-12-29T03:56:59.648395 | 2022-12-26T11:01:45 | 2022-12-26T11:01:45 | 162,491,163 | 0 | 0 | null | 2022-05-10T08:43:32 | 2018-12-19T21:10:26 | Python | UTF-8 | Python | false | false | 1,059 | py | "Day 07 answers"
import re
INPUT = "2020/Day 07/input.txt"
def parse(data):
    """Build {container color: [(count, inner color), ...]} from rule lines.

    Colors that contain no other bags get no entry at all.
    """
    rules = {}
    contents_pattern = re.compile(r"(\d+) (\w+ \w+)")
    for rule in data:
        outer, inner = rule.split(" bags contain ")
        for count, color in contents_pattern.findall(inner):
            rules.setdefault(outer, []).append((int(count), color))
    return rules
def part1(data):
    """Part 1 answer: count bag colors that can eventually hold a shiny gold bag."""
    rules = parse(data)
    # Map each outer color to just the colors it can directly contain.
    contains = {outer: [color for _, color in contents] for outer, contents in rules.items()}
    reachable = set()
    frontier = {outer for outer, inner in contains.items() if "shiny gold" in inner}
    # Walk the containment relation upward until no new containers appear.
    while frontier:
        current = frontier.pop()
        reachable.add(current)
        frontier |= {outer for outer, inner in contains.items() if current in inner}
    return len(reachable)
def f(c, rules):
    """Return the total bag count of the subtree rooted at color *c*, including c itself."""
    contents = rules.get(c)
    if contents is None:
        # A color with no rule contains nothing: just the bag itself.
        return 1
    return 1 + sum(count * f(inner, rules) for count, inner in contents)
def part2(data):
    """Part 2 answer: how many bags one shiny gold bag must contain (itself excluded)."""
    return f("shiny gold", parse(data)) - 1
if __name__ == "__main__":
    # Read the puzzle input once and print both answers.
    with open(INPUT) as fp:
        DATA = fp.readlines()
    print(f"Part 1: { part1(DATA) }")
    print(f"Part 2: { part2(DATA) }")
| [
"vhsw@ya.ru"
] | vhsw@ya.ru |
5f62efd77cda877b0f315654e66fcb575dcf38a5 | b21180985c994c19e850ef51d5d87c6bf595dc21 | /wechat/queryexp.py | efc683b5018ed5bac565cde68dd6455b49f93e69 | [] | no_license | hldai/labelwc | c74d3af98576acd514f9136db663ca4cbd95708f | 38c969c61f240e49d5475be716c6b159b57220cd | refs/heads/master | 2020-12-02T22:18:06.991302 | 2017-08-13T13:04:44 | 2017-08-13T13:04:44 | 96,111,637 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,942 | py | from utils import load_names_file
def load_acronym_to_name(acronym_name_file, exclude_strs):
    """Build an acronym -> full-name dict from a TSV of (acronym, name, count).

    Acronyms found in exclude_strs are skipped.  If an acronym appears on
    several lines, the last line read wins (the count column is ignored here).
    """
    acr_name_dict = dict()
    f = open(acronym_name_file, 'r')
    for line in f:
        line = line.strip().decode('utf-8')  # Python 2: raw bytes -> unicode
        acr, name, _ = line.split('\t')
        if exclude_strs and acr in exclude_strs:
            continue
        acr_name_dict[acr] = name
        # print acr, name_max
    f.close()
    return acr_name_dict
def load_name_to_acronym(acronym_name_file, abbrev_exclude_strs):
    """Build a full-name -> acronym dict from a TSV of (acronym, name, count).

    Names found in abbrev_exclude_strs are skipped.  When a name maps to
    several acronyms, the acronym with the highest count is kept.
    """
    name_acr_cnt_dict = dict()
    f = open(acronym_name_file, 'r')
    for line in f:
        line = line.strip().decode('utf-8')  # Python 2: raw bytes -> unicode
        acr, name, cnt = line.split('\t')
        if name in abbrev_exclude_strs:
            continue
        cnt = int(cnt)
        tup = name_acr_cnt_dict.get(name, None)
        # Keep only the (acronym, count) pair with the largest count per name.
        if not tup or tup[1] < cnt:
            name_acr_cnt_dict[name] = (acr, cnt)
        # print acr, name_max
    f.close()
    # Strip the counts; the caller only needs name -> acronym.
    name_acr_dict = dict()
    for name, (acr, cnt) in name_acr_cnt_dict.iteritems():
        name_acr_dict[name] = acr
    return name_acr_dict
def expand_word(word, acr_name_dict):
    """Greedily replace substrings of `word` using acr_name_dict.

    Scans left to right, always trying the longest dictionary-keyed
    substring first; characters with no match are copied through unchanged.
    An empty-string mapping counts as "no match".
    """
    pieces = []
    start = 0
    total = len(word)
    while start < total:
        end = total
        replacement = None
        # Shrink the candidate window until a mapped substring is found.
        while end > start:
            replacement = acr_name_dict.get(word[start:end])
            if replacement:
                break
            end -= 1
        if end > start:
            pieces.append(replacement)
            start = end
        else:
            # No match anywhere: keep the single character and advance.
            pieces.append(word[start])
            start = end + 1
    return ''.join(pieces)
class QueryExpansion:
    """Expands and abbreviates query strings via acronym <-> full-name tables.

    Two directions are supported:
      * expansion:    acronym -> full name (acr_name_dict)
      * abbreviation: full name -> acronym (name_acr_dict)
    Exclusion lists keep ambiguous strings out of each direction, and an
    extra file can supply hand-curated pairs added to both tables.
    """
    def __init__(self, acronym_name_file, extra_acronym_name_file, expand_exclude_strs_file,
                 abbrev_exclude_strs_file, cn_seg_app):
        # Strings that must never be expanded.
        self.expand_exclude_strs = load_names_file(expand_exclude_strs_file)
        self.acr_name_dict = load_acronym_to_name(acronym_name_file, self.expand_exclude_strs)
        # Strings that must never be abbreviated.
        self.abbrev_exclude_strs = load_names_file(abbrev_exclude_strs_file)
        self.name_acr_dict = load_name_to_acronym(acronym_name_file, self.abbrev_exclude_strs)
        self.__load_extra_acronym_name_file(extra_acronym_name_file)
        # Word segmenter; must expose segment(str) -> space-separated words.
        self.seg_app = cn_seg_app
    def __load_extra_acronym_name_file(self, filename):
        """Merge hand-curated (acronym, name) pairs into both lookup tables."""
        f = open(filename)
        for line in f:
            acr, name = line.strip().decode('utf-8').split('\t')
            self.acr_name_dict[acr] = name
            self.name_acr_dict[name] = acr
        f.close()
    def __expand_name_words_ob(self, name_words):
        # NOTE(review): older variant of __expand_name_words; appears unused
        # within this class -- confirm before removing.
        name_exp = ''
        lw = len(name_words)
        l = 0
        while l < lw:
            r = lw
            cur_str = ''
            # Find the longest word run that is explicitly excluded from expansion.
            while r > l:
                cur_str = ''.join(name_words[l:r])
                if cur_str in self.expand_exclude_strs:
                    break
                r -= 1
            if r > l:
                # Excluded run: copy it through verbatim.
                name_exp += cur_str
                l = r
            else:
                # No excluded run starting here: expand the single word.
                name_exp += expand_word(name_words[l], self.acr_name_dict)
                print name_words[l], name_exp
                l += 1
        return name_exp
    def __expand_name_words(self, name_words):
        """Expand segmented words, trying the longest word run first.

        For each start position: an excluded run is copied verbatim, a run
        found in acr_name_dict is replaced by its full name, and otherwise
        the single word is expanded character-wise via expand_word().
        """
        name_exp = ''
        lw = len(name_words)
        l = 0
        while l < lw:
            r = lw
            flg = True
            while r > l:
                cur_str = ''.join(name_words[l:r])
                if cur_str in self.expand_exclude_strs:
                    name_exp += cur_str
                    l = r
                    flg = False
                    break
                str_exp = self.acr_name_dict.get(cur_str, '')
                if str_exp:
                    name_exp += str_exp
                    l = r
                    flg = False
                    break
                r -= 1
            if flg:
                # No multi-word match: fall back to per-word expansion.
                name_exp += expand_word(name_words[l], self.acr_name_dict)
                # print name_words[l], name_exp
                l += 1
        return name_exp
    def __abbrev_name_words(self, name_words):
        """Abbreviate segmented words, replacing the longest matching run
        with its acronym; unmatched words are copied through unchanged."""
        new_name = ''
        wlen = len(name_words)
        l = 0
        while l < wlen:
            r = wlen
            flg = False
            while r > l:
                cur_str = ''.join(name_words[l:r])
                str_acr = self.name_acr_dict.get(cur_str, '')
                if str_acr:
                    new_name += str_acr
                    l = r
                    flg = True
                    break
                r -= 1
            if not flg:
                new_name += name_words[l]
                l += 1
        return new_name
    def query_expansion_words(self, name_words):
        """Return [expanded, abbreviated] variants of an already-segmented query."""
        name_expand = self.__expand_name_words(name_words)
        name_abbrev = self.__abbrev_name_words(name_words)
        exp_names = []
        if name_expand:
            exp_names.append(name_expand)
        if name_abbrev:
            exp_names.append(name_abbrev)
        return exp_names
    def query_expansion(self, name_str):
        """Segment a raw query and return its distinct expanded/abbreviated forms.

        Candidates identical to the input, or whose length equals the input's
        non-space length (i.e. nothing actually changed), are dropped.
        """
        name_words = self.seg_app.segment(name_str).split(' ')
        name_expand = self.__expand_name_words(name_words)
        name_abbrev = self.__abbrev_name_words(name_words)
        exp_cands = [name_expand, name_abbrev]
        exp_names = list()
        for name in exp_cands:
            if len(name) == len(name_str) - name_str.count(' '):
                continue
            if name != name_str:
                exp_names.append(name)
        return exp_names
    def expand_name(self, name_str):
        """Return the expanded form of name_str, or '' if expansion changed nothing."""
        words = self.seg_app.segment(name_str).split(' ')
        new_name = self.__expand_name_words(words)
        if new_name != name_str:
            return new_name
        return ''
    def abbrev_name(self, name_str):
        """Return the abbreviated form of name_str, or '' if nothing changed.

        A result exactly one character shorter than a spaced input is treated
        as "only a space was lost" and rejected.
        """
        words = self.seg_app.segment(name_str).split(' ')
        new_name = self.__abbrev_name_words(words)
        if len(new_name) == len(name_str) - 1 and ' ' in name_str:
            return ''
        if new_name != name_str:
            return new_name
        return ''
| [
"hldai@outlook.com"
] | hldai@outlook.com |
ed69f7188cb410e8984e1694b21b711cb0364bab | acb8e84e3b9c987fcab341f799f41d5a5ec4d587 | /langs/7/r_e.py | 94874cb9d8cd478b4704aa826a5d3460c87597a5 | [] | no_license | G4te-Keep3r/HowdyHackers | 46bfad63eafe5ac515da363e1c75fa6f4b9bca32 | fb6d391aaecb60ab5c4650d4ae2ddd599fd85db2 | refs/heads/master | 2020-08-01T12:08:10.782018 | 2016-11-13T20:45:50 | 2016-11-13T20:45:50 | 73,624,224 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 486 | py | import sys
def printFunction(lineRemaining):
    """Print the words enclosed in a quoted token list, or a blank line.

    lineRemaining is a list of whitespace-split tokens.  Output happens only
    when the first and last tokens are each exactly a lone double-quote: the
    tokens between them are printed space-joined, or a blank line when the
    list holds just the two quote tokens.
    """
    if lineRemaining[0] == '"' and lineRemaining[-1] == '"':
        if len(lineRemaining) > 2:
            #data to print
            lineRemaining = lineRemaining[1:-1]
            print ' '.join(lineRemaining)
        else:
            print
def main(fileName):
    """Interpret each line of fileName.

    A line whose first token is 'r_E' has its remaining tokens handed to
    printFunction; any other first token prints ERROR and stops processing.
    """
    with open(fileName) as f:
        for line in f:
            data = line.split()
            if data[0] == 'r_E':
                printFunction(data[1:])
            else:
                print 'ERROR'
                return

if __name__ == '__main__':
    # argv[1] is the path of the script file to interpret.
    main(sys.argv[1])
"juliettaylorswift@gmail.com"
] | juliettaylorswift@gmail.com |
6b33144ab59c87ccc63dd96f597ca84ed29fe834 | db8efee73ce71f9a28b1bb16196971b0fecf0cba | /paxos.py | f5e8f20534eb25d06d6250b106d1a5e5ee50138a | [] | no_license | chandana22/DistributedTicketingSystem | 574f672f09727789382455e52836904f60dbf1d0 | 0e367e28677b9932f530d232d3b93753ab63a37e | refs/heads/master | 2021-05-05T12:41:00.468769 | 2018-01-20T19:24:24 | 2018-01-20T19:24:24 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,296 | py | import sys,select
from threading import Thread
import threading
from messages import *
from agents import *
import time
import socket,pickle
import config
# Shared lookup tables, populated once by parse_file().
name_info = {}     # logical key ('server' or 'clientN') -> process name
port_info = {}     # logical key -> listening port (int)
ip_info = {}       # logical key -> IP address string
port2client = {}   # str(port) -> logical key (reverse of port_info)
client2name = {}   # process name -> logical key (reverse of name_info)
def parse_file(filename, processName):
    """Load the cluster membership file and populate the module-level tables.

    Each line of the file is "<process-name> <port> <ip>".  The line whose
    name matches processName becomes the local 'server' entry; every other
    line becomes 'client1' .. 'clientN' in file order.  The reverse lookup
    tables port2client (port string -> logical key) and client2name
    (process name -> logical key) are then rebuilt from scratch.
    """
    count = 1
    # 'with' guarantees the handle is closed; the original leaked it.
    with open(filename, 'r') as f:
        for line in f:
            fields = line.split()
            if fields[0] == processName:
                name_info['server'] = processName
                port_info['server'] = int(fields[1])
                ip_info['server'] = fields[2]
            else:
                key = 'client' + str(count)
                name_info[key] = fields[0]
                port_info[key] = int(fields[1])
                ip_info[key] = fields[2]
                count = count + 1
    ## To get the information about the client if we know the port number
    for key, value in port_info.items():
        port2client[str(value)] = key
    for key, value in name_info.items():
        client2name[str(value)] = key
class console_thread(Thread):
    """Reads operator commands from stdin and routes them to the Paxos roles.

    Supported commands: "Buy [n]" (submit a ticket purchase), "Quit"
    (shut the process down), "Sleep" (test hook that stalls the server
    thread) and "Show" (ask the state machine to display its state).
    """
    def __init__(self,name,consoleToProposerQueueLock):
        Thread.__init__(self)
        self.name = name
        self.consoleToProposerQueueLock = consoleToProposerQueueLock
        # Monotonic counter used to build per-process-unique message ids.
        self.msgCount = 0
    def run(self):
        config.active = True
        while(True):
            line = sys.stdin.readline().strip()
            if(len(line.split()) > 0):
                if (line.split()[0] == "Buy"):
                    # "Buy" with no count defaults to a single ticket.
                    if(len(line.split()) == 2):
                        value = int(line.split()[1])
                    else:
                        value = 1
                    print "Client has input Buy"
                    self.msgCount = self.msgCount + 1
                    msgId = self.name+str(self.msgCount)
                    msg = clientMessage(self.name,time.time(),value,msgId)
                    self.consoleToProposerQueueLock.acquire()
                    config.consoleToProposerQueue.put(msg)
                    self.consoleToProposerQueueLock.release()
                ## To Quit the System
                elif (line.split()[0]=="Quit"):
                    # Flipping config.active makes the other threads exit their loops.
                    config.active = False
                    break
                elif (line.split()[0]=="Sleep"):
                    # Test hook: makes the server thread pause so peers see it as dead.
                    msg = "Sleep"
                    config.consoleToServerQueue.put(msg)
                elif(line.split()[0] == "Show"):
                    msg = "Show"
                    config.consoleToStateMachineQueue.put(msg)
                else:
                    print (self.name).upper() + ": Invalid input"
class config_thread(Thread):
    """Accepts the five inbound peer connections and records their sockets.

    Each peer identifies itself by sending its own listening port right
    after connecting; that port indexes the shared socket table.  When this
    process is the current leader, the 4th and 5th peers are additionally
    reported to the proposer via the configuration queue.
    """
    def __init__(self,name,port,ip,configToProposerQueueLock):
        Thread.__init__(self)
        self.name = name
        self.port = port
        self.ip = ip
        self.configToProposerQueueLock = configToProposerQueueLock
    def run(self):
        config.server_socket = socket.socket()
        config.server_socket.bind((self.ip,self.port))
        config.server_socket.listen(6)
        # First peer: read its port, then store its socket under its logical key.
        config.client,config.addr = config.server_socket.accept()
        config.client_info = config.client.recv(1024)
        config.connections_made.append(name_info[port2client[config.client_info]])
        config.ref_client_info[str(port2client[config.client_info])] = config.client
        print (self.name).upper()+ ": Connection between "+self.name + " and " + name_info[port2client[config.client_info]] + " has been formed."
        # Second peer.
        config.client1,config.addr1 = config.server_socket.accept()
        config.client1_info = config.client1.recv(1024)
        config.connections_made.append(name_info[port2client[config.client1_info]])
        config.ref_client_info[str(port2client[config.client1_info])] = config.client1
        print (self.name).upper()+ ": Connection between "+self.name + " and " + name_info[port2client[config.client1_info]] + " has been formed."
        # Third peer.
        config.client2,config.addr2 = config.server_socket.accept()
        config.client2_info = config.client2.recv(1024)
        config.connections_made.append(name_info[port2client[config.client2_info]])
        config.ref_client_info[str(port2client[config.client2_info])] = config.client2
        print (self.name).upper()+ ": Connection between "+self.name + " and " + name_info[port2client[config.client2_info]] + " has been formed."
        # Fourth peer.
        config.client3,config.addr3 = config.server_socket.accept()
        config.client3_info = config.client3.recv(1024)
        config.connections_made.append(name_info[port2client[config.client3_info]])
        config.ref_client_info[str(port2client[config.client3_info])] = config.client3
        print (self.name).upper()+ ": Connection between "+self.name + " and " + name_info[port2client[config.client3_info]] + " has been formed."
        ## Send this configuration message to the propser queue if you are the leader
        if(self.name == config.currLeader):
            self.configToProposerQueueLock.acquire()
            config.configToProposerQueue.put(name_info[port2client[config.client3_info]])
            self.configToProposerQueueLock.release()
        # Fifth peer.
        config.client4,config.addr4 = config.server_socket.accept()
        config.client4_info = config.client4.recv(1024)
        config.connections_made.append(name_info[port2client[config.client4_info]])
        config.ref_client_info[str(port2client[config.client4_info])] = config.client4
        print (self.name).upper()+ ": Connection between "+self.name + " and " + name_info[port2client[config.client4_info]] + " has been formed."
        if(self.name == config.currLeader):
            self.configToProposerQueueLock.acquire()
            config.configToProposerQueue.put(name_info[port2client[config.client4_info]])
            self.configToProposerQueueLock.release()
        ##ref_client_info[str(port2client[self.client3_info])] = self.client3
        # Debug dump of the connection tables once everyone is connected.
        print config.ref_client_info
        print name_info
        print client2name
class server_thread(Thread):
    """Drains the outbound queues and writes each message to its peer socket.

    Four queues feed this thread (proposer, acceptor, learner, state
    machine); each message carries a recvId naming the destination process,
    which is mapped to a socket via client2name/ref_client_info.  Messages
    are pickled before sending.
    """
    def __init__(self,name,port,ip,proposerToServerQueueLock,acceptorToServerQueueLock,learnerToServerQueueLock,stateMachineToServerQueueLock):
        Thread.__init__(self)
        self.name = name
        self.port = port
        self.ip = ip
        self.proposerToServerQueueLock = proposerToServerQueueLock
        self.acceptorToServerQueueLock = acceptorToServerQueueLock
        self.learnerToServerQueueLock = learnerToServerQueueLock
        self.stateMachineToServerQueueLock = stateMachineToServerQueueLock
    def run(self):
        ##self.invoke_server()
        self.send_info()
    def send_info(self):
        # Give the connection threads a moment to establish sockets first.
        time.sleep(1)
        while(config.active):
            ## This is just used for testing making the server sleep so other process thinks it is dead
            while(not config.consoleToServerQueue.empty()):
                msg = config.consoleToServerQueue.get()
                if (msg == "Sleep"):
                    print "Sleep Started ..............."
                    time.sleep(200)
                    print "Sleep Ended ..............."
                    # Drop everything queued while "dead" so stale messages
                    # are not sent after waking up.
                    config.proposerToServerQueue.queue.clear()
                    config.acceptorToServerQueue.queue.clear()
                    config.learnerToServerQueue.queue.clear()
                    config.stateMachineToServerQueue.queue.clear()
            ## Emptying all the queues connected to server
            ## Have totally 3 queues which server needs to check
            ## Checking the proposer to server queue the ID in the message is used to get corresponding socket of the receiveing end
            while(not config.proposerToServerQueue.empty()):
                print "proposer put something to server"
                self.proposerToServerQueueLock.acquire()
                msg = config.proposerToServerQueue.get()
                config.ref_client_info[client2name[msg.recvId]].send(pickle.dumps(msg))
                self.proposerToServerQueueLock.release()
                time.sleep(0)
            ## Checking the acceptor to server queue the ID in the message is used to get corresponding socket of the receiveing end
            while(not config.acceptorToServerQueue.empty()):
                print "acceptor put something to server"
                self.acceptorToServerQueueLock.acquire()
                msg = config.acceptorToServerQueue.get()
                config.ref_client_info[client2name[msg.recvId]].send(pickle.dumps(msg))
                self.acceptorToServerQueueLock.release()
                time.sleep(0)
            ## Checking the learner to server queue the ID in the message is used to get corresponding socket of the receiveing end
            while(not config.learnerToServerQueue.empty()):
                print "learner put something to server"
                self.learnerToServerQueueLock.acquire()
                msg = config.learnerToServerQueue.get()
                config.ref_client_info[client2name[msg.recvId]].send(pickle.dumps(msg))
                self.learnerToServerQueueLock.release()
                time.sleep(0)
            ## Checking for the request made by state machine in case of missing log entries
            while(not config.stateMachineToServerQueue.empty()):
                print "State Machine put something to server"
                self.stateMachineToServerQueueLock.acquire()
                msg = config.stateMachineToServerQueue.get()
                config.ref_client_info[client2name[msg.recvId]].send(pickle.dumps(msg))
                self.stateMachineToServerQueueLock.release()
                time.sleep(0)
            time.sleep(0)
        ## Sending Quit message to all the clients
        # config.active went False (operator typed Quit): tell every peer.
        for names in config.connections_made:
            config.ref_client_info[client2name[names]].send(pickle.dumps("Quit"))
        config.server_socket.close()
## Later for reconfiguration you can constantly check for the new connections
class client_thread(Thread):
    """Outbound connection to one peer process.

    Connects (retrying until the peer's listener is up), announces our own
    server port, then loops receiving pickled Paxos messages and dispatching
    each one to the proposer / acceptor / learner queue by its type.
    """
    def __init__(self,name,port,ip,clientToProposerQueueLock,clientToAcceptorQueueLock,clientToLearnerQueueLock):
        Thread.__init__(self)
        self.name = name
        self.port = port
        self.ip = ip
        self.clientToProposerQueueLock = clientToProposerQueueLock
        self.clientToAcceptorQueueLock = clientToAcceptorQueueLock
        self.clientToLearnerQueueLock = clientToLearnerQueueLock
        #self.client_socket = socket.socket()
    def run(self):
        self.invoke_client()
        self.get_info()
        self.client_socket.close()
    def invoke_client(self):
        """Retry until the peer accepts, then identify ourselves by port."""
        self.client_socket = socket.socket()
        #self.client_socket.setblocking(0)
        while (True):
            try:
                ##print "waiting to connect to master"
                ##self.client_socket.connect(('127.0.0.1',self.port))
                self.client_socket.connect((str(ip_info[str(client2name[self.name])]),self.port))
                # The peer uses this port to look up who we are.
                self.client_socket.send(str(port_info['server']))
                break
            except socket.error as msg:
                # Peer not listening yet -- busy-retry until it is.
                continue
    def get_info(self):
        """Receive loop: unpickle each message and route it by type."""
        while(config.active):
            ## depdending on the message type we put it on corresponding queue
            recvd = self.client_socket.recv(1024)
            recvdMessage = pickle.loads(recvd)
            print "Client received a message"
            ## Can be removed later
            if recvdMessage == "Quit":
                break
            if isinstance(recvdMessage,hearBeatMessage):
                print "Process received HeartBeat Message from " + recvdMessage.leaderId
                # Record when we last heard from the leader (failure detection).
                config.prevRecvHeartBeat = time.time()
            ## if received message is a message from proposer to acceptor for proposing value then send it to acceptor
            if isinstance(recvdMessage,sendProposedValueToAcceptors):
                print "client received message from proposer to acceptor to accept value"
                self.clientToAcceptorQueueLock.acquire()
                config.clientToAcceptorQueue.put(recvdMessage)
                self.clientToAcceptorQueueLock.release()
            ## if received message is a message from proposer to acceptor for proposing configuration then send it to acceptor
            if isinstance(recvdMessage,configurationMessageToAcceptors):
                print "client received message from proposer to acceptor to accept configuration"
                self.clientToAcceptorQueueLock.acquire()
                config.clientToAcceptorQueue.put(recvdMessage)
                self.clientToAcceptorQueueLock.release()
            ## if received message is a message from acceptor to learner to accept the configuration then send it to learner
            if isinstance(recvdMessage,configurationMessageToLearners):
                print "client received message from acceptot to Learner to accept configuration"
                self.clientToLearnerQueueLock.acquire()
                config.clientToLearnerQueue.put(recvdMessage)
                self.clientToLearnerQueueLock.release()
            ## if received message is a message from acceptor that it has accepted proposed value send it to the proposer
            if isinstance(recvdMessage,sendAcceptedValueToLeader):
                print "client received message from acceptor to leader that it has accepted"
                self.clientToProposerQueueLock.acquire()
                config.clientToProposerQueue.put(recvdMessage)
                self.clientToProposerQueueLock.release()
            ## if received message is a message from proposer to learner to write into log send it to learner
            if isinstance(recvdMessage,sendAcceptedValueToLearners):
                print "client received message from acceptor to learner to accept values"
                self.clientToLearnerQueueLock.acquire()
                config.clientToLearnerQueue.put(recvdMessage)
                self.clientToLearnerQueueLock.release()
            ## if received message is a message from another process and is a console message in that process
            if isinstance(recvdMessage,sendClientMessageToLeader):
                print "client received console message from another process which is not the leader"
                self.clientToProposerQueueLock.acquire()
                config.clientToProposerQueue.put(recvdMessage)
                self.clientToProposerQueueLock.release()
            ## if received message is a message from another process which wants to be leader
            if isinstance(recvdMessage,sendProposedLeaderToAcceptors):
                print "client received message from another process which wants to be leader"
                self.clientToAcceptorQueueLock.acquire()
                config.clientToAcceptorQueue.put(recvdMessage)
                self.clientToAcceptorQueueLock.release()
            ## if receives message is a message from another process which has accepted the current process to be leader
            if isinstance(recvdMessage,sendAcceptedLeaderToProposer):
                print "client received message from another process which has accepted the current process to be leader"
                ## Putting it on acceptor queue since the proposer thread is waiting for response
                self.clientToAcceptorQueueLock.acquire()
                config.clientToAcceptorQueue.put(recvdMessage)
                self.clientToAcceptorQueueLock.release()
            ## if received a message from another process state machine for acquiring logs
            if isinstance(recvdMessage,sendRequestForLogEntries):
                print "client received a message from annother process state machine for log entries.Putting the message in learner queue"
                self.clientToLearnerQueueLock.acquire()
                config.clientToLearnerQueue.put(recvdMessage)
                self.clientToLearnerQueueLock.release()
            ## if received log entries message from another process, send it to the learner it will update
            if isinstance(recvdMessage,sendLogEntriesMessage):
                print "Client received missing log entries from another process"
                self.clientToLearnerQueueLock.acquire()
                config.clientToLearnerQueue.put(recvdMessage)
                self.clientToLearnerQueueLock.release()
            time.sleep(0)
def process(argv):
    """Build every lock, thread and Paxos agent for this node and start them.

    Expects sys.argv[1] = this process's name and sys.argv[2] = the cluster
    membership file (see parse_file).

    NOTE(review): the local variable 'config' (a config_thread instance)
    shadows the imported config module for the rest of this function; the
    module is not referenced here afterwards, but the name reuse is fragile.
    """
    parse_file(sys.argv[2],sys.argv[1])
    # One re-entrant lock per inter-thread queue.
    clientToProposerQueueLock = threading.RLock()
    clientToAcceptorQueueLock = threading.RLock()
    clientToLearnerQueueLock = threading.RLock()
    proposerToServerQueueLock = threading.RLock()
    acceptorToServerQueueLock = threading.RLock()
    learnerToServerQueueLock = threading.RLock()
    consoleToProposerQueueLock = threading.RLock()
    stateMachineToServerQueueLock = threading.RLock()
    configToProposerQueueLock = threading.RLock()
    # I/O threads: console input, outbound sender, one client_thread per peer
    # (plus one for ourselves), and the inbound connection acceptor.
    console = console_thread(name_info['server'],consoleToProposerQueueLock)
    server = server_thread(name_info['server'],port_info['server'],ip_info['server'],proposerToServerQueueLock,acceptorToServerQueueLock,learnerToServerQueueLock,stateMachineToServerQueueLock)
    client = client_thread(name_info['server'],port_info['server'],ip_info['server'],clientToProposerQueueLock,clientToAcceptorQueueLock,clientToLearnerQueueLock)
    client1 = client_thread(name_info['client1'],port_info['client1'],ip_info['client1'],clientToProposerQueueLock,clientToAcceptorQueueLock,clientToLearnerQueueLock)
    client2 = client_thread(name_info['client2'],port_info['client2'],ip_info['client2'],clientToProposerQueueLock,clientToAcceptorQueueLock,clientToLearnerQueueLock)
    client3 = client_thread(name_info['client3'],port_info['client3'],ip_info['client3'],clientToProposerQueueLock,clientToAcceptorQueueLock,clientToLearnerQueueLock)
    client4 = client_thread(name_info['client4'],port_info['client4'],ip_info['client4'],clientToProposerQueueLock,clientToAcceptorQueueLock,clientToLearnerQueueLock)
    config = config_thread(name_info['server'],port_info['server'],ip_info['server'],configToProposerQueueLock)
    # Paxos agents (roles) for this node.
    proposer = Proposer(name_info['server'],consoleToProposerQueueLock,proposerToServerQueueLock,clientToProposerQueueLock,configToProposerQueueLock,"Srinu")
    acceptor = Acceptor(name_info['server'],clientToAcceptorQueueLock,acceptorToServerQueueLock,"Srinu")
    learner = Learner(name_info['server'],clientToLearnerQueueLock,learnerToServerQueueLock)
    stateMachine = StateMachine(name_info['server'],stateMachineToServerQueueLock)
    console.start()
    config.start()
    server.start()
    client.start()
    client1.start()
    client2.start()
    client3.start()
    client4.start()
    proposer.start()
    acceptor.start()
    learner.start()
    stateMachine.start()
if __name__ == '__main__':
    # argv[1] = this process's name, argv[2] = cluster membership file.
    process(sys.argv)
| [
"noreply@github.com"
] | noreply@github.com |
a47ece65b19b3e5a792434c004bb0e73e6b949b8 | 7e4df92ef84da23cf33c4c4ecf4623d15ea480bf | /scripts/take_good_barcodes.py | 7c6f82461f3925ac550d5297979aa5a2b07c4d49 | [] | no_license | mandricigor/ct-eqtl-design | 0828b65c6d5139d4b56f1131387a2ea75d071cab | 4182fde75638a6106c371af27c035051b21cae29 | refs/heads/master | 2020-12-13T13:33:12.869147 | 2020-06-03T05:20:41 | 2020-06-03T05:20:41 | 234,433,257 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 746 | py |
import sys
import pysam
# Filter a BAM file down to reads whose CB (cell barcode) tag appears in a
# whitelist, writing the kept reads to a new BAM with the same header.
# Usage: take_good_barcodes.py <input.bam> <output.bam> <barcodes.txt>
inputfile = sys.argv[1]
outputfile = sys.argv[2]
barcodes = sys.argv[3]
with open(barcodes) as f:
    a = f.readlines()
# Whitelist: first whitespace-separated column of each line is a barcode.
a = list(map(lambda x: x.strip().split()[0], a))
#samfile = pysam.AlignmentFile("immvarYE_0831_1.splitted_1.bam", "rb")
samfile = pysam.AlignmentFile(inputfile, "rb")
sam = samfile.fetch()
# Bucket reads by barcode; only whitelisted barcodes get a bucket.
samdict = {}
for barcode in a:
    samdict[barcode] = []
for uu in sam:
    try:
        barcode = uu.get_tag("CB")
        if barcode in samdict:
            samdict[barcode].append(uu)
    except Exception as e:
        # Reads lacking a CB tag make get_tag raise; such reads are skipped.
        # NOTE(review): this broad except also hides any other error --
        # consider narrowing it to KeyError.
        pass
header = samfile.header
with pysam.AlignmentFile(outputfile, "wb", header=header) as outf:
    for u, v in samdict.items():
        for w in v:
            outf.write(w)
| [
"imandric@dlmpatadm11277.ad.medctr.ucla.edu"
] | imandric@dlmpatadm11277.ad.medctr.ucla.edu |
14cc45de89528b42640f58cba86eb2f58860bbcc | 1879e4df9cff25bc0c32ff63aedc859301062f9d | /0x05-personal_data/encrypt_password.py | 088ba68a96806e4bfba46db604229b5f920df220 | [] | no_license | rakiasomai/holbertonschool-web_back_end | 0f9d36160c9762df0826adcac66b009d1076043b | f5aeeda56def93fe13d901dd52217b0dbd4124e9 | refs/heads/master | 2023-02-28T10:02:54.929275 | 2021-02-06T22:17:04 | 2021-02-06T22:17:04 | 305,420,230 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 389 | py | #!/usr/bin/env python3
''' Personal data '''
import bcrypt
def hash_password(password: str) -> bytes:
    """Return a salted bcrypt hash of *password*."""
    salt = bcrypt.gensalt()
    return bcrypt.hashpw(password.encode('utf-8'), salt)
def is_valid(hashed_password: bytes, password: str) -> bool:
    """Return True when *password* matches the bcrypt *hashed_password*."""
    candidate = password.encode('utf-8')
    return bcrypt.checkpw(candidate, hashed_password)
| [
"somai.rakia@hotmail.fr"
] | somai.rakia@hotmail.fr |
281e59753dd53d34c64d7fc2c26b4080280ec8d0 | 6a41ad8fc0b64647ddcfb8ac18b325e6668ed454 | /venv/Scripts/pip3.8-script.py | 6ee7374c2c053118a47c8fc599c8f57696a4fda1 | [] | no_license | YazCodes/CFG_Python | 656414151e2e3c7e00e0d4f85e3bad03bb766f9d | c08b596f5072776621937aa61cbccdf4cf772577 | refs/heads/master | 2022-04-24T16:37:55.673648 | 2020-04-26T14:34:17 | 2020-04-26T14:34:17 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 420 | py | #!C:\Users\yasmi\PycharmProjects\cfg_python\venv\Scripts\python.exe
# EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3.8'
__requires__ = 'pip==19.0.3'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
    # Generated setuptools wrapper: strip the "-script.py(w)"/".exe" suffix so
    # pip reports a clean executable name, then run pip's console entry point
    # and exit with its return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(
        load_entry_point('pip==19.0.3', 'console_scripts', 'pip3.8')()
    )
| [
"yjones@spartaglobal.com"
] | yjones@spartaglobal.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.