blob_id
stringlengths
40
40
directory_id
stringlengths
40
40
path
stringlengths
3
281
content_id
stringlengths
40
40
detected_licenses
listlengths
0
57
license_type
stringclasses
2 values
repo_name
stringlengths
6
116
snapshot_id
stringlengths
40
40
revision_id
stringlengths
40
40
branch_name
stringclasses
313 values
visit_date
timestamp[us]
revision_date
timestamp[us]
committer_date
timestamp[us]
github_id
int64
18.2k
668M
star_events_count
int64
0
102k
fork_events_count
int64
0
38.2k
gha_license_id
stringclasses
17 values
gha_event_created_at
timestamp[us]
gha_created_at
timestamp[us]
gha_language
stringclasses
107 values
src_encoding
stringclasses
20 values
language
stringclasses
1 value
is_vendor
bool
2 classes
is_generated
bool
2 classes
length_bytes
int64
4
6.02M
extension
stringclasses
78 values
content
stringlengths
2
6.02M
authors
listlengths
1
1
author
stringlengths
0
175
437f4bed10a965bd4a4e33fdfb3de90c1faffc8d
9c651d78fe2beb015e46f3cea556aaaab93ede2c
/boilerplates/airflow/files/docker/scripts/setup.py
0a1629c000f707d78ebb4d5ff07aec9487aeb19c
[ "Apache-2.0" ]
permissive
SHoen/ods-project-quickstarters
c45d4f3843701058b9d18724ace8de7d843ee1b1
f216bd430b50fdeafe05f6ef68905ef82bb60980
refs/heads/master
2021-06-30T15:01:27.002319
2020-10-16T07:48:02
2020-10-16T07:48:02
175,852,967
0
0
Apache-2.0
2020-10-16T07:48:03
2019-03-15T16:12:11
Shell
UTF-8
Python
false
false
284
py
#!/usr/bin/env python import setuptools setuptools.setup(name='airflow-dag-dependencies', version='0.1', description='DAG dependencies', url='https://www.python.org/sigs/distutils-sig/', packages=setuptools.find_packages(), install_requires=[] )
[ "hugo.wruck_schneider@boehringer-ingelheim.com" ]
hugo.wruck_schneider@boehringer-ingelheim.com
5fc8a8e462763bec2057c096836f914959a5c69f
150b412c520cab07855bbce69d40ae35a2afd607
/nlp_lib.py
04c8e22892ff815e5f1de63549137d84ae317e4b
[]
no_license
nuthasid/Text_Classification
781d181448efba3d1e0fe06ad316a5fc4774ad4c
311b0a49f7af297c079b010132f9160c7be35a0b
refs/heads/master
2020-03-19T05:52:53.362022
2018-07-13T09:32:01
2018-07-13T09:32:01
135,970,618
0
1
null
2018-06-07T06:55:53
2018-06-04T04:29:24
Jupyter Notebook
UTF-8
Python
false
false
1,183
py
def levenshtein(word1, word2): """ Return minimum edit distance between word1 and word2. """ def call_counter(func): def helper(*args, **kwargs): helper.calls += 1 return func(*args, **kwargs) helper.calls = 0 helper.__name__ = func.__name__ return helper memo = {} @call_counter def leven(s, t): if s == "": return len(t) if t == "": return len(s) cost = 0 if s[-1] == t[-1] else 1 i1 = (s[:-1], t) if not i1 in memo: memo[i1] = leven(*i1) i2 = (s, t[:-1]) if not i2 in memo: memo[i2] = leven(*i2) i3 = (s[:-1], t[:-1]) if not i3 in memo: memo[i3] = leven(*i3) res = min([memo[i1] + 1, memo[i2] + 1, memo[i3] + cost]) return res return leven(word1, word2) def edu_tokenizer(doc): """ :param doc: string document containing Thai text :return: a list of word tokens tokenized using tltk word segmentation """ from tltk import segment return segment(doc).replace('<u/>', '|').replace('|<s/>|', '|').split('|')
[ "nuthasid.r@outlook.com" ]
nuthasid.r@outlook.com
e48e8a3d9f50dad1480843e7c35ea699a584238d
4eeee51e8d8dd6e37971d199d200176221864a22
/app/views.py
2ee1b7213b9ab568ccb2c77112361d7507d23d92
[]
no_license
perkinsashley26/info3180-p2
3395243dad517b6a164b2af562add58c4875252e
163b1d3b6541c821354e8f183fab2870037981c3
refs/heads/main
2023-04-10T13:19:23.051496
2021-04-14T18:04:14
2021-04-14T18:04:14
null
0
0
null
null
null
null
UTF-8
Python
false
false
4,263
py
""" Flask Documentation: http://flask.pocoo.org/docs/ Jinja2 Documentation: http://jinja.pocoo.org/2/documentation/ Werkzeug Documentation: http://werkzeug.pocoo.org/documentation/ This file creates your application. """ import os from app import app, db from flask import render_template, request, redirect, url_for, send_from_directory, flash #from app.propertyform import Propertyform from werkzeug.utils import secure_filename from app.models import Favourites, Cars, Users import psycopg2 from sqlalchemy import create_engine from sqlalchemy.orm import scoped_session, sessionmaker ### # Routing for your application. ### @app.route('/') def home(): """Render website's home page.""" return app.send_static_file('index.html') #@app.route('/about/') #def about(): #"""Render the website's about page.""" #return render_template('about.html', name="Mary Jane") """ @app.route('/property', methods=['POST', 'GET']) def property(): form=Propertyform() if request.method == 'POST': if form.validate_on_submit: photo=form.photo.data filename=secure_filename(photo.filename) photo.save(os.path.join(app.config['UPLOAD_FOLDER'], filename)) datab=Properties() datab.title=form.title.data datab.desc=form.desc.data datab.bedroom=form.bedroom.data datab.bathroom=form.bathroom.data datab.price=form.price.data datab.location=form.location.data datab.propertytype=form.select.data datab.photoname=filename db.session.add(datab) db.session.commit() flash('Property Added', 'success') return redirect(url_for('properties')) return render_template('propertyform.html', form=form) def getprop(): prop=Properties.query.all() results=[{ "photo":p.photoname, "title":p.title, "location":p.location, "price":p.price, "id":p.id, "bedroom":p.bedroom, "bathroom":p.bathroom, "propertytype":p.propertytype, "desc":p.desc } for p in prop] return results #def connect_db(): # return psycopg2.connect(app.config['SQLALCHEMY_DATABASE_URI']) @app.route('/properties') def properties(): prop=getprop() return 
render_template('properties.html',prop=prop ) @app.route('/properties/<ph>') def get_image(ph): root_dir=os.getcwd() return send_from_directory(os.path.join(root_dir, app.config['UPLOAD_FOLDER']), ph) def get_uploaded_images(): rootdir=os.getcwd() path=rootdir+ '/uploads' file_list = [] for subdir, dirs, files in os.walk(path): for name in files: if name.endswith(('.png','.PNG', '.jpg','.JPG', '.jpeg','JPEG')): file_list.append(name) return file_list @app.route('/property/<propertyid>') def viewproperty(propertyid): prop=getprop() l=[prop,propertyid] return render_template('property.html', prop=l) """ ### # The functions below should be applicable to all Flask apps. ### # Display Flask WTF errors as Flash messages def flash_errors(form): for field, errors in form.errors.items(): for error in errors: flash(u"Error in the %s field - %s" % ( getattr(form, field).label.text, error ), 'danger') @app.route('/<file_name>.txt') def send_text_file(file_name): """Send your static text file.""" file_dot_text = file_name + '.txt' return app.send_static_file(file_dot_text) @app.after_request def add_header(response): """ Add headers to both force latest IE rendering engine or Chrome Frame, and also tell the browser not to cache the rendered page. If we wanted to we could change max-age to 600 seconds which would be 10 minutes. """ response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1' response.headers['Cache-Control'] = 'public, max-age=0' return response @app.errorhandler(404) def page_not_found(error): """Custom 404 page.""" return render_template('404.html'), 404 if __name__ == '__main__': app.run(debug=True,host="0.0.0.0",port="8080")
[ "pambersam@gmail.com" ]
pambersam@gmail.com
1ecd8e3da77d4553e91626692b959df595bb8228
c8ab48ebc8fdc1e818b4ae0194de642a123eee7f
/Assessment_1/q2.py
f995307d9cab125e8986353bf0f04b386e64bde2
[]
no_license
BaDMaN90/COM404
78c9a1bde65bc8c1696ad0cb50336276856fc2df
9930c115f75473f99f67c07270efc3460067acaf
refs/heads/master
2020-08-04T06:44:58.722478
2019-12-22T23:40:12
2019-12-22T23:40:12
212,043,625
0
0
null
null
null
null
UTF-8
Python
false
false
252
py
#prints message and ask for user response print("Who is locked outside (Fred, Dino or the Cat)") who = input() #response based on user input if who == "Fred": print("Willma!!!") elif who == "Dino": print("Rarr! Rarr!") else: print("Meow!")
[ "4zdebf99@solent.ac.uk" ]
4zdebf99@solent.ac.uk
e0f5050286bc2b0176e7fcbbef3bbe11b152e869
397e109f024c264991f927b4788c8a005c67dc8d
/authors/apps/ratings/views.py
dfb185cc80d6db6ff87450585c0968597a19c886
[ "BSD-3-Clause" ]
permissive
andela/ah-the-unsullied
989b0ee4a22ae373dab3acf34353ff34ddca9df9
7821c96f16880fb5c3cbd2d51fbd43669c2ee7fd
refs/heads/develop
2020-04-11T23:03:36.813936
2019-02-28T07:16:29
2019-02-28T07:16:29
162,156,016
7
2
BSD-3-Clause
2019-02-28T07:16:30
2018-12-17T16:02:34
Python
UTF-8
Python
false
false
3,364
py
from rest_framework import status from rest_framework.exceptions import NotFound from rest_framework.generics import ( GenericAPIView ) from rest_framework.permissions import ( IsAuthenticatedOrReadOnly ) from rest_framework.response import Response from .responses import ( error_messages, successful_submission, success_messages ) from .serializers import RatingSerializer from ..articles.models import Article, Rating from ..utils import get_article_rating class ArticleRatingAPIView(GenericAPIView): queryset = Rating.objects.all() serializer_class = RatingSerializer permission_classes = (IsAuthenticatedOrReadOnly,) @staticmethod def get_article(slug): """Returns the first record in the articles table with the slug""" article = Article.objects.all().filter(slug=slug).first() return article def post(self, request, slug): """POST Request to rate an article""" rating = request.data article = self.get_article(slug) if check_article_exists(article): return check_article_exists(article) if request.user.id == article.author.id: return Response( error_messages['unauthorized'], status=status.HTTP_403_FORBIDDEN ) try: current_article_rating = Rating.objects.get( user=request.user.id, article=article.id ) serializer = self.serializer_class( current_article_rating, data=rating) except Rating.DoesNotExist: serializer = self.serializer_class(data=rating) serializer.is_valid(raise_exception=True) serializer.save(user=request.user, article=article) return Response({ 'message': successful_submission['message'], 'data': serializer.data }, status=status.HTTP_201_CREATED) def get(self, request, slug): """Returns an article's ratings""" article = self.get_article(slug) rating = None # check if the article exists if check_article_exists(article): return check_article_exists(article) # if the user is authenticated fetch their ratings if request.user.is_authenticated: try: rating = Rating.objects.get( user=request.user, article=article ) except Rating.DoesNotExist: raise NotFound( 
detail=error_messages['not_rated'] ) # for unauthenticated users if rating is None: average_rating = get_article_rating(article) if request.user.is_authenticated is False: return Response({ 'article': article.slug, 'average_rating': average_rating, 'rating': error_messages['login'] }, status=status.HTTP_200_OK) serializer = self.serializer_class(rating) return Response({ 'message': success_messages['message'], 'data': serializer.data }, status=status.HTTP_200_OK) def check_article_exists(article): if not article: return Response( error_messages['not_exist'], status=status.HTTP_404_NOT_FOUND )
[ "allannelly690@gmail.com" ]
allannelly690@gmail.com
0b0947020993479672eeb02266a80665dd70ff7d
45fbfce97cf1d3f3c013d0332a6cd7b747d2f99e
/forest/generic_visitor.py
9c89b8712b1fd3ca208e4cfba2344b980f8c290a
[ "Apache-2.0" ]
permissive
Marghrid/FOREST
f7d60de453f982973489a5d975c5d6ab9d902336
2f1730fd7cb1e3579fcce222c67b91c5bb1aa5f4
refs/heads/master
2023-02-15T23:42:27.268446
2021-01-18T19:10:30
2021-01-18T19:10:30
225,708,551
8
0
null
null
null
null
UTF-8
Python
false
false
846
py
import re from abc import ABC, abstractmethod first_cap_re = re.compile('(.)([A-Z][a-z]+)') all_cap_re = re.compile('([a-z0-9])([A-Z])') def camel_to_snake_case(name: str) -> str: s1 = first_cap_re.sub(r'\1_\2', name) return all_cap_re.sub(r'\1_\2', s1).lower() class GenericVisitor(ABC): @abstractmethod def __init__(self): pass def visit(self, node): method_name = self._visit_method_name(node) visitor = getattr(self, method_name, self.generic_visit) return visitor(node) def generic_visit(self, node): raise Exception( '{}: No {} method'.format( type(self).__name__, self._visit_method_name(node))) @staticmethod def _visit_method_name(node) -> str: return 'visit_' + camel_to_snake_case(type(node).__name__)
[ "marghrid@hotmail.com" ]
marghrid@hotmail.com
96c027ad20e48b93bb0341a4e5dfff106817bbc9
b2376f0cdb54a91e03a43e5eb945fffaaba96852
/eventticketingserver/admin.py
e9883325aa2ce44ce9d8b48b9f0416ba2b4a1841
[]
no_license
didietsuryadi/event-ticketing
9bf83129034c57f2474ba674e1bb9b32cee2cd74
f8e31c87ef5f5367618c857272ecf708bbdc0408
refs/heads/master
2020-03-23T04:17:54.023142
2018-07-19T05:27:04
2018-07-19T05:27:04
141,074,833
0
0
null
null
null
null
UTF-8
Python
false
false
152
py
from django.contrib import admin from .apiv1.models import * admin.site.register(Event) admin.site.register(Customer) admin.site.register(Transaction)
[ "didietsuryadi@gmail.com" ]
didietsuryadi@gmail.com
66c536ba9a3c636b67b6d1b208485d7038b48249
82c8626a0c48985ea53d22dd1665cbb6048c96bf
/project_dl/subjects/migrations/0003_student_parent_student_id.py
9ce5485f3d5dd9475996d9a6c420c0d6e08c970a
[]
no_license
alishertleukenov/My-project
50affe6091a3578fb1dd5a5d994dabab29498b20
31c1fd7bb8c28df2a297e670870cd005c10bb6b1
refs/heads/master
2020-11-23T18:47:44.752255
2019-12-13T07:12:26
2019-12-13T07:12:26
227,775,534
0
0
null
null
null
null
UTF-8
Python
false
false
483
py
# Generated by Django 2.2 on 2019-12-13 06:33 from django.db import migrations, models import django.utils.timezone class Migration(migrations.Migration): dependencies = [ ('subjects', '0002_auto_20191213_1151'), ] operations = [ migrations.AddField( model_name='student', name='parent_student_id', field=models.IntegerField(default=django.utils.timezone.now), preserve_default=False, ), ]
[ "alishertleukenov@gmail.com" ]
alishertleukenov@gmail.com
7258a56999d559da846963b4a487bde03ba0674a
62be0a7264b1709f53c2d3a672c19ad254a44c6c
/cardiod .py
0be021efaec72f07e0bfd1be22f1956d16a7d271
[]
no_license
sathish-selvan/python-turtle-
95d70bfde70d21f0c8e48884398e9b3b98fb03ac
4a4b96b977207e9875aedd2c3ef44cff64e7f5d1
refs/heads/master
2023-01-07T17:53:31.321643
2020-10-26T22:29:19
2020-10-26T22:29:19
306,972,287
3
0
null
null
null
null
UTF-8
Python
false
false
472
py
import turtle sat = turtle.Turtle() s =turtle.Screen() sat.pencolor("black") s.bgcolor("#72cc12") sat.penup() sat.speed(0) sat.goto(0,-300) sat.pendown() table = 2 points = [] division = 200 def draw(division): for i in range(division): points.append(sat.pos()) sat.circle(300,360/division) draw(division) for i in points: sat.goto(i) sat.goto(points[(points.index(i)*table)%division]) sat.goto(i) sat.hideturtle() turtle.done()
[ "sathish.t2000@gmail.comm" ]
sathish.t2000@gmail.comm
836db03763f356868cc4f33ccedd59f950e22ccb
5260bdd8b6ccd8e54c1d616d166b462a509d56c1
/clock.py
554b387796e2aebd42dda7e3817c75b17d8b2c98
[]
no_license
ankit96/flockathon-utility
f2f6a45ff3d5696afa8120a85c4f9ac28e6bbb91
c71499588b5cff4f96a7f3f4ca687b7f8da8c28e
refs/heads/master
2022-07-25T13:26:37.933335
2019-10-21T17:17:25
2019-10-21T17:17:25
216,624,518
1
0
null
2022-07-06T20:20:52
2019-10-21T17:19:55
Python
UTF-8
Python
false
false
430
py
from apscheduler.schedulers.blocking import BlockingScheduler import os import subprocess import psycopg2 import urlparse from deleteplaylist import deleteall import logging logging.basicConfig() sched = BlockingScheduler() @sched.scheduled_job('cron', day_of_week='mon-sun', hour=18, minute=31) #@sched.scheduled_job('interval', hours=7 , minutes=52) def scheduled_job(): deleteall() #scheduled_job()dsd sched.start()
[ "Ankit.sa" ]
Ankit.sa
ba3aca2ca5f17237e705f78170ba8e21b0c9ab3b
b61dfe7d7881241216a47f3f0f384731d91701e5
/cart/urls.py
0a52d9285b52b8bca10cac47fb042c93baf59793
[]
no_license
hieudp/Nigiri-Falls
4c6822222e550821384e1d4d788d695700d7dbb5
3845feb32e859750f40b74fefef5ce03bf2fc7b9
refs/heads/master
2021-06-28T22:37:22.980968
2020-01-31T16:04:18
2020-01-31T16:04:18
237,465,152
0
0
null
2021-06-10T22:32:15
2020-01-31T16:03:50
HTML
UTF-8
Python
false
false
296
py
from django.urls import path from . import views urlpatterns = [ path('', views.cart, name='cart'), path('confirmation/', views.confirmation, name='confirmation'), path('increase/', views.increase, name='increasecart'), path('decrease/', views.decrease, name='decreasecart'), ]
[ "hpham794@gmail.com" ]
hpham794@gmail.com
19add0a7828b188a74b5e8c1a7a9bb8358b7a0b9
b8dc2b323f74a9ece191e667598bfcac9fe16b12
/calc.py
1cf9ffcfda46fc67c30f254c220eb507b4e3b0eb
[]
no_license
SHowardGal/exerPythUnitTest
fa6484e365101b90c67c57d385d25c2c00c27fdb
b2fc6ae213aa42cc12ef201834b4817bc5340092
refs/heads/master
2023-08-11T11:38:42.008028
2021-10-06T01:10:44
2021-10-06T01:10:44
null
0
0
null
null
null
null
UTF-8
Python
false
false
322
py
def add(x, y): """Add Function""" return x + y def subtract(x, y): """Subtract Function""" return x - y def multiply(x, y): """Multiply Function""" return x * y def divide(x, y): """Divide Function""" if y == 0: raise ValueError('Can not divide by zero!') return x / y
[ "u0250173@utah.edu" ]
u0250173@utah.edu
b59c437e9488ef3d05b937ed48797e71bc060614
fe54d59a1a030a9c1395f4f4d3ef2e2b2ec48343
/build/lib/nailgun/objects/serializers/node.py
a2db68ad18b2230ba9ca3569cf67682031e2d880
[]
no_license
zbwzy/nailgun
38a4198a0630a1608c14e55bee03b5ed04ded3e8
2eaeece03ebc53f48791db2aa8e7d24c010910f2
refs/heads/master
2022-09-25T09:03:33.296368
2016-02-23T09:32:55
2016-02-23T09:32:55
52,345,460
0
0
null
2022-09-16T17:45:43
2016-02-23T09:03:07
Python
UTF-8
Python
false
false
2,488
py
# -*- coding: utf-8 -*- # Copyright 2013 Mirantis, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from nailgun import consts from nailgun.objects.serializers.base import BasicSerializer class NodeSerializer(BasicSerializer): fields = ( 'id', 'name', 'meta', 'progress', 'kernel_params', 'roles', 'pending_roles', 'status', 'mac', 'fqdn', 'ip', 'manufacturer', 'platform_name', 'pending_addition', 'pending_deletion', 'os_platform', 'error_type', 'online', 'cluster', 'network_data', 'group_id', 'node_type' ) class NodeInterfacesSerializer(BasicSerializer): nic_fields = ( 'id', 'mac', 'name', 'type', 'state', 'current_speed', 'max_speed', 'assigned_networks' ) bond_fields = ( 'mac', 'name', 'type', 'mode', 'state', 'assigned_networks' ) @classmethod def serialize_nic_interface(cls, instance, fields=None): return BasicSerializer.serialize( instance, fields=fields if fields else cls.nic_fields ) @classmethod def serialize_bond_interface(cls, instance, fields=None): data_dict = BasicSerializer.serialize( instance, fields=fields if fields else cls.bond_fields ) data_dict['slaves'] = [{'name': slave.name} for slave in instance.slaves] return data_dict @classmethod def serialize(cls, instance, fields=None): iface_types = consts.NETWORK_INTERFACE_TYPES if instance.type == iface_types.ether: return cls.serialize_nic_interface(instance) elif instance.type == iface_types.bond: return cls.serialize_bond_interface(instance)
[ "zhangbai2008@gmail.com" ]
zhangbai2008@gmail.com
86475d131fa05abd008300fc90b31ad73c5238ce
e14eb7457d93f2c8090147f767e87fd75e17a8ac
/Code05-04 Event 01.py
c89b55d906cd0a07bd7e26f1106aa2f39d647363
[]
no_license
leegj93/PythonProject
6992653576451c7d398cabc7d4e8575b8f7f784a
5accab4cf10fcadc1fbf5b0f39563cfdc8703484
refs/heads/master
2022-02-03T13:28:44.608943
2019-07-02T07:15:57
2019-07-02T07:15:57
194,589,181
0
0
null
null
null
null
UTF-8
Python
false
false
787
py
from tkinter import * from tkinter import messagebox def clickLeft(event): txt ='' if event.num ==1: txt +='왼쪽 버튼 : ' elif event.num ==2: txt +='가운데 버튼 : ' else: txt +='오른쪽 버트 : ' txt += str(event.x) + "," + str(event.y) messagebox.showinfo('요기제목', txt) def keyPress(event): messagebox.showinfo('요기제목', chr(event.keycode)) # messagebox.showinfo('요기제목','요기내용') window = Tk() window.geometry("500x300") photo = PhotoImage(file='C:\Images\Pet_GIF\Pet_GIF(256x256)\cat01_256.gif') label1 = Label(window, image=photo) label1.bind("<Button>", clickLeft) window.bind("<Key>", keyPress) label1.pack(expand=1, anchor=CENTER) window.mainloop()
[ "noreply@github.com" ]
noreply@github.com
2f16eac975ec42c2015734ffd0e032fa1d33e9a5
415bb6c4d47972b19b5e59da5c93f579791184cf
/fibonacci.py
0f180173efea3b8cd44519f301fa138a58a2d358
[]
no_license
akshay007-eng/projectpygit
e22c629a45d86c329a0b40b63c5e0536321ac25e
386db6e30466bc8f47e924ceaf187080a78ea7b2
refs/heads/master
2022-11-19T09:30:22.607854
2020-07-03T17:19:22
2020-07-03T17:19:22
276,948,605
0
0
null
null
null
null
UTF-8
Python
false
false
123
py
# Fibonacci series: # the sum of two elements defines the next a, b = 0, 1 while b < 10: print (b) a, b = b, a+b
[ "noreply@github.com" ]
noreply@github.com
efdbbaf125546b22e79da1e189dd44d713d68223
487ce91881032c1de16e35ed8bc187d6034205f7
/codes/CodeJamCrawler/16_0_2_neat/16_0_2_jolley_Pancakes.py
0f7c8e1f03d564dbbb9de3c313d22706fa0aea19
[]
no_license
DaHuO/Supergraph
9cd26d8c5a081803015d93cf5f2674009e92ef7e
c88059dc66297af577ad2b8afa4e0ac0ad622915
refs/heads/master
2021-06-14T16:07:52.405091
2016-08-21T13:39:13
2016-08-21T13:39:13
49,829,508
2
0
null
2021-03-19T21:55:46
2016-01-17T18:23:00
Python
UTF-8
Python
false
false
972
py
# -*- coding: utf-8 -*- """ Created on Sat Apr 9 18:01:19 2016 @author: jo """ with open('input', 'r') as f: cases = 0 case = 0 with open('outputPan', 'w') as fo: for line in f: if line[0].isdigit(): cases = int(line) #print(line) else: case +=1 last = True flips = 0 for c in xrange(len(line)): positive = True if line[c] == '-': positive = False if c == 0: last = positive else: if positive != last: flips +=1 if c == (len(line)-1): if positive != True: flips += 1 fo.write('Case #' + str(case) + ': ' + str(flips) + '\n') last = positive
[ "[dhuo@tcd.ie]" ]
[dhuo@tcd.ie]
1e54a80cf532a812739ef90aab6c8f287c71d698
6a8ff478a3a9a9108b87a051e664f59557985fa5
/example/tests/test_relations.py
b83bbef46887e42b0c866a98995d4e942d5cf476
[ "BSD-2-Clause" ]
permissive
HumanExposure/MMDB
445a650e834e38a6539863f1b4549de2e9653455
423a4d6484c13f5969ca44ddfc156808b1978b23
refs/heads/dev
2023-03-04T09:04:08.886345
2021-02-02T17:44:17
2021-02-02T17:44:17
315,435,637
1
0
BSD-2-Clause
2021-02-02T17:44:19
2020-11-23T20:41:51
Python
UTF-8
Python
false
false
10,909
py
from __future__ import absolute_import from django.test.client import RequestFactory from django.utils import timezone from rest_framework import serializers from rest_framework.fields import SkipField from rest_framework.reverse import reverse from rest_framework_json_api.exceptions import Conflict from rest_framework_json_api.relations import ( HyperlinkedRelatedField, ResourceRelatedField, SerializerMethodHyperlinkedRelatedField, ) from rest_framework_json_api.utils import format_resource_type from . import TestBase from example.models import Author, Blog, Comment, Entry from example.serializers import CommentSerializer from example.views import EntryViewSet class TestResourceRelatedField(TestBase): def setUp(self): super(TestResourceRelatedField, self).setUp() self.blog = Blog.objects.create(name="Some Blog", tagline="It's a blog") self.entry = Entry.objects.create( blog=self.blog, headline="headline", body_text="body_text", pub_date=timezone.now(), mod_date=timezone.now(), n_comments=0, n_pingbacks=0, rating=3, ) for i in range(1, 6): name = "some_author{}".format(i) self.entry.authors.add( Author.objects.create(name=name, email="{}@example.org".format(name)) ) self.comment = Comment.objects.create( entry=self.entry, body="testing one two three", author=Author.objects.first(), ) def test_data_in_correct_format_when_instantiated_with_blog_object(self): serializer = BlogFKSerializer(instance={"blog": self.blog}) expected_data = {"type": format_resource_type("Blog"), "id": str(self.blog.id)} actual_data = serializer.data["blog"] self.assertEqual(actual_data, expected_data) def test_data_in_correct_format_when_instantiated_with_entry_object(self): serializer = EntryFKSerializer(instance={"entry": self.entry}) expected_data = { "type": format_resource_type("Entry"), "id": str(self.entry.id), } actual_data = serializer.data["entry"] self.assertEqual(actual_data, expected_data) def test_deserialize_primitive_data_blog(self): serializer = BlogFKSerializer( data={ 
"blog": {"type": format_resource_type("Blog"), "id": str(self.blog.id)} } ) self.assertTrue(serializer.is_valid()) self.assertEqual(serializer.validated_data["blog"], self.blog) def test_validation_fails_for_wrong_type(self): with self.assertRaises(Conflict) as cm: serializer = BlogFKSerializer( data={"blog": {"type": "Entries", "id": str(self.blog.id)}} ) serializer.is_valid() the_exception = cm.exception self.assertEqual(the_exception.status_code, 409) def test_serialize_many_to_many_relation(self): serializer = EntryModelSerializer(instance=self.entry) type_string = format_resource_type("Author") author_pks = Author.objects.values_list("pk", flat=True) expected_data = [{"type": type_string, "id": str(pk)} for pk in author_pks] self.assertEqual(serializer.data["authors"], expected_data) def test_deserialize_many_to_many_relation(self): type_string = format_resource_type("Author") author_pks = Author.objects.values_list("pk", flat=True) authors = [{"type": type_string, "id": pk} for pk in author_pks] serializer = EntryModelSerializer(data={"authors": authors, "comments": []}) self.assertTrue(serializer.is_valid()) self.assertEqual( len(serializer.validated_data["authors"]), Author.objects.count() ) for author in serializer.validated_data["authors"]: self.assertIsInstance(author, Author) def test_read_only(self): serializer = EntryModelSerializer( data={"authors": [], "comments": [{"type": "Comments", "id": 2}]} ) serializer.is_valid(raise_exception=True) self.assertNotIn("comments", serializer.validated_data) def test_invalid_resource_id_object(self): comment = { "body": "testing 123", "entry": {"type": "entry"}, "author": {"id": "5"}, } serializer = CommentSerializer(data=comment) assert not serializer.is_valid() assert serializer.errors == { "author": ["Invalid resource identifier object: missing 'type' attribute"], "entry": ["Invalid resource identifier object: missing 'id' attribute"], } class TestHyperlinkedFieldBase(TestBase): def setUp(self): 
super(TestHyperlinkedFieldBase, self).setUp() self.blog = Blog.objects.create(name="Some Blog", tagline="It's a blog") self.entry = Entry.objects.create( blog=self.blog, headline="headline", body_text="body_text", pub_date=timezone.now(), mod_date=timezone.now(), n_comments=0, n_pingbacks=0, rating=3, ) self.comment = Comment.objects.create( entry=self.entry, body="testing one two three", ) self.request = RequestFactory().get( reverse("entry-detail", kwargs={"pk": self.entry.pk}) ) self.view = EntryViewSet( request=self.request, kwargs={"entry_pk": self.entry.id} ) class TestHyperlinkedRelatedField(TestHyperlinkedFieldBase): def test_single_hyperlinked_related_field(self): field = HyperlinkedRelatedField( related_link_view_name="entry-blog", related_link_url_kwarg="entry_pk", self_link_view_name="entry-relationships", read_only=True, ) field._context = {"request": self.request, "view": self.view} field.field_name = "blog" self.assertRaises(NotImplementedError, field.to_representation, self.entry) self.assertRaises(SkipField, field.get_attribute, self.entry) links_expected = { "self": "http://testserver/entries/{}/relationships/blog".format( self.entry.pk ), "related": "http://testserver/entries/{}/blog".format(self.entry.pk), } got = field.get_links(self.entry) self.assertEqual(got, links_expected) def test_many_hyperlinked_related_field(self): field = HyperlinkedRelatedField( related_link_view_name="entry-comments", related_link_url_kwarg="entry_pk", self_link_view_name="entry-relationships", read_only=True, many=True, ) field._context = {"request": self.request, "view": self.view} field.field_name = "comments" self.assertRaises( NotImplementedError, field.to_representation, self.entry.comments.all() ) self.assertRaises(SkipField, field.get_attribute, self.entry) links_expected = { "self": "http://testserver/entries/{}/relationships/comments".format( self.entry.pk ), "related": "http://testserver/entries/{}/comments".format(self.entry.pk), } got = 
field.child_relation.get_links(self.entry) self.assertEqual(got, links_expected) class TestSerializerMethodHyperlinkedRelatedField(TestHyperlinkedFieldBase): def test_single_serializer_method_hyperlinked_related_field(self): serializer = EntryModelSerializerWithHyperLinks( instance=self.entry, context={"request": self.request, "view": self.view} ) field = serializer.fields["blog"] self.assertRaises(NotImplementedError, field.to_representation, self.entry) self.assertRaises(SkipField, field.get_attribute, self.entry) expected = { "self": "http://testserver/entries/{}/relationships/blog".format( self.entry.pk ), "related": "http://testserver/entries/{}/blog".format(self.entry.pk), } got = field.get_links(self.entry) self.assertEqual(got, expected) def test_many_serializer_method_hyperlinked_related_field(self): serializer = EntryModelSerializerWithHyperLinks( instance=self.entry, context={"request": self.request, "view": self.view} ) field = serializer.fields["comments"] self.assertRaises(NotImplementedError, field.to_representation, self.entry) self.assertRaises(SkipField, field.get_attribute, self.entry) expected = { "self": "http://testserver/entries/{}/relationships/comments".format( self.entry.pk ), "related": "http://testserver/entries/{}/comments".format(self.entry.pk), } got = field.get_links(self.entry) self.assertEqual(got, expected) def test_get_blog(self): serializer = EntryModelSerializerWithHyperLinks(instance=self.entry) got = serializer.get_blog(self.entry) expected = self.entry.blog self.assertEqual(got, expected) def test_get_comments(self): serializer = EntryModelSerializerWithHyperLinks(instance=self.entry) got = serializer.get_comments(self.entry) expected = self.entry.comments.all() self.assertListEqual(list(got), list(expected)) class BlogResourceRelatedField(ResourceRelatedField): def get_queryset(self): return Blog.objects class BlogFKSerializer(serializers.Serializer): blog = BlogResourceRelatedField() class 
EntryFKSerializer(serializers.Serializer): entry = ResourceRelatedField(queryset=Entry.objects) class EntryModelSerializer(serializers.ModelSerializer): authors = ResourceRelatedField(many=True, queryset=Author.objects) comments = ResourceRelatedField(many=True, read_only=True) class Meta: model = Entry fields = ("authors", "comments") class EntryModelSerializerWithHyperLinks(serializers.ModelSerializer): blog = SerializerMethodHyperlinkedRelatedField( related_link_view_name="entry-blog", related_link_url_kwarg="entry_pk", self_link_view_name="entry-relationships", many=True, ) comments = SerializerMethodHyperlinkedRelatedField( related_link_view_name="entry-comments", related_link_url_kwarg="entry_pk", self_link_view_name="entry-relationships", many=True, ) class Meta: model = Entry fields = ( "blog", "comments", ) def get_blog(self, obj): return obj.blog def get_comments(self, obj): return obj.comments.all()
[ "noreply@github.com" ]
noreply@github.com
faf1d1399fc87b802e63cef3024960af543e1e5d
a0b93f2723d13fefc224478729c08e4aac86266d
/test_query.py
6aae8ba1a2d7d83ba446eb4c7f59ffbb70293e86
[]
no_license
diegohwang/queryset
a4ef485d3f8149eb70e19b97f5e303c124d83720
a4e05bb9daafb9bbe7e50381198712ca0b4b6b3d
refs/heads/master
2021-01-21T14:48:02.318381
2016-06-14T03:18:48
2016-06-14T03:18:48
58,798,568
0
0
null
null
null
null
UTF-8
Python
false
false
4,593
py
#!/usr/bin/python # -*- coding:utf-8 -*- from queryset.wsgi import * from video.models import site, media class QuerySetOperation(): #1. 创建对象(4种方法) #1.1 def create_site1(self): print '创建方法1' site.objects.create(site_name='BT天堂', site_code='bttiantang', site_id=1) #1.2 def create_site2(self): print '创建方法2' new_site = site(site_name='高清mp4吧', site_code='mp4ba', site_id=2) new_site.save() #1.3 def create_site3(self): print '创建方法3' new_site = site() new_site.site_name = '海盗窝' new_site.site_code = 'hdwo' new_site.site_id = 3 new_site.save() #1.4 首先尝试获取,不存在就创建,可以防止重复(返回值元组(object, True/False)) def create_site4(self): print '创建方法4' site.objects.get_or_create(site_name='优酷', site_code='youku', site_id=4) #2. 获取对象(过滤) #2.1 查询所有 def get_all(self): all_site = site.objects.all() #获取数量 print type(all_site) print all_site.count() #QuerySet是可以迭代的 for s in all_site: print 'the site_id %d is %s' % (s.site_id, s.site_name) #2.2 切片查询(不支持负索引) def get_slice(self): all_site = site.objects.all() slice_site = all_site[0:2] for s in slice_site: print s #2.3 条件唯一查询 def get_one(self): #site_name为'优酷'的含义多条时,会报错 st = site.objects.get(site_name='优酷') print st print type(st) #2.4 条件过滤 #2.4.1 等于 def filter_equal(self): st = site.objects.filter(site_code='youku') #st = site.objects.filter(site_code_exact='youku') print st #2.4.2 忽略大小写的等于 def filter_equal_iexact(self): st = site.objects.filter(site_code__ieact='YouKu') print st #2.4.3 包含 def filter_contains(self): st = site.objects.filter(site_code__contains='ku') print st #2.4.4 忽略大小写的包含 def filter_icontain(self): st = site.objects.filter(site_code__contains='KU') print st #2.4.5 正则表达式过滤 def filter_regex(self): st = site.objects.filter(site_code__regex='^you') print st #2.4.6 忽略正则表达式过滤 def filter_iregex(self): st = site.objects.filter(site_code__iregex='^YoU') print st #2.4.7 排除 def exclude(self): st = site.objects.exclude(site_code__exact='youku') print st #3 排序 #3.1 从小到大 def sort_asc(self): sites = site.objects.all().order_by('site_id') for s in 
sites: print 'the site_id %d is %s' % (s.site_id, s.site_name) #3.2 从大到小 def sort_desc(self): sites = site.objects.all().order_by('-site_id') for s in sites: print 'the site_id %d is %s' % (s.site_id, s.site_name) #4 更新 def update(self): st = site.objects.filter(site_name='优酷') st.update(site_name='爱奇艺', site_code='iqiyi') #5 删除 def delete_site(self): st = site.objects.all() print st.count() st.delete() print st.count() def delete_media(self): m = media.objects.all() print m.count() m.delete() print m.count() def goc1(self): st = site.objects.get_or_create(site_name='搜狐', site_code='sohu', defaults={'site_id':6}) print st def uoc1(self): st = site.objects.update_or_create(site_name='乐视', site_code='letv', site_id=5, defaults={'site_name':u'搜狐', 'site_code':'sohu', 'site_id':5}) print st def uoc2(self): st = site.objects.update_or_create(site_name='搜狐', site_code='sohu', site_id=5, defaults={'site_name':u'乐视', 'site_code':'letv', 'site_id':5}) print st def uoc3(self): st = site.objects.update_or_create(site_name='搜狐1', site_code='sohu1', site_id=6, defaults=None) print st if __name__ == "__main__": qso = QuerySetOperation() #qso.create_site1() #qso.create_site2() #qso.create_site3() #qso.create_site4() #qso.get_all() #qso.get_one() #qso.update() qso.delete_site() #qso.uoc1() #qso.uoc2() #qso.uoc3() #qso.delete_media() #qso.goc1()
[ "huangjm@fun.tv" ]
huangjm@fun.tv
19699e97e03bf4d6c752d7f7051767d2f6178dc2
0aaa183922f5304ea11509e62a5b4e73ffa58525
/柏鹭杯/11.25/notepad.py
cc9e291beb0eb560a4325ed8eb0d7034a5fcda7c
[]
no_license
0gur1/ctf
be07fbe0acf3a4de7afb078fe7cabc5c9d27822c
66eb36c73ef18759a0a7639d4c4f67bf80796919
refs/heads/master
2021-06-28T01:58:18.763949
2019-01-30T04:26:39
2019-01-30T04:26:39
131,176,877
0
0
null
null
null
null
UTF-8
Python
false
false
500
py
from pwn import * context.log_level = 'debug' p=process('./notepad') def new(data,size): p.sendlineafter(">> ",'1') p.sendlineafter("Size: ",str(size)) p.sendlineafter("Data: \n",data) def edit(idx,data,size): p.sendlineafter(">> ",'2') p.sendlineafter("Index: ",str(idx)) p.sendlineafter("Size: ",str(size)) p.sendlineafter("Data: ",data) def prin(idx): p.sendlineafter(">> ",'3') p.sendlineafter("Index: ",str(idx)) new('123',4)#0 new('123',4)#1 edit(0,'1'*7,16) prin(0) p.interactive()
[ "904466829@qq.com" ]
904466829@qq.com
609760859820be1e68a6de0cb45de2de2a4b6eb9
b77e464c1051dbec0dea6deaf63ccc393c17c84c
/tests/test_base.py
b49f58ee4e9aca182c4a93894ccbbe58618c0117
[ "Unlicense" ]
permissive
victtorvpb/flask-cash-back-plataform
63dad5677811df8d24999a6c4ad5e46d91d87dcd
301bcad96662e7ba8f74b8e6896248f2ac2854d3
refs/heads/main
2023-07-12T02:46:23.526791
2021-08-16T23:01:11
2021-08-16T23:01:32
397,004,794
0
0
null
null
null
null
UTF-8
Python
false
false
389
py
"""Tests for the package's example base class and base function."""

import pytest

from flask_cash_back_plataform import BaseClass, base_function

# Readable alias used by the parametrized test below.
given = pytest.mark.parametrize


def test_base_function():
    assert base_function() == "hello from base function"


def test_base_class():
    assert BaseClass().base_method() == "hello from BaseClass"


@given("fn", [BaseClass(), base_function])
def test_parameterized(fn):
    # Both callables share the same "hello from ..." greeting prefix.
    assert "hello from" in fn()
[ "actions@github.com" ]
actions@github.com
4fc79439d5cdb7cacba4370b7e8d37f14b961c4a
ac32bac45df77083f4ef3115e747038a6753936c
/adapter-transformers-customs/adapter-transformers-attn/src/transformers/trainer-with-sub-model-list.py
4c0c31f94fbf40ec2a6cf77be31c8626e614571d
[ "Apache-2.0" ]
permissive
Yujin-Yujin/rexpert
13e1d5c4ca55664dd9fbb9a765ea5157a2e0893f
ed8628dc053194fee40e593b1cc5ec45a26c8073
refs/heads/main
2023-06-22T05:58:42.269923
2021-07-23T06:35:43
2021-07-23T06:35:43
373,423,887
0
0
null
null
null
null
UTF-8
Python
false
false
82,655
py
# coding=utf-8 # Copyright 2020-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ The Trainer class, to easily train a 🤗 Transformers from scratch or finetune it on a new task. """ import collections import inspect import math import os import re import shutil import warnings from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union # Integrations must be imported before ML frameworks: from .integrations import ( # isort: split default_hp_search_backend, hp_params, is_azureml_available, is_comet_available, is_mlflow_available, is_optuna_available, is_ray_available, is_tensorboard_available, is_wandb_available, run_hp_search_optuna, run_hp_search_ray, ) import numpy as np import torch from packaging import version from torch import nn from torch.utils.data.dataloader import DataLoader from torch.utils.data.dataset import Dataset from torch.utils.data.distributed import DistributedSampler from torch.utils.data.sampler import RandomSampler, SequentialSampler from .data.data_collator import DataCollator, DataCollatorWithPadding, default_data_collator from .file_utils import WEIGHTS_NAME, is_datasets_available, is_in_notebook, is_torch_tpu_available from .modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING from .modeling_utils import PreTrainedModel from .optimization import AdamW, get_linear_schedule_with_warmup from .tokenization_utils_base import PreTrainedTokenizerBase from .trainer_callback import ( 
CallbackHandler, DefaultFlowCallback, PrinterCallback, ProgressCallback, TrainerCallback, TrainerControl, TrainerState, ) from .trainer_pt_utils import ( DistributedTensorGatherer, SequentialDistributedSampler, distributed_broadcast_scalars, distributed_concat, get_tpu_sampler, nested_concat, nested_detach, nested_numpify, nested_xla_mesh_reduce, reissue_pt_warnings, ) from .trainer_utils import ( PREFIX_CHECKPOINT_DIR, BestRun, EvalPrediction, HPSearchBackend, PredictionOutput, TrainOutput, default_compute_objective, default_hp_space, set_seed, ) from .training_args import TrainingArguments from .utils import logging _use_native_amp = False _use_apex = False DEFAULT_CALLBACKS = [DefaultFlowCallback] DEFAULT_PROGRESS_CALLBACK = ProgressCallback if is_in_notebook(): from .utils.notebook import NotebookProgressCallback DEFAULT_PROGRESS_CALLBACK = NotebookProgressCallback # Check if Pytorch version >= 1.6 to switch between Native AMP and Apex if version.parse(torch.__version__) < version.parse("1.6"): from .file_utils import is_apex_available if is_apex_available(): from apex import amp _use_apex = True else: _use_native_amp = True from torch.cuda.amp import autocast if version.parse(torch.__version__) < version.parse("1.2"): _use_ddp_no_sync = False else: _use_ddp_no_sync = True if is_datasets_available(): import datasets if is_torch_tpu_available(): import torch_xla.core.xla_model as xm import torch_xla.debug.metrics as met import torch_xla.distributed.parallel_loader as pl if is_tensorboard_available(): from .integrations import TensorBoardCallback DEFAULT_CALLBACKS.append(TensorBoardCallback) if is_wandb_available(): from .integrations import WandbCallback DEFAULT_CALLBACKS.append(WandbCallback) if is_comet_available(): from .integrations import CometCallback DEFAULT_CALLBACKS.append(CometCallback) if is_mlflow_available(): from .integrations import MLflowCallback DEFAULT_CALLBACKS.append(MLflowCallback) if is_optuna_available(): import optuna if 
is_ray_available(): from ray import tune if is_azureml_available(): from .integrations import AzureMLCallback DEFAULT_CALLBACKS.append(AzureMLCallback) logger = logging.get_logger(__name__) class Trainer: """ Trainer is a simple but feature-complete training and eval loop for PyTorch, optimized for 🤗 Transformers. Args: model (:class:`~transformers.PreTrainedModel` or :obj:`torch.nn.Module`, `optional`): The model to train, evaluate or use for predictions. If not provided, a ``model_init`` must be passed. .. note:: :class:`~transformers.Trainer` is optimized to work with the :class:`~transformers.PreTrainedModel` provided by the library. You can still use your own models defined as :obj:`torch.nn.Module` as long as they work the same way as the 🤗 Transformers models. args (:class:`~transformers.TrainingArguments`, `optional`): The arguments to tweak for training. Will default to a basic instance of :class:`~transformers.TrainingArguments` with the ``output_dir`` set to a directory named `tmp_trainer` in the current directory if not provided. data_collator (:obj:`DataCollator`, `optional`): The function to use to form a batch from a list of elements of :obj:`train_dataset` or :obj:`eval_dataset`. Will default to :func:`~transformers.default_data_collator` if no ``tokenizer`` is provided, an instance of :func:`~transformers.DataCollatorWithPadding` otherwise. train_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): The dataset to use for training. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): The dataset to use for evaluation. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. tokenizer (:class:`PreTrainedTokenizerBase`, `optional`): The tokenizer used to preprocess the data. 
If provided, will be used to automatically pad the inputs the maximum length when batching inputs, and it will be saved along the model to make it easier to rerun an interrupted training or reuse the fine-tuned model. model_init (:obj:`Callable[[], PreTrainedModel]`, `optional`): A function that instantiates the model to be used. If provided, each call to :meth:`~transformers.Trainer.train` will start from a new instance of the model as given by this function. The function may have zero argument, or a single one containing the optuna/Ray Tune trial object, to be able to choose different architectures according to hyper parameters (such as layer count, sizes of inner layers, dropout probabilities etc). compute_metrics (:obj:`Callable[[EvalPrediction], Dict]`, `optional`): The function that will be used to compute metrics at evaluation. Must take a :class:`~transformers.EvalPrediction` and return a dictionary string to metric values. callbacks (List of :obj:`~transformers.TrainerCallback`, `optional`): A list of callbacks to customize the training loop. Will add those to the list of default callbacks detailed in :doc:`here <callback>`. If you want to remove one of the default callbacks used, use the :meth:`Trainer.remove_callback` method. optimizers (:obj:`Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR`, `optional`): A tuple containing the optimizer and the scheduler to use. Will default to an instance of :class:`~transformers.AdamW` on your model and a scheduler given by :func:`~transformers.get_linear_schedule_with_warmup` controlled by :obj:`args`. kwargs: Deprecated keyword arguments. 
""" def __init__( self, model: Union[PreTrainedModel, torch.nn.Module] = None, sub_model_list: Optional[List[Union[PreTrainedModel, torch.nn.Module]]] = None, args: TrainingArguments = None, data_collator: Optional[DataCollator] = None, train_dataset: Optional[Dataset] = None, eval_dataset: Optional[Dataset] = None, tokenizer: Optional["PreTrainedTokenizerBase"] = None, model_init: Callable[[], PreTrainedModel] = None, compute_metrics: Optional[Callable[[EvalPrediction], Dict]] = None, callbacks: Optional[List[TrainerCallback]] = None, do_save_full_model: bool = True, do_save_adapters: bool = False, do_save_adapter_fusion: bool = False, adapter_names: Optional[List[List[str]]] = None, optimizers: Tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None), **kwargs, ): if args is None: logger.info("No `TrainingArguments` passed, using the current path as `output_dir`.") args = TrainingArguments("tmp_trainer") self.args = args # Seed must be set before instantiating the model when using model set_seed(self.args.seed) assert ( model is not None or model_init is not None ), "You must provide a model to use `Trainer`, either by using the `model` argument or the `model_init` argument." 
self.model_init = model_init self.hp_name = None if model is None and model_init is not None: model = self.call_model_init() self.model = model.to(args.device) if model is not None else None if sub_model_list is None: self.sub_model_list = None else: if len(sub_model_list) > 0 : self.sub_model_list = nn.ModuleList(sub_model_list).to(args.device) else: self.sub_model_list = None default_collator = default_data_collator if tokenizer is None else DataCollatorWithPadding(tokenizer) self.data_collator = data_collator if data_collator is not None else default_collator self.train_dataset = train_dataset self.eval_dataset = eval_dataset self.tokenizer = tokenizer self.compute_metrics = compute_metrics self.optimizer, self.lr_scheduler = optimizers if model_init is not None and (self.optimizer is not None or self.lr_scheduler is not None): raise RuntimeError( "Passing a `model_init` is incompatible with providing the `optimizers` argument." "You should subclass `Trainer` and override the `create_optimizer_and_scheduler` method." ) callbacks = DEFAULT_CALLBACKS if callbacks is None else DEFAULT_CALLBACKS + callbacks self.callback_handler = CallbackHandler(callbacks, self.model, self.optimizer, self.lr_scheduler) self.add_callback(PrinterCallback if self.args.disable_tqdm else DEFAULT_PROGRESS_CALLBACK) # Deprecated arguments if "tb_writer" in kwargs: warnings.warn( "Passing `tb_writer` as a keyword argument is deprecated and won't be possible in a " + "future version. Use `TensorBoardCallback(tb_writer=...)` instead and pass it to the `callbacks`" + "argument", FutureWarning, ) tb_writer = kwargs.pop("tb_writer") self.remove_callback(TensorBoardCallback) self.add_callback(TensorBoardCallback(tb_writer=tb_writer)) if "prediction_loss_only" in kwargs: warnings.warn( "Passing `prediction_loss_only` as a keyword argument is deprecated and won't be possible in a " + "future version. Use `args.prediction_loss_only` instead. 
Setting " + f"`args.prediction_loss_only={kwargs['prediction_loss_only']}", FutureWarning, ) self.args.prediction_loss_only = kwargs.pop("prediction_loss_only") assert kwargs == {}, f"Unexpected keyword arguments: {list(kwargs.keys())}." # Will be set to True by `self._setup_loggers()` on first call to `self.log()`. self._loggers_initialized = False # Create output directory if needed if self.is_world_process_zero(): os.makedirs(self.args.output_dir, exist_ok=True) # adapters used self.do_save_full_model = do_save_full_model self.do_save_adapters = do_save_adapters self.do_save_adapter_fusion = do_save_adapter_fusion self.adapter_names = adapter_names if is_torch_tpu_available() and isinstance(self.model, PreTrainedModel): # Set an xla_device flag on the model's config. # We'll find a more elegant and not need to do this in the future. self.model.config.xla_device = True if not callable(self.data_collator) and callable(getattr(self.data_collator, "collate_batch", None)): self.data_collator = self.data_collator.collate_batch warnings.warn( ( "The `data_collator` should now be a simple callable (function, class with `__call__`), classes " + "with a `collate_batch` are deprecated and won't be supported in a future version." 
), FutureWarning, ) if args.max_steps > 0: logger.info("max_steps is given, it will override any value given in num_train_epochs") # Enforce rules on using datasets with no __len__ if train_dataset is not None and not isinstance(train_dataset, collections.abc.Sized) and args.max_steps <= 0: raise ValueError("train_dataset does not implement __len__, max_steps has to be specified") if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") if is_datasets_available(): if isinstance(train_dataset, datasets.Dataset): self._remove_unused_columns(self.train_dataset, description="training") if isinstance(eval_dataset, datasets.Dataset): self._remove_unused_columns(self.eval_dataset, description="evaluation") self.state = TrainerState() self.control = TrainerControl() # Internal variable for total_flos used to count as tensors (for distributed + TPU), will be sent in the # state at each call to self.log. self._total_flos = None if self.args.fp16 and _use_native_amp: self.scaler = torch.cuda.amp.GradScaler() self.hp_search_backend = None self.use_tune_checkpoints = False default_label_names = ( ["start_positions", "end_positions"] if type(self.model) in MODEL_FOR_QUESTION_ANSWERING_MAPPING.values() else ["labels"] ) self.label_names = default_label_names if self.args.label_names is None else self.args.label_names self.control = self.callback_handler.on_init_end(self.args, self.state, self.control) def add_callback(self, callback): """ Add a callback to the current list of :class:`~transformer.TrainerCallback`. Args: callback (:obj:`type` or :class:`~transformer.TrainerCallback`): A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`. In the first case, will instantiate a member of that class. 
""" self.callback_handler.add_callback(callback) def pop_callback(self, callback): """ Remove a callback from the current list of :class:`~transformer.TrainerCallback` and returns it. If the callback is not found, returns :obj:`None` (and no error is raised). Args: callback (:obj:`type` or :class:`~transformer.TrainerCallback`): A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`. In the first case, will pop the first member of that class found in the list of callbacks. Returns: :class:`~transformer.TrainerCallback`: The callback removed, if found. """ return self.callback_handler.pop_callback(callback) def remove_callback(self, callback): """ Remove a callback from the current list of :class:`~transformer.TrainerCallback`. Args: callback (:obj:`type` or :class:`~transformer.TrainerCallback`): A :class:`~transformer.TrainerCallback` class or an instance of a :class:`~transformer.TrainerCallback`. In the first case, will remove the first member of that class found in the list of callbacks. """ self.callback_handler.remove_callback(callback) def _remove_unused_columns(self, dataset: "datasets.Dataset", description: Optional[str] = None): if not self.args.remove_unused_columns: return # Inspect model forward signature to keep only the arguments it accepts. signature = inspect.signature(self.model.forward) signature_columns = list(signature.parameters.keys()) # Labels may be named label or label_ids, the default data collator handles that. signature_columns += ["label", "label_ids"] columns = [k for k in signature_columns if k in dataset.column_names] ignored_columns = list(set(dataset.column_names) - set(signature_columns)) dset_description = "" if description is None else f"in the {description} set " logger.info( f"The following columns {dset_description}don't have a corresponding argument in `{self.model.__class__.__name__}.forward` and have been ignored: {', '.join(ignored_columns)}." 
) dataset.set_format(type=dataset.format["type"], columns=columns) def _get_train_sampler(self) -> Optional[torch.utils.data.sampler.Sampler]: if isinstance(self.train_dataset, torch.utils.data.IterableDataset) or not isinstance( self.train_dataset, collections.abc.Sized ): return None elif is_torch_tpu_available(): return get_tpu_sampler(self.train_dataset) else: return ( RandomSampler(self.train_dataset) if self.args.local_rank == -1 else DistributedSampler(self.train_dataset) ) def get_train_dataloader(self) -> DataLoader: """ Returns the training :class:`~torch.utils.data.DataLoader`. Will use no sampler if :obj:`self.train_dataset` does not implement :obj:`__len__`, a random sampler (adapted to distributed training if necessary) otherwise. Subclass and override this method if you want to inject some custom behavior. """ if self.train_dataset is None: raise ValueError("Trainer: training requires a train_dataset.") train_sampler = self._get_train_sampler() return DataLoader( self.train_dataset, batch_size=self.args.train_batch_size, sampler=train_sampler, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, ) def _get_eval_sampler(self, eval_dataset: Dataset) -> Optional[torch.utils.data.sampler.Sampler]: if is_torch_tpu_available(): return SequentialDistributedSampler(eval_dataset, num_replicas=xm.xrt_world_size(), rank=xm.get_ordinal()) elif self.args.local_rank != -1: return SequentialDistributedSampler(eval_dataset) else: return SequentialSampler(eval_dataset) def get_eval_dataloader(self, eval_dataset: Optional[Dataset] = None) -> DataLoader: """ Returns the evaluation :class:`~torch.utils.data.DataLoader`. Subclass and override this method if you want to inject some custom behavior. Args: eval_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): If provided, will override :obj:`self.eval_dataset`. 
If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`. """ if eval_dataset is None and self.eval_dataset is None: raise ValueError("Trainer: evaluation requires an eval_dataset.") elif eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") elif is_datasets_available() and isinstance(eval_dataset, datasets.Dataset): self._remove_unused_columns(eval_dataset, description="evaluation") eval_dataset = eval_dataset if eval_dataset is not None else self.eval_dataset eval_sampler = self._get_eval_sampler(eval_dataset) return DataLoader( eval_dataset, sampler=eval_sampler, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, num_workers=self.args.dataloader_num_workers, ) def get_test_dataloader(self, test_dataset: Dataset) -> DataLoader: """ Returns the test :class:`~torch.utils.data.DataLoader`. Subclass and override this method if you want to inject some custom behavior. Args: test_dataset (:obj:`torch.utils.data.dataset.Dataset`, `optional`): The test dataset to use. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement :obj:`__len__`. """ if not isinstance(test_dataset, collections.abc.Sized): raise ValueError("test_dataset must implement __len__") elif is_datasets_available() and isinstance(test_dataset, datasets.Dataset): self._remove_unused_columns(test_dataset, description="test") test_sampler = self._get_eval_sampler(test_dataset) # We use the same batch_size as for eval. 
return DataLoader( test_dataset, sampler=test_sampler, batch_size=self.args.eval_batch_size, collate_fn=self.data_collator, drop_last=self.args.dataloader_drop_last, ) def create_optimizer_and_scheduler(self, num_training_steps: int): """ Setup the optimizer and the learning rate scheduler. We provide a reasonable default that works well. If you want to use something else, you can pass a tuple in the Trainer's init through :obj:`optimizers`, or subclass and override this method in a subclass. """ if self.optimizer is None: no_decay = ["bias", "LayerNorm.weight"] if hasattr(self.model.config, "adapter_fusion_models"): no_decay += [f"adapter_fusion_layer.{n}.value" for n in self.model.config.adapter_fusion_models] optimizer_grouped_parameters = [ { "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)], "weight_decay": self.args.weight_decay, }, { "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)], "weight_decay": 0.0, }, ] self.optimizer = AdamW( optimizer_grouped_parameters, lr=self.args.learning_rate, betas=(self.args.adam_beta1, self.args.adam_beta2), eps=self.args.adam_epsilon, ) if self.lr_scheduler is None: self.lr_scheduler = get_linear_schedule_with_warmup( self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps ) def num_examples(self, dataloader: DataLoader) -> int: """ Helper to get number of samples in a :class:`~torch.utils.data.DataLoader` by accessing its dataset. 
Will raise an exception if the underlying dataset dese not implement method :obj:`__len__` """ return len(dataloader.dataset) def _hp_search_setup(self, trial: Union["optuna.Trial", Dict[str, Any]]): """ HP search setup code """ self._trial = trial if self.hp_search_backend is None or trial is None: return params = self.hp_space(trial) if self.hp_search_backend == HPSearchBackend.OPTUNA else trial for key, value in params.items(): if not hasattr(self.args, key): raise AttributeError( f"Trying to set {key} in the hyperparameter search but there is no corresponding field in `TrainingArguments`." ) old_attr = getattr(self.args, key, None) # Casting value to the proper type if old_attr is not None: value = type(old_attr)(value) setattr(self.args, key, value) if self.hp_search_backend == HPSearchBackend.OPTUNA: logger.info("Trial:", trial.params) def _report_to_hp_search( self, trial: Union["optuna.Trial", Dict[str, Any]], epoch: int, metrics: Dict[str, float] ): if self.hp_search_backend is None or trial is None: return self.objective = self.compute_objective(metrics.copy()) if self.hp_search_backend == HPSearchBackend.OPTUNA: trial.report(self.objective, epoch) if trial.should_prune(): raise optuna.TrialPruned() elif self.hp_search_backend == HPSearchBackend.RAY: if self.state.global_step % self.args.save_steps == 0: self._tune_save_checkpoint() tune.report(objective=self.objective, **metrics) def _tune_save_checkpoint(self): if not self.use_tune_checkpoints: return with tune.checkpoint_dir(step=self.state.global_step) as checkpoint_dir: self.args.output_dir = checkpoint_dir output_dir = os.path.join(self.args.output_dir, f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}") self.save_model(output_dir) if self.is_world_master(): self.state.save_to_json(os.path.join(output_dir, "trainer_state.json")) torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) def 
call_model_init(self, trial=None): model_init_argcount = len(inspect.signature(self.model_init).parameters) if model_init_argcount == 0: model = self.model_init() elif model_init_argcount == 1: model = self.model_init(trial) else: raise RuntimeError("model_init should have 0 or 1 argument.") if model is None: raise RuntimeError("model_init should not return None.") return model def train(self, model_path: Optional[str] = None, trial: Union["optuna.Trial", Dict[str, Any]] = None): """ Main training entry point. Args: model_path (:obj:`str`, `optional`): Local path to the model if the model to train has been instantiated from a local path. If present, training will resume from the optimizer/scheduler states loaded here. trial (:obj:`optuna.Trial` or :obj:`Dict[str, Any]`, `optional`): The trial run or the hyperparameter dictionary for hyperparameter search. """ # This might change the seed so needs to run first. self._hp_search_setup(trial) # Model re-init if self.model_init is not None: # Seed must be set before instantiating the model when using model_init. 
set_seed(self.args.seed) model = self.call_model_init(trial) self.model = model.to(self.args.device) # Reinitializes optimizer and scheduler self.optimizer, self.lr_scheduler = None, None # Keeping track whether we can can len() on the dataset or not train_dataset_is_sized = isinstance(self.train_dataset, collections.abc.Sized) # Data loader and number of training steps train_dataloader = self.get_train_dataloader() # Setting up training control variables: # number of training epochs: num_train_epochs # number of training steps per epoch: num_update_steps_per_epoch # total number of training steps to execute: max_steps if train_dataset_is_sized: num_update_steps_per_epoch = len(train_dataloader) // self.args.gradient_accumulation_steps num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1) if self.args.max_steps > 0: max_steps = self.args.max_steps num_train_epochs = self.args.max_steps // num_update_steps_per_epoch + int( self.args.max_steps % num_update_steps_per_epoch > 0 ) else: max_steps = math.ceil(self.args.num_train_epochs * num_update_steps_per_epoch) num_train_epochs = math.ceil(self.args.num_train_epochs) else: # see __init__. 
max_steps is set when the dataset has no __len__ max_steps = self.args.max_steps num_train_epochs = 1 num_update_steps_per_epoch = max_steps self.create_optimizer_and_scheduler(num_training_steps=max_steps) self.state = TrainerState() self.state.is_hyper_param_search = trial is not None # Check if saved optimizer or scheduler states exist self._load_optimizer_and_scheduler(model_path) # Mixed precision training with apex (torch < 1.6) model = self.model sub_model_list = self.sub_model_list if self.sub_model_list is not None else None if self.args.fp16 and _use_apex: if not is_apex_available(): raise ImportError("Please install apex from https://www.github.com/nvidia/apex to use fp16 training.") model, self.optimizer = amp.initialize(model, self.optimizer, opt_level=self.args.fp16_opt_level) # Multi-gpu training (should be after apex fp16 initialization) if self.args.n_gpu > 1: model = torch.nn.DataParallel(model) if sub_model_list is not None: for s_index, sub_model in enumerate(sub_model_list): sub_model_list[s_index] = torch.nn.DataParallel(sub_model) print("pooh pararell worked") # Distributed training (should be after apex fp16 initialization) if self.args.local_rank != -1: model = torch.nn.parallel.DistributedDataParallel( model, device_ids=[self.args.local_rank], output_device=self.args.local_rank, find_unused_parameters=( not getattr(model.config, "gradient_checkpointing", False) if isinstance(model, PreTrainedModel) else True ), ) # find_unused_parameters breaks checkpointing as per # https://github.com/huggingface/transformers/pull/4659#issuecomment-643356021 # Train! 
if is_torch_tpu_available(): total_train_batch_size = self.args.train_batch_size * xm.xrt_world_size() else: total_train_batch_size = ( self.args.train_batch_size * self.args.gradient_accumulation_steps * (torch.distributed.get_world_size() if self.args.local_rank != -1 else 1) ) num_examples = ( self.num_examples(train_dataloader) if train_dataset_is_sized else total_train_batch_size * self.args.max_steps ) logger.info("***** Running training *****") logger.info(" Num examples = %d", num_examples) logger.info(" Num Epochs = %d", num_train_epochs) logger.info(" Instantaneous batch size per device = %d", self.args.per_device_train_batch_size) logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d", total_train_batch_size) logger.info(" Gradient Accumulation steps = %d", self.args.gradient_accumulation_steps) logger.info(" Total optimization steps = %d", max_steps) self.state.epoch = 0 epochs_trained = 0 steps_trained_in_current_epoch = 0 # Check if continuing training from a checkpoint if model_path and os.path.isfile(os.path.join(model_path, "trainer_state.json")): self.state = TrainerState.load_from_json(os.path.join(model_path, "trainer_state.json")) epochs_trained = self.state.global_step // num_update_steps_per_epoch steps_trained_in_current_epoch = self.state.global_step % (num_update_steps_per_epoch) logger.info(" Continuing training from checkpoint, will skip to saved global_step") logger.info(" Continuing training from epoch %d", epochs_trained) logger.info(" Continuing training from global step %d", self.state.global_step) logger.info(" Will skip the first %d steps in the first epoch", steps_trained_in_current_epoch) # Update the references self.callback_handler.model = self.model self.callback_handler.optimizer = self.optimizer self.callback_handler.lr_scheduler = self.lr_scheduler self.callback_handler.train_dataloader = train_dataloader self.state.trial_name = self.hp_name(trial) if self.hp_name is not None else None 
self.state.trial_params = hp_params(trial) if trial is not None else None # This should be the same if the state has been saved but in case the training arguments changed, it's safer # to set this after the load. self.state.max_steps = max_steps self.state.num_train_epochs = num_train_epochs self.state.is_local_process_zero = self.is_local_process_zero() self.state.is_world_process_zero = self.is_world_process_zero() tr_loss = torch.tensor(0.0).to(self.args.device) self._logging_loss_scalar = 0 self._globalstep_last_logged = 0 self._total_flos = self.state.total_flos model.zero_grad() self.control = self.callback_handler.on_train_begin(self.args, self.state, self.control) for epoch in range(epochs_trained, num_train_epochs): if isinstance(train_dataloader, DataLoader) and isinstance(train_dataloader.sampler, DistributedSampler): train_dataloader.sampler.set_epoch(epoch) if is_torch_tpu_available(): parallel_loader = pl.ParallelLoader(train_dataloader, [self.args.device]).per_device_loader( self.args.device ) epoch_iterator = parallel_loader else: epoch_iterator = train_dataloader # Reset the past mems state at the beginning of each epoch if necessary. 
if self.args.past_index >= 0: self._past = None steps_in_epoch = len(epoch_iterator) if train_dataset_is_sized else self.args.max_steps self.control = self.callback_handler.on_epoch_begin(self.args, self.state, self.control) for step, inputs in enumerate(epoch_iterator): # Skip past any already trained steps if resuming training if steps_trained_in_current_epoch > 0: steps_trained_in_current_epoch -= 1 continue if (step + 1) % self.args.gradient_accumulation_steps == 0: self.control = self.callback_handler.on_step_begin(self.args, self.state, self.control) if ( ((step + 1) % self.args.gradient_accumulation_steps != 0) and self.args.local_rank != -1 and _use_ddp_no_sync ): with model.no_sync(): tr_loss += self.training_step(model, inputs) else: if sub_model_list is not None : tr_loss += self.training_step(model, inputs, sub_model_list, step, epoch) else: tr_loss += self.training_step(model, inputs) self._total_flos += self.floating_point_ops(inputs) if (step + 1) % self.args.gradient_accumulation_steps == 0 or ( # last step in epoch but step is always smaller than gradient_accumulation_steps steps_in_epoch <= self.args.gradient_accumulation_steps and (step + 1) == steps_in_epoch ): # apply adapter fusion weight regularization on the value matrix if ( hasattr(self.model.config, "adapter_fusion") and self.model.config.adapter_fusion["regularization"] ): fusion_reg_loss = self.model.base_model.get_fusion_regularization_loss() fusion_reg_loss.backward() if self.args.fp16 and _use_native_amp: self.scaler.unscale_(self.optimizer) torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm) elif self.args.fp16 and _use_apex: torch.nn.utils.clip_grad_norm_(amp.master_params(self.optimizer), self.args.max_grad_norm) else: torch.nn.utils.clip_grad_norm_(model.parameters(), self.args.max_grad_norm) if is_torch_tpu_available(): xm.optimizer_step(self.optimizer) elif self.args.fp16 and _use_native_amp: self.scaler.step(self.optimizer) self.scaler.update() else: 
self.optimizer.step() self.lr_scheduler.step() model.zero_grad() self.state.global_step += 1 self.state.epoch = epoch + (step + 1) / steps_in_epoch self.control = self.callback_handler.on_step_end(self.args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch) if self.control.should_epoch_stop or self.control.should_training_stop: break self.control = self.callback_handler.on_epoch_end(self.args, self.state, self.control) self._maybe_log_save_evaluate(tr_loss, model, trial, epoch) if self.args.tpu_metrics_debug or self.args.debug: if is_torch_tpu_available(): # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) else: logger.warning( "You enabled PyTorch/XLA debug metrics but you don't have a TPU " "configured. Check your training configuration if this is unexpected." ) if self.control.should_training_stop: break if self.args.past_index and hasattr(self, "_past"): # Clean the state at the end of training delattr(self, "_past") if self.do_save_adapters: logger.info("\n\nTraining completed. Do not forget to share your adapters on https://adapterhub.ml =)\n\n") else: logger.info("\n\nTraining completed. Do not forget to share your model on huggingface.co/models =)\n\n") if self.args.load_best_model_at_end and self.state.best_model_checkpoint is not None: if self.do_save_full_model: logger.info( f"Loading best model from {self.state.best_model_checkpoint} (score: {self.state.best_metric})." ) if isinstance(model, PreTrainedModel): self.model = model.from_pretrained(self.state.best_model_checkpoint) else: state_dict = torch.load(os.path.join(self.state.best_model_checkpoint, WEIGHTS_NAME)) self.model.load_state_dict(state_dict) if self.do_save_adapters: logger.info( f"Loading best adapter(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})." 
) # attempt to re-load all adapters from checkpoint for adapter in self.model.config.adapters.adapters: adapter_dir = os.path.join(self.state.best_model_checkpoint, adapter) if os.path.exists(adapter_dir): self.model.load_adapter(adapter_dir) if self.do_save_adapter_fusion: logger.info( f"Loading best adapter fusion(s) from {self.state.best_model_checkpoint} (score: {self.state.best_metric})." ) # attempt to re-load all adapter fusions from checkpoint for fusion in self.model.config.adapter_fusion_models: fusion_dir = os.path.join(self.state.best_model_checkpoint, fusion) if os.path.exists(fusion_dir): self.model.load_adapter_fusion(fusion_dir) self.model = self.model.to(self.args.device) if self._total_flos is not None: self.store_flos() self.log({"total_flos": self.state.total_flos}) self.control = self.callback_handler.on_train_end(self.args, self.state, self.control) return TrainOutput(self.state.global_step, tr_loss.item() / self.state.global_step) def _maybe_log_save_evaluate(self, tr_loss, model, trial, epoch): if self.control.should_log: logs: Dict[str, float] = {} tr_loss_scalar = tr_loss.item() logs["loss"] = (tr_loss_scalar - self._logging_loss_scalar) / ( self.state.global_step - self._globalstep_last_logged ) # backward compatibility for pytorch schedulers logs["learning_rate"] = ( self.lr_scheduler.get_last_lr()[0] if version.parse(torch.__version__) >= version.parse("1.4") else self.lr_scheduler.get_lr()[0] ) self._logging_loss_scalar = tr_loss_scalar self._globalstep_last_logged = self.state.global_step self.log(logs) metrics = None if self.control.should_evaluate: metrics = self.evaluate() self._report_to_hp_search(trial, epoch, metrics) if self.control.should_save: self._save_checkpoint(model, trial, metrics=metrics) self.control = self.callback_handler.on_save(self.args, self.state, self.control) def _save_checkpoint(self, model, trial, metrics=None): # In all cases (even distributed/parallel), self.model is always a reference # to the model we 
want to save. if hasattr(model, "module"): assert model.module is self.model, f"Module {model.module} should be a reference to self.model" else: assert model is self.model, f"Model {model} should be a reference to self.model" # Save model checkpoint checkpoint_folder = f"{PREFIX_CHECKPOINT_DIR}-{self.state.global_step}" if self.hp_search_backend is not None and trial is not None: run_id = trial.number if self.hp_search_backend == HPSearchBackend.OPTUNA else tune.get_trial_id() run_name = self.hp_name(trial) if self.hp_name is not None else f"run-{run_id}" output_dir = os.path.join(self.args.output_dir, run_name, checkpoint_folder) else: output_dir = os.path.join(self.args.output_dir, checkpoint_folder) self.store_flos() self.save_model(output_dir) # Save optimizer and scheduler if is_torch_tpu_available(): xm.rendezvous("saving_optimizer_states") xm.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) with warnings.catch_warnings(record=True) as caught_warnings: xm.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) reissue_pt_warnings(caught_warnings) elif self.is_world_process_zero(): torch.save(self.optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt")) with warnings.catch_warnings(record=True) as caught_warnings: torch.save(self.lr_scheduler.state_dict(), os.path.join(output_dir, "scheduler.pt")) reissue_pt_warnings(caught_warnings) # Determine the new best metric / best model checkpoint if metrics is not None and self.args.metric_for_best_model is not None: metric_to_check = self.args.metric_for_best_model if not metric_to_check.startswith("eval_"): metric_to_check = f"eval_{metric_to_check}" metric_value = metrics[metric_to_check] operator = np.greater if self.args.greater_is_better else np.less if ( self.state.best_metric is None or self.state.best_model_checkpoint is None or operator(metric_value, self.state.best_metric) ): self.state.best_metric = metric_value self.state.best_model_checkpoint = 
output_dir # Save the Trainer state if self.is_world_process_zero(): self.state.save_to_json(os.path.join(output_dir, "trainer_state.json")) # Maybe delete some older checkpoints. if self.is_world_process_zero(): self._rotate_checkpoints(use_mtime=True) def _load_optimizer_and_scheduler(self, model_path): """If optimizer and scheduler states exist, load them.""" if ( model_path is not None and os.path.isfile(os.path.join(model_path, "optimizer.pt")) and os.path.isfile(os.path.join(model_path, "scheduler.pt")) ): # Load in optimizer and scheduler states if is_torch_tpu_available(): # On TPU we have to take some extra precautions to properly load the states on the right device. optimizer_state = torch.load(os.path.join(model_path, "optimizer.pt"), map_location="cpu") with warnings.catch_warnings(record=True) as caught_warnings: lr_scheduler_state = torch.load(os.path.join(model_path, "scheduler.pt"), map_location="cpu") reissue_pt_warnings(caught_warnings) xm.send_cpu_data_to_device(optimizer_state, self.args.device) xm.send_cpu_data_to_device(lr_scheduler_state, self.args.device) self.optimizer.load_state_dict(optimizer_state) self.lr_scheduler.load_state_dict(lr_scheduler_state) else: self.optimizer.load_state_dict( torch.load(os.path.join(model_path, "optimizer.pt"), map_location=self.args.device) ) with warnings.catch_warnings(record=True) as caught_warnings: self.lr_scheduler.load_state_dict(torch.load(os.path.join(model_path, "scheduler.pt"))) reissue_pt_warnings(caught_warnings) def hyperparameter_search( self, hp_space: Optional[Callable[["optuna.Trial"], Dict[str, float]]] = None, compute_objective: Optional[Callable[[Dict[str, float]], float]] = None, n_trials: int = 20, direction: str = "minimize", backend: Optional[Union["str", HPSearchBackend]] = None, hp_name: Optional[Callable[["optuna.Trial"], str]] = None, **kwargs ) -> BestRun: """ Launch an hyperparameter search using ``optuna`` or ``Ray Tune``. 
The optimized quantity is determined by :obj:`compute_objectie`, which defaults to a function returning the evaluation loss when no metric is provided, the sum of all metrics otherwise. .. warning:: To use this method, you need to have provided a ``model_init`` when initializing your :class:`~transformers.Trainer`: we need to reinitialize the model at each new run. This is incompatible with the ``optimizers`` argument, so you need to subclass :class:`~transformers.Trainer` and override the method :meth:`~transformers.Trainer.create_optimizer_and_scheduler` for custom optimizer/scheduler. Args: hp_space (:obj:`Callable[["optuna.Trial"], Dict[str, float]]`, `optional`): A function that defines the hyperparameter search space. Will default to :func:`~transformers.trainer_utils.default_hp_space_optuna` or :func:`~transformers.trainer_utils.default_hp_space_ray` depending on your backend. compute_objective (:obj:`Callable[[Dict[str, float]], float]`, `optional`): A function computing the objective to minimize or maximize from the metrics returned by the :obj:`evaluate` method. Will default to :func:`~transformers.trainer_utils.default_compute_objective`. n_trials (:obj:`int`, `optional`, defaults to 100): The number of trial runs to test. direction(:obj:`str`, `optional`, defaults to :obj:`"minimize"`): Whether to optimize greater or lower objects. Can be :obj:`"minimize"` or :obj:`"maximize"`, you should pick :obj:`"minimize"` when optimizing the validation loss, :obj:`"maximize"` when optimizing one or several metrics. backend(:obj:`str` or :class:`~transformers.training_utils.HPSearchBackend`, `optional`): The backend to use for hyperparameter search. Will default to optuna or Ray Tune, depending on which one is installed. If both are installed, will default to optuna. kwargs: Additional keyword arguments passed along to :obj:`optuna.create_study` or :obj:`ray.tune.run`. 
For more information see: - the documentation of `optuna.create_study <https://optuna.readthedocs.io/en/stable/reference/alias_generated/optuna.create_study.html#optuna.create_study>`__ - the documentation of `tune.run <https://docs.ray.io/en/latest/tune/api_docs/execution.html#tune-run>`__ Returns: :class:`transformers.trainer_utils.BestRun`: All the information about the best run. """ if backend is None: backend = default_hp_search_backend() if backend is None: raise RuntimeError( "At least one of optuna or ray should be installed. " "To install optuna run `pip install optuna`." "To install ray run `pip install ray[tune]`." ) backend = HPSearchBackend(backend) if backend == HPSearchBackend.OPTUNA and not is_optuna_available(): raise RuntimeError("You picked the optuna backend, but it is not installed. Use `pip install optuna`.") if backend == HPSearchBackend.RAY and not is_ray_available(): raise RuntimeError( "You picked the Ray Tune backend, but it is not installed. Use `pip install 'ray[tune]'`." ) self.hp_search_backend = backend if self.model_init is None: raise RuntimeError( "To use hyperparameter search, you need to pass your model through a model_init function." ) self.hp_space = default_hp_space[backend] if hp_space is None else hp_space self.hp_name = hp_name self.compute_objective = default_compute_objective if compute_objective is None else compute_objective run_hp_search = run_hp_search_optuna if backend == HPSearchBackend.OPTUNA else run_hp_search_ray best_run = run_hp_search(self, n_trials, direction, **kwargs) self.hp_search_backend = None return best_run def log(self, logs: Dict[str, float]) -> None: """ Log :obj:`logs` on the various objects watching training. Subclass and override this method to inject custom behavior. Args: logs (:obj:`Dict[str, float]`): The values to log. 
""" if hasattr(self, "_log"): warnings.warn( "The `_log` method is deprecated and won't be called in a future version, define `log` in your subclass.", FutureWarning, ) return self._log(logs) if self.state.epoch is not None: logs["epoch"] = self.state.epoch self.control = self.callback_handler.on_log(self.args, self.state, self.control, logs) output = {**logs, **{"step": self.state.global_step}} self.state.log_history.append(output) def _prepare_inputs(self, inputs: Dict[str, Union[torch.Tensor, Any]]) -> Dict[str, Union[torch.Tensor, Any]]: """ Prepare :obj:`inputs` before feeding them to the model, converting them to tensors if they are not already and handling potential state. """ for k, v in inputs.items(): if isinstance(v, torch.Tensor): inputs[k] = v.to(self.args.device) if self.args.past_index >= 0 and self._past is not None: inputs["mems"] = self._past if self.adapter_names: inputs["adapter_names"] = self.adapter_names return inputs def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], sub_model_list: List[nn.Module] = None, step=None, epoch=None) -> torch.Tensor: """ Perform a training step on a batch of inputs. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to train. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. Return: :obj:`torch.Tensor`: The tensor with training loss on this batch. 
""" if hasattr(self, "_training_step"): warnings.warn( "The `_training_step` method is deprecated and won't be called in a future version, define `training_step` in your subclass.", FutureWarning, ) return self._training_step(model, inputs, self.optimizer, step) model.train() if sub_model_list is not None: for sub_model in sub_model_list: sub_model.eval() inputs = self._prepare_inputs(inputs) if self.args.fp16 and _use_native_amp: with autocast(): loss = self.compute_loss(model, inputs) else: if sub_model_list is not None: loss = self.compute_loss(model, inputs, sub_model_list, step, epoch) else: loss = self.compute_loss(model, inputs) if self.args.n_gpu > 1: loss = loss.mean() # mean() to average on multi-gpu parallel training if self.args.gradient_accumulation_steps > 1: loss = loss / self.args.gradient_accumulation_steps if self.args.fp16 and _use_native_amp: self.scaler.scale(loss).backward() elif self.args.fp16 and _use_apex: with amp.scale_loss(loss, self.optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() return loss.detach() def compute_loss(self, model, inputs, sub_model_list=None, step=None, epoch=None): """ How the loss is computed by Trainer. By default, all models return the loss in the first element. Subclass and override for customs behavior. """ if sub_model_list is not None: #multi label # attention_label = self._multi_label(sub_model_list, inputs) #single label attention_label = self._single_label(sub_model_list, inputs) # attention_label = self._negative_single_label(sub_model_list, inputs) else: attention_label = None outputs = model(**inputs,attention_label=attention_label, step=step, epoch=epoch) # Save past state if it exists if self.args.past_index >= 0: self._past = outputs[self.args.past_index] # We don't use .loss here since the model may return tuples instead of ModelOutput. 
return outputs[0] def _multi_label(self, sub_model_list, inputs): attention_label_list = [] for sub_model in sub_model_list: loss, logits, labels = self.prediction_step(model=sub_model,inputs=inputs, prediction_loss_only=False) preds = torch.argmax(logits, axis=-1) b_preds = [int(torch.eq(preds[i],labels[i]).item()) for i in range(labels.shape[-1])] attention_label_list.append(b_preds) attention_label = torch.tensor(attention_label_list).transpose(-1,0) # attention_label =[batch_num,answer_choice_num] return attention_label def _single_label(self, sub_model_list, inputs): logit_list = [] c_labels = None for sub_model in sub_model_list: loss, logits, labels = self.prediction_step(model=sub_model,inputs=inputs, prediction_loss_only=False) s_logits = nn.Softmax(dim=-1)(logits) logit_list.append(s_logits) if c_labels is not None: assert (torch.equal(c_labels, labels)), "labels between sub models are different." c_labels = labels stack_all = torch.stack(logit_list) attention_label_list = [] for i in range(stack_all.shape[1]): answer_index = None best_var = 0 for j in range(stack_all.shape[0]): if torch.argmax(stack_all[j][i], dim=-1) == c_labels[i].item(): if torch.std(stack_all[j][i]).item() > best_var: best_var = torch.std(stack_all[j][i]).item() answer_index = j attention_label_list.append(answer_index) attention_label = [] for answer_label in attention_label_list: exp_label = [] for choice in range(stack_all.shape[0]): if answer_label == choice: exp_label.append(1) else: exp_label.append(0) attention_label.append(exp_label) attention_label = torch.tensor(attention_label) # attention_label =[8,3] return attention_label def _negative_single_label(self, sub_model_list, inputs): logit_list = [] c_labels = None for sub_model in sub_model_list: loss, logits, labels = self.prediction_step(model=sub_model,inputs=inputs, prediction_loss_only=False) s_logits = nn.Softmax(dim=-1)(logits) logit_list.append(s_logits) if c_labels is not None: assert (torch.equal(c_labels, 
labels)), "labels between sub models are different." c_labels = labels stack_all = torch.stack(logit_list) attention_label_list = [] for i in range(stack_all.shape[1]): answer_index = None wrong_index = None best_var = 0 worst_var = 0 for j in range(stack_all.shape[0]): if torch.argmax(stack_all[j][i], dim=-1) == c_labels[i].item(): if torch.std(stack_all[j][i]).item() > best_var: best_var = torch.std(stack_all[j][i]).item() answer_index = j else: if torch.std(stack_all[j][i]).item() > worst_var: worst_var = torch.std(stack_all[j][i]).item() wrong_index = j attention_label_list.append((answer_index, wrong_index)) attention_label = [] for (answer_label, wrong_label) in attention_label_list: exp_label = [] for choice in range(stack_all.shape[0]): if answer_label == choice: exp_label.append(1) elif wrong_label == choice: exp_label.append(-1) else: exp_label.append(0) attention_label.append(exp_label) attention_label = torch.tensor(attention_label) # attention_label =[8,3] return attention_label def is_local_master(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. .. warning:: This method is deprecated, use :meth:`~transformers.Trainer.is_local_process_zero` instead. """ warnings.warn("This method is deprecated, use `Trainer.is_local_process_zero()` instead.", FutureWarning) return self.is_local_process_zero() def is_local_process_zero(self) -> bool: """ Whether or not this process is the local (e.g., on one machine if training in a distributed fashion on several machines) main process. """ if is_torch_tpu_available(): return xm.is_master_ordinal(local=True) else: return self.args.local_rank in [-1, 0] def is_world_master(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be :obj:`True` for one process). .. 
warning:: This method is deprecated, use :meth:`~transformers.Trainer.is_world_process_zero` instead. """ warnings.warn("This method is deprecated, use `Trainer.is_world_process_zero()` instead.", FutureWarning) return self.is_world_process_zero() def is_world_process_zero(self) -> bool: """ Whether or not this process is the global main process (when training in a distributed fashion on several machines, this is only going to be :obj:`True` for one process). """ if is_torch_tpu_available(): return xm.is_master_ordinal(local=False) else: return self.args.local_rank == -1 or torch.distributed.get_rank() == 0 def save_model(self, output_dir: Optional[str] = None): """ Will save the model, so you can reload it using :obj:`from_pretrained()`. Will only save from the world_master process (unless in TPUs). """ if is_torch_tpu_available(): self._save_tpu(output_dir) elif self.is_world_process_zero(): self._save(output_dir) def _save_tpu(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir logger.info("Saving model checkpoint to %s", output_dir) if xm.is_master_ordinal(): os.makedirs(output_dir, exist_ok=True) torch.save(self.args, os.path.join(output_dir, "training_args.bin")) # Save a trained model and configuration using `save_pretrained()`. 
# They can then be reloaded using `from_pretrained()` xm.rendezvous("saving_checkpoint") if not isinstance(self.model, PreTrainedModel): logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = self.model.state_dict() xm.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: if self.do_save_adapters: self.model.save_all_adapters(output_dir) if self.do_save_adapter_fusion: self.model.save_all_adapter_fusions(output_dir) if self.do_save_full_model: self.model.save_pretrained(output_dir) if self.tokenizer is not None and self.is_world_process_zero(): self.tokenizer.save_pretrained(output_dir) def _save(self, output_dir: Optional[str] = None): output_dir = output_dir if output_dir is not None else self.args.output_dir os.makedirs(output_dir, exist_ok=True) logger.info("Saving model checkpoint to %s", output_dir) # Save a trained model and configuration using `save_pretrained()`. # They can then be reloaded using `from_pretrained()` if not isinstance(self.model, PreTrainedModel): logger.info("Trainer.model is not a `PreTrainedModel`, only saving its state dict.") state_dict = self.model.state_dict() torch.save(state_dict, os.path.join(output_dir, WEIGHTS_NAME)) else: if self.do_save_adapters: self.model.save_all_adapters(output_dir) if self.do_save_adapter_fusion: self.model.save_all_adapter_fusions(output_dir) if self.do_save_full_model: self.model.save_pretrained(output_dir) if self.tokenizer is not None and self.is_world_process_zero(): self.tokenizer.save_pretrained(output_dir) # Good practice: save your training arguments together with the trained model torch.save(self.args, os.path.join(output_dir, "training_args.bin")) def store_flos(self): # Storing the number of floating-point operations that went into the model if self._total_flos is not None: if self.args.local_rank != -1: self.state.total_flos = distributed_broadcast_scalars([self._total_flos]).sum().item() else: self.state.total_flos = self._total_flos def 
_sorted_checkpoints(self, checkpoint_prefix=PREFIX_CHECKPOINT_DIR, use_mtime=False) -> List[str]: ordering_and_checkpoint_path = [] glob_checkpoints = [str(x) for x in Path(self.args.output_dir).glob(f"{checkpoint_prefix}-*")] for path in glob_checkpoints: if use_mtime: ordering_and_checkpoint_path.append((os.path.getmtime(path), path)) else: regex_match = re.match(f".*{checkpoint_prefix}-([0-9]+)", path) if regex_match and regex_match.groups(): ordering_and_checkpoint_path.append((int(regex_match.groups()[0]), path)) checkpoints_sorted = sorted(ordering_and_checkpoint_path) checkpoints_sorted = [checkpoint[1] for checkpoint in checkpoints_sorted] # Make sure we don't delete the best model. if self.state.best_model_checkpoint is not None: best_model_index = checkpoints_sorted.index(str(Path(self.state.best_model_checkpoint))) checkpoints_sorted[best_model_index], checkpoints_sorted[-1] = ( checkpoints_sorted[-1], checkpoints_sorted[best_model_index], ) return checkpoints_sorted def _rotate_checkpoints(self, use_mtime=False) -> None: if self.args.save_total_limit is None or self.args.save_total_limit <= 0: return # Check if we should delete older checkpoint(s) checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime) if len(checkpoints_sorted) <= self.args.save_total_limit: return number_of_checkpoints_to_delete = max(0, len(checkpoints_sorted) - self.args.save_total_limit) checkpoints_to_be_deleted = checkpoints_sorted[:number_of_checkpoints_to_delete] for checkpoint in checkpoints_to_be_deleted: logger.info("Deleting older checkpoint [{}] due to args.save_total_limit".format(checkpoint)) shutil.rmtree(checkpoint) def evaluate(self, eval_dataset: Optional[Dataset] = None) -> Dict[str, float]: """ Run evaluation and returns metrics. The calling script will be responsible for providing a method to compute metrics, as they are task-dependent (pass it to the init :obj:`compute_metrics` argument). 
You can also subclass and override this method to inject custom behavior. Args: eval_dataset (:obj:`Dataset`, `optional`): Pass a dataset if you wish to override :obj:`self.eval_dataset`. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. It must implement the :obj:`__len__` method. Returns: A dictionary containing the evaluation loss and the potential metrics computed from the predictions. The dictionary also contains the epoch number which comes from the training state. """ if eval_dataset is not None and not isinstance(eval_dataset, collections.abc.Sized): raise ValueError("eval_dataset must implement __len__") eval_dataloader = self.get_eval_dataloader(eval_dataset) output = self.prediction_loop( eval_dataloader, description="Evaluation", # No point gathering the predictions if there are no metrics, otherwise we defer to # self.args.prediction_loss_only prediction_loss_only=True if self.compute_metrics is None else None, ) self.log(output.metrics) if self.args.tpu_metrics_debug or self.args.debug: # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.) xm.master_print(met.metrics_report()) self.control = self.callback_handler.on_evaluate(self.args, self.state, self.control, output.metrics) return output.metrics def predict(self, test_dataset: Dataset) -> PredictionOutput: """ Run prediction and returns predictions and potential metrics. Depending on the dataset and your use case, your test dataset may contain labels. In that case, this method will also return metrics, like in :obj:`evaluate()`. Args: test_dataset (:obj:`Dataset`): Dataset to run the predictions on. If it is an :obj:`datasets.Dataset`, columns not accepted by the ``model.forward()`` method are automatically removed. Has to implement the method :obj:`__len__` .. 
note:: If your predictions or labels have different sequence length (for instance because you're doing dynamic padding in a token classification task) the predictions will be padded (on the right) to allow for concatenation into one array. The padding index is -100. Returns: `NamedTuple` A namedtuple with the following keys: - predictions (:obj:`np.ndarray`): The predictions on :obj:`test_dataset`. - label_ids (:obj:`np.ndarray`, `optional`): The labels (if the dataset contained some). - metrics (:obj:`Dict[str, float]`, `optional`): The potential dictionary of metrics (if the dataset contained labels). """ if test_dataset is not None and not isinstance(test_dataset, collections.abc.Sized): raise ValueError("test_dataset must implement __len__") test_dataloader = self.get_test_dataloader(test_dataset) return self.prediction_loop(test_dataloader, description="Prediction") def prediction_loop( self, dataloader: DataLoader, description: str, prediction_loss_only: Optional[bool] = None ) -> PredictionOutput: """ Prediction/evaluation loop, shared by :obj:`Trainer.evaluate()` and :obj:`Trainer.predict()`. Works both with or without labels. """ if hasattr(self, "_prediction_loop"): warnings.warn( "The `_prediction_loop` method is deprecated and won't be called in a future version, define `prediction_loop` in your subclass.", FutureWarning, ) return self._prediction_loop(dataloader, description, prediction_loss_only=prediction_loss_only) if not isinstance(dataloader.dataset, collections.abc.Sized): raise ValueError("dataset must implement __len__") prediction_loss_only = ( prediction_loss_only if prediction_loss_only is not None else self.args.prediction_loss_only ) model = self.model # multi-gpu eval if self.args.n_gpu > 1: model = torch.nn.DataParallel(model) # Note: in torch.distributed mode, there's no point in wrapping the model # inside a DistributedDataParallel as we'll be under `no_grad` anyways. 
batch_size = dataloader.batch_size num_examples = self.num_examples(dataloader) logger.info("***** Running %s *****", description) logger.info(" Num examples = %d", num_examples) logger.info(" Batch size = %d", batch_size) losses_host: torch.Tensor = None preds_host: Union[torch.Tensor, List[torch.Tensor]] = None labels_host: Union[torch.Tensor, List[torch.Tensor]] = None world_size = 1 if is_torch_tpu_available(): world_size = xm.xrt_world_size() elif self.args.local_rank != -1: world_size = torch.distributed.get_world_size() world_size = max(1, world_size) eval_losses_gatherer = DistributedTensorGatherer(world_size, num_examples, make_multiple_of=batch_size) if not prediction_loss_only: preds_gatherer = DistributedTensorGatherer(world_size, num_examples) labels_gatherer = DistributedTensorGatherer(world_size, num_examples) model.eval() if is_torch_tpu_available(): dataloader = pl.ParallelLoader(dataloader, [self.args.device]).per_device_loader(self.args.device) if self.args.past_index >= 0: self._past = None self.callback_handler.eval_dataloader = dataloader for step, inputs in enumerate(dataloader): loss, logits, labels = self.prediction_step(model, inputs, prediction_loss_only) if loss is not None: losses = loss.repeat(batch_size) losses_host = losses if losses_host is None else torch.cat((losses_host, losses), dim=0) if logits is not None: preds_host = logits if preds_host is None else nested_concat(preds_host, logits, padding_index=-100) if labels is not None: labels_host = labels if labels_host is None else nested_concat(labels_host, labels, padding_index=-100) self.control = self.callback_handler.on_prediction_step(self.args, self.state, self.control) # Gather all tensors and put them back on the CPU if we have done enough accumulation steps. 
if self.args.eval_accumulation_steps is not None and (step + 1) % self.args.eval_accumulation_steps == 0: eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) # Set back to None to begin a new accumulation losses_host, preds_host, labels_host = None, None, None if self.args.past_index and hasattr(self, "_past"): # Clean the state at the end of the evaluation loop delattr(self, "_past") # Gather all remaining tensors and put them back on the CPU eval_losses_gatherer.add_arrays(self._gather_and_numpify(losses_host, "eval_losses")) if not prediction_loss_only: preds_gatherer.add_arrays(self._gather_and_numpify(preds_host, "eval_preds")) labels_gatherer.add_arrays(self._gather_and_numpify(labels_host, "eval_label_ids")) eval_loss = eval_losses_gatherer.finalize() preds = preds_gatherer.finalize() if not prediction_loss_only else None label_ids = labels_gatherer.finalize() if not prediction_loss_only else None if self.compute_metrics is not None and preds is not None and label_ids is not None: metrics = self.compute_metrics(EvalPrediction(predictions=preds, label_ids=label_ids)) else: metrics = {} if eval_loss is not None: metrics["eval_loss"] = eval_loss.mean().item() # Prefix all keys with eval_ for key in list(metrics.keys()): if not key.startswith("eval_"): metrics[f"eval_{key}"] = metrics.pop(key) return PredictionOutput(predictions=preds, label_ids=label_ids, metrics=metrics) def _gather_and_numpify(self, tensors, name): """ Gather value of `tensors` (tensor or list/tuple of nested tensors) and convert them to numpy before concatenating them to `gathered` """ if tensors is None: return if is_torch_tpu_available(): tensors = nested_xla_mesh_reduce(tensors, name) elif self.args.local_rank != -1: tensors = distributed_concat(tensors) return 
nested_numpify(tensors) def prediction_step( self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]], prediction_loss_only: bool ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: """ Perform an evaluation step on :obj:`model` using obj:`inputs`. Subclass and override to inject custom behavior. Args: model (:obj:`nn.Module`): The model to evaluate. inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. The dictionary will be unpacked before being fed to the model. Most models expect the targets under the argument :obj:`labels`. Check your model's documentation for all accepted arguments. prediction_loss_only (:obj:`bool`): Whether or not to return the loss only. Return: Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and labels (each being optional). """ has_labels = all(inputs.get(k) is not None for k in self.label_names) inputs = self._prepare_inputs(inputs) with torch.no_grad(): if self.args.fp16 and _use_native_amp: with autocast(): outputs = model(**inputs) else: outputs = model(**inputs) if has_labels: loss = outputs[0].mean().detach() logits = outputs[1:] else: loss = None # Slicing so we get a tuple even if `outputs` is a `ModelOutput`. logits = outputs[:] if self.args.past_index >= 0: self._past = outputs[self.args.past_index if has_labels else self.args.past_index - 1] # Remove the past from the logits. 
logits = logits[: self.args.past_index - 1] + logits[self.args.past_index :] if prediction_loss_only: return (loss, None, None) logits = nested_detach(logits) if len(logits) == 1: logits = logits[0] if has_labels: labels = nested_detach(tuple(inputs.get(name) for name in self.label_names)) if len(labels) == 1: labels = labels[0] else: labels = None return (loss, logits, labels) def floating_point_ops(self, inputs: Dict[str, Union[torch.Tensor, Any]]): """ For models that inherit from :class:`~transformers.PreTrainedModel`, uses that method to compute the number of floating point operations for every backward + forward pass. If using another model, either implement such a method in the model or subclass and override this method. Args: inputs (:obj:`Dict[str, Union[torch.Tensor, Any]]`): The inputs and targets of the model. Returns: :obj:`int`: The number of floating-point operations. """ model = self._actual_model(self.model) if hasattr(model, "floating_point_ops"): return model.floating_point_ops(inputs) else: return 0 @staticmethod def _actual_model( model: Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module] ) -> torch.nn.modules.Module: """ Args: model: (:obj:`Union[torch.nn.DataParallel, torch.nn.parallel.DistributedDataParallel, torch.nn.modules.Module]`): Model object used during training Returns: :obj:`torch.nn.modules.Module`: unwrapped module """ if isinstance(model, torch.nn.DataParallel) or isinstance(model, torch.nn.parallel.DistributedDataParallel): model = model.module else: model = model return model
[ "yujin000731@yonsei.ac.kr" ]
yujin000731@yonsei.ac.kr
b21888d27fc947e69bc03554f871529283790509
202317110becaa376dd2779fbcfe26de045e4a2e
/01_spider/01_chinaCrop/2.py
7330b15e53002b6af72cdcab4c6c6ca68921a6f3
[]
no_license
xuming1985/python3
a1aab2821ce16098f0e9526164947d97ea27f8f4
8cbe6ed7e63fbd3fefe6670d403ab2d907b2cb85
refs/heads/master
2020-03-27T14:58:22.595101
2018-08-31T01:49:48
2018-08-31T01:49:48
146,690,359
0
0
null
null
null
null
UTF-8
Python
false
false
2,087
py
#coding=utf-8 import requests import time, json, re import math import pymongo url_root = 'http://www.cgris.net/query/do.php#粮食作物,小麦' url_data = 'http://www.cgris.net/query/o.php' session = requests.session() response = session.get(url_root) # 初始化请求, 请求作物类别的属性和下拉框的key value 对应关系 r1 = session.post(url_data, data={'action': 'init', 'croptype': ['粮食作物', '小麦'], '_': ''}) result1 = r1.text[r1.text.rfind('>') + 1: len(r1.text)] time.sleep(2) #连接Mongo数据库 client = pymongo.MongoClient(host='localhost', port=27017) db = client.spider collection = db.chinaCrop total = 1 # 假设类别下共有1条数据 pageSize = 100 # 每页100个 s = 0 # 当前第几页查询,初始0 # 如果当前页码小于等于总页码,则进行查询,否则跳出循环 while s <= math.floor(total / pageSize): # 查询请求, 查询类别下作物总数和当前页码下的作物ID列表 r2 = session.post(url_data, data={'action': 'query', 'p': {}, 's': s, 'croptype': ['粮食作物', '小麦'], '_': ''}) # 返回结果去除没用信息 result2 = r2.text[r2.text.rfind('>') + 1: len(r2.text)] # 作物的总数量 total = int(result2[2: result2.find(',')]) # 当前页面下的作物ID列表 tags = re.findall(r"(\d{2})[-](\d{5})", result2) # 循环作物列表,分别根据作物ID查询作物的详细信息 for index in range(len(tags)): # 解析作物的ID tag_str = tags[index][0] + "-" + tags[index][1] # 查询作物的详细信息 r3 = session.post(url_data, data={'action': 'item', 'p': tag_str, 'croptype': ['粮食作物', '小麦'], '_': ''}) # 解析返回结果 item_content = r3.text[r3.text.rfind('>') + 1:len(r3.text)] # 组装成json 对象 item = json.loads(item_content, encoding='utf-8') # 插入到数据库 collection.insert(item) # 暂停一秒钟,继续执行下一次循环 time.sleep(1) #页码加1, 查询下100条数据 s += 1
[ "m.b.xu@accenture.com" ]
m.b.xu@accenture.com
df0e431f178b5abc7f31bb1d09dbf97beda0f30c
e077d71357cd5791c5eca950449a718f9f11a3a8
/api_test/tf_example_test.py
65c62447aaeb851a1339429b533001bdcfaeca13
[]
no_license
smartlmy/tensorflow_project
c9ff22388c2360537a6640a5e1355a278b3c2a06
446a5bea35fd9f004ef7056a789f037f5fa34865
refs/heads/master
2023-04-22T03:08:01.476558
2021-05-08T07:32:30
2021-05-08T07:32:30
299,497,594
0
0
null
null
null
null
UTF-8
Python
false
false
4,166
py
# Demo script: build tf.train.Example protos, write them to a TFRecord file,
# then read and parse them back (follows the TensorFlow tf.Example tutorial).
import tensorflow as tf
import numpy as np


def _bytes_feature(value):
    """Returns a bytes_list from a string / byte."""
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()  # BytesList won't unpack a string from an EagerTensor.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _float_feature(value):
    """Returns a float_list from a float / double."""
    return tf.train.Feature(float_list=tf.train.FloatList(value=[value]))


def _int64_feature(value):
    """Returns an int64_list from a bool / enum / int / uint."""
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


# Smoke-print the three helpers on sample values.
print(_bytes_feature(b'test_string'))
print(_bytes_feature(u'test_bytes'.encode('utf-8')))

print(_float_feature(np.exp(1)))

print(_int64_feature(True))
print(_int64_feature(1))

feature = _float_feature(np.exp(1))

print(feature.SerializeToString())

# The number of observations in the dataset.
n_observations = int(1e4)

# Boolean feature, encoded as False or True.
feature0 = np.random.choice([False, True], n_observations)

# Integer feature, random from 0 to 4.
feature1 = np.random.randint(0, 5, n_observations)

# String feature
strings = np.array([b'cat', b'dog', b'chicken', b'horse', b'goat'])
feature2 = strings[feature1]

# Float feature, from a standard normal distribution
feature3 = np.random.randn(n_observations)


def serialize_example(feature0, feature1, feature2, feature3):
    """
    Creates a tf.Example message ready to be written to a file.
    """
    # Create a dictionary mapping the feature name to the tf.Example-compatible
    # data type.
    feature = {
        'feature0': _int64_feature(feature0),
        'feature1': _int64_feature(feature1),
        'feature2': _bytes_feature(feature2),
        'feature3': _float_feature(feature3),
    }
    # Create a Features message using tf.train.Example.
    example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
    return example_proto.SerializeToString()


example_observation = []

serialized_example = serialize_example(False, 4, b'goat', 0.9876)
print(serialized_example)

# Round-trip: decode the serialized proto back into an Example message.
example_proto = tf.train.Example.FromString(serialized_example)
print(example_proto)

print(tf.data.Dataset.from_tensor_slices(feature1))

features_dataset = tf.data.Dataset.from_tensor_slices((feature0, feature1, feature2, feature3))
print(features_dataset)

# Inspect one element of the dataset.
for f0, f1, f2, f3 in features_dataset.take(1):
    print(f0)
    print(f1)
    print(f2)
    print(f3)


def generator():
    # Yield every observation as a serialized tf.Example string.
    for features in features_dataset:
        yield serialize_example(*features)


serialized_features_dataset = tf.data.Dataset.from_generator(
    generator, output_types=tf.string, output_shapes=())

# Write all serialized examples to a TFRecord file on disk.
filename = '../data/test.tfrecord'
writer = tf.data.experimental.TFRecordWriter(filename)
writer.write(serialized_features_dataset)

# Read the file back as a dataset of raw (still-serialized) records.
filenames = [filename]
raw_dataset = tf.data.TFRecordDataset(filenames)
raw_dataset

for raw_record in raw_dataset.take(10):
    print(repr(raw_record))

# Create a description of the features.
feature_description = {
    'feature0': tf.io.FixedLenFeature([], tf.int64, default_value=0),
    'feature1': tf.io.FixedLenFeature([], tf.int64, default_value=0),
    'feature2': tf.io.FixedLenFeature([], tf.string, default_value=''),
    'feature3': tf.io.FixedLenFeature([], tf.float32, default_value=0.0),
}


def _parse_function(example_proto):
    # Parse the input `tf.Example` proto using the dictionary above.
    return tf.io.parse_single_example(example_proto, feature_description)


# Create a description of the features.
# NOTE(review): this second definition shadows the one above and keeps only
# feature0/feature1 -- presumably deliberate for the batched demo below,
# but worth confirming.
feature_description = {
    'feature0': tf.io.FixedLenFeature([], tf.int64, default_value=0),
    'feature1': tf.io.FixedLenFeature([], tf.int64, default_value=0),
    # 'feature2': tf.io.FixedLenFeature([], tf.string, default_value=''),
    # 'feature3': tf.io.FixedLenFeature([], tf.float32, default_value=0.0),
}


def _parse_function(example_proto):
    # Parse the input `tf.Example` proto using the dictionary above.
    return tf.io.parse_single_example(example_proto, feature_description)


parsed_dataset = raw_dataset.map(_parse_function).batch(2)
print(parsed_dataset)

for parsed_record in parsed_dataset.take(10):
    print(repr(parsed_record))
[ "smart_lmy@163.com" ]
smart_lmy@163.com
5113f8bf9f0595543e85f6a8f9655e1f589b4282
6d724d9326ede63fd940cc5d39920f38d987e716
/shop/migrations/0004_orders_orderupdate.py
9b38da972769d22736faa52aba4630c6afddc452
[]
no_license
Alan-thapa98/mac
5dea8254276ce79fd7f11e20772b43e3a9943602
a5317bcb1d6b1fde9b726dc2b0c99ddd85f18b45
refs/heads/master
2023-07-11T05:45:05.075152
2021-07-30T12:00:02
2021-07-30T12:00:02
391,047,535
2
0
null
null
null
null
UTF-8
Python
false
false
1,370
py
# Generated by Django 3.1.2 on 2021-01-24 12:43 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('shop', '0003_contact'), ] operations = [ migrations.CreateModel( name='Orders', fields=[ ('order_id', models.AutoField(primary_key=True, serialize=False)), ('items_json', models.CharField(max_length=5000)), ('amount', models.IntegerField(default=0)), ('name', models.CharField(max_length=90)), ('email', models.CharField(max_length=111)), ('address', models.CharField(max_length=111)), ('city', models.CharField(max_length=111)), ('state', models.CharField(max_length=111)), ('zip_code', models.CharField(max_length=111)), ('phone', models.CharField(default='', max_length=111)), ], ), migrations.CreateModel( name='OrderUpdate', fields=[ ('update_id', models.AutoField(primary_key=True, serialize=False)), ('order_id', models.IntegerField(default='')), ('update_desc', models.CharField(max_length=5000)), ('timestamp', models.DateField(auto_now_add=True)), ], ), ]
[ "alanthapa98.gmail.com" ]
alanthapa98.gmail.com
de6131cb7460f4df0537d86258086f70cd965e4f
73fbdbe4943cd4a8de371ba1af4b5cdfea3138d8
/project4_lyrics/lyrics_project/main.py
5b2eae2671200684d80d3cc5530e8486ab9cf16a
[]
no_license
GParolini/spiced_academy_projects
74524d99842e7659a38371b6e697f9fd90a9e0fa
64b9458c9294a767636211d59ae00e329fb527f5
refs/heads/master
2023-05-31T05:30:07.692702
2021-06-21T08:54:46
2021-06-21T08:54:46
363,920,518
0
0
null
2021-05-03T13:33:28
2021-05-03T12:22:05
null
UTF-8
Python
false
false
4,865
py
#!/usr/bin/env python
# coding: utf-8

# # Project 4: Web scraping and text classification
# Interactive console app: scrapes lyrics for two artists from lyrics.com,
# trains Logistic Regression and Naive Bayes classifiers on TF-IDF features,
# then lets the user classify pasted lyrics.
# NOTE(review): get_lyric_urls / get_lyrics / read_metadata / read_lyrics /
# clean_text_to_list come from the star import of `utilities` -- confirm there.

from colorama import init
from colorama import deinit
from colorama import Fore, Back, Style
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from utilities import *

#Color print in terminal
init()

# Scraping data for artist1
print(Style.BRIGHT + Fore.RED + "Welcome to your lyrics finder")
print(Fore.RED + "I can help you find the lyrics of your favourite artist on lyrics.com")
print(Fore.GREEN + "Please provide below the name of the artist")
name1=input()
print(Fore.GREEN + "Please provide below the link to the artist webpage on lyrics.com")
url1=input()
urls_lyrics_list1=get_lyric_urls(url1, name1)
lyrics_files1 = get_lyrics(urls_lyrics_list1, name1)

# Reading the scraped data for artist1
metadata_df1 = read_metadata(name1)
lyrics_df1 = read_lyrics(name1)
df_artist1 = metadata_df1.merge(lyrics_df1)

# Scraping data for artist2
print(Fore.RED + "You can select a second artist and then you can quiz me about the two artists")
print(Fore.GREEN + "Please provide below the name of the artist")
name2 =input()
print(Fore.GREEN + "Please provide below the link to the artist webpage on lyrics.com")
url2=input()
urls_lyrics_list2=get_lyric_urls(url2, name2)
lyrics_files2 = get_lyrics(urls_lyrics_list2, name2)

# Reading the scraped data for artist2
metadata_df2 = read_metadata(name2)
lyrics_df2 = read_lyrics(name2)
df_artist2 = metadata_df2.merge(lyrics_df2)

# Joining the two artists' dataframes
df = pd.concat([df_artist1, df_artist2])

#train-test split (the "author" column is the label)
X_train, X_test, y_train, y_test = train_test_split(df.drop(["author"], axis=1), df["author"],
                                        test_size=0.2, random_state=42)

#cleaning the lyrics tests and transforming them in a list of strings
list_cleaned_lyrics_train = clean_text_to_list(X_train)
labels_train = y_train.tolist()

#Bag of words (TF-IDF weighted)
vect = TfidfVectorizer()
X = vect.fit_transform(list_cleaned_lyrics_train)

#Transforming the test set (transform only -- fitted on train data)
list_cleaned_lyrics_test = clean_text_to_list(X_test)
X_test_transformed = vect.transform(list_cleaned_lyrics_test)

#Fitting a logistic regression model
model_lr = LogisticRegression(class_weight='balanced').fit(X, y_train)
score_lr = model_lr.score(X, y_train)

#Checking how the logistic regression model performs on the test set
ypred = model_lr.predict(X_test_transformed)
score_lr = model_lr.score(X_test_transformed,y_test)
probs_lr = model_lr.predict_proba(X_test_transformed)
print(Fore.RED + "I am a data savvy software.")
print(Fore.RED + "I can tell you that a logistic regression model applied to classify")
print(Fore.RED + "the data of your two artists has a score of ", Back.GREEN + str(score_lr))
print(Back.RESET + Fore.RED + "and the probabilities for each entry in the test set are as follow ",
      Fore.RESET + str(probs_lr))

#Fitting a Naive Bayes model
model_nb = MultinomialNB(alpha=1).fit(X, y_train)
model_nb.score(X, y_train)

#Checking how the Naive Bayes Model performs on the test set
ypred_nb = model_nb.predict(X_test_transformed)
score_nb = model_nb.score(X_test_transformed,y_test)
probs_nb = model_nb.predict_proba(X_test_transformed)
print(Back.RESET + Fore.RED + "Do no take me for a pedantic software, but I can also tell you that")
print(Fore.RED + "a Naive Bayes model applied to classify the data of your two artists has a score of ",
      Back.GREEN + str(score_nb))
print(Back.RESET + Fore.RED + "and the probabilities for each entry in the test set are as follow ",
      Back.RESET + Fore.RESET + str(probs_nb))

#Testing user input: classify lyrics pasted by the user with the chosen model
print(Back.RESET + Fore.RED + "Now, please select a model between Logistic Regression and Naive Bayes.")
print(Fore.RED + "Then you can quiz me with a few of your favourite lyrics.")
print(Fore.RED + "I will tell you who is the author of the lyrics.")
print(Fore.GREEN + "Please input your model choice (LR for Logistic Regression and NB for Naive Bayes)")
model_to_use = input()
print(Fore.GREEN + "Please input some lyrics for me to examine: ")
user_lyrics = input()
user_lyrics_transformed = vect.transform([user_lyrics])

if model_to_use=="LR":
    lr_pred = model_lr.predict(user_lyrics_transformed)
    lr_prob = model_lr.predict_proba(user_lyrics_transformed)
    print(Fore.YELLOW + Back.BLACK + str(lr_pred), str(lr_prob))

if model_to_use=="NB":
    nb_pred = model_nb.predict(user_lyrics_transformed)
    nb_prob = model_nb.predict_proba(user_lyrics_transformed)
    print(Fore.YELLOW + Back.BLACK + str(nb_pred), str(nb_prob))

if (model_to_use !="LR") and (model_to_use !="NB"):
    out = "You did not select a valid model"
    print(Fore.YELLOW + Back.BLACK + out)

# Restore normal terminal colors.
deinit()
[ "giudittaparolini@gmail.com" ]
giudittaparolini@gmail.com
274b6ad75620e1e8b2f7f6dee9b845a4faf52bd9
29f6b48c49b19b4ca969ba4f8d9920882449cc95
/mision1/mision1_3.py
e6490a7224ad582e70c385d2146e93b9ed9eb6b7
[]
no_license
jgm88/PythonNXT
627e9bcd2a061c349630da9d5e9f6516c51ec5f1
8f0e31dfa5893aa512f286d152d8217c8a58a2ad
refs/heads/master
2020-05-29T12:30:53.832929
2015-05-18T10:22:55
2015-05-18T10:22:55
null
0
0
null
null
null
null
UTF-8
Python
false
false
2,404
py
#!/usr/bin/env python
# Drive controller for a LEGO NXT brick: two synchronized wheel motors
# (ports B/C) plus an arm motor (port A), connected over USB or Bluetooth.

import nxt.bluesock
from nxt.motor import *
import time
import msvcrt
import math


def connect(mode, mac):
    # "Usb" -> discover the first brick on USB; anything else -> Bluetooth by MAC.
    if(mode=="Usb"):
        return nxt.locator.find_one_brick()
    else:
        return nxt.bluesock.BlueSock(mac).connect()

# brick = connect('00:16:53:09:46:3B')


class Robot:

    def __init__(self, brick, tam_encoder=360, wheel_diameter=5.6):
        # State vector [moving-forward, moving-backward], used to know in
        # which direction to accelerate.
        self.vState = [False, False]
        self.power = 60
        self.separationBetweenWheels_ = 13
        self.syncMotor = SynchronizedMotors(Motor(brick, PORT_B), Motor(brick, PORT_C), 0)
        self.arm = Motor(brick, PORT_A)
        # Distance travelled per encoder tick (wheel circumference / ticks per turn).
        self.cuenta_= ((wheel_diameter*math.pi)/tam_encoder)
        # 1. Compute the encoder ticks needed to turn to one side.
        # Assuming a turn in place has radius separationBetweenWheels, a single
        # turn covers one quarter of the circumference's perimeter.
        self.turn_perimeter = (math.pi * 2.0 * self.separationBetweenWheels_) / 4.0
        self.cuentasGiro_ = self.turn_perimeter / self.cuenta_

    def move(self, direction):
        # vState[0] = moving forward, vState[1] = moving backward.
        # direction is +1 (forward) or -1 (backward); reversing direction
        # while moving flips the sign of the current power.
        if(direction == 1):
            if(not self.vState[0] and not self.vState[1]):
                self.power = 60
            if(self.vState[1]):
                self.power *= -1
            self.vState[0] = True
            self.vState[1] = False
        if(direction == -1):
            if(not self.vState[0] and not self.vState[1]):
                self.power = -60
            if(self.vState[0]):
                self.power *= direction
            self.vState[0] = False
            self.vState[1] = True
        self.syncMotor.brake()
        self.syncMotor.run(self.power)

    # Depending on the direction, accelerate or decelerate.
    def speed(self, direction):
        # Power is adjusted in steps of 5 and clamped to (0, 120) forward
        # or (-120, 0) backward; a no-op when the robot is stopped.
        if(self.vState[0]):
            if(self.power < 120 and self.power > 0):
                self.power = self.power + 5 * direction
        elif(self.vState[1]):
            if(self.power < 0 and self.power > -120):
                self.power = self.power - 5 * direction
        else:
            return
        self.syncMotor.brake()
        self.syncMotor.run(self.power)

    def turn(self, direction):
        # +1 turns by driving the leader wheel; otherwise drive the follower.
        if(direction == 1):
            self.syncMotor.leader.weak_turn(80, self.cuentasGiro_)
        else:
            self.syncMotor.follower.weak_turn(80, self.cuentasGiro_)

    def stop(self):
        # Brake if moving; otherwise (already stopped) start moving forward
        # at the default power.
        if(self.vState[0] or self.vState[1]):
            self.vState[0] = self.vState[1] = False
            self.syncMotor.brake()
        else:
            self.vState[0] = True
            self.power = 60
            self.syncMotor.run(self.power)
[ "jgm79@outlook.com" ]
jgm79@outlook.com
66f5cf44b3c273e4152b18e9b7b961b7398581b0
791710736f0f3a7d2ead79788abedc59fa5080c9
/Python/Studys/15.py.datetime/py_datetime.py
23559d264c1fcc4318b6663f7bb790655e54e2fd
[]
no_license
king2934/PL
d9afae606905c988be888dfab7e0485c64f3f69e
ce9d97152033a3709b61ffe2c5911f04e331bba8
refs/heads/master
2021-11-20T21:51:41.377540
2021-08-22T13:50:31
2021-08-22T13:50:31
135,822,182
0
0
null
null
null
null
UTF-8
Python
false
false
93
py
#!/usr/bin/python3
# coding=utf-8
"""Tiny demo script: fetch today's date and print it (ISO format)."""
import datetime

# Capture today's date once, then echo it to stdout.
now = datetime.date.today()
print(now)
[ "king2934@gmail.com" ]
king2934@gmail.com
b7cb33c3441b8bbf58564daf031e567443534a58
bb1f21e32753e139500b70c7117be8b171e52399
/EquipoB/asgi.py
a4bc6d6f80a063459a784d2d0c30c25422898ca2
[]
no_license
AnFel024/EquipoB_Nodos
2c2d206edc6b7a229384f45ec99f43e63c54e8b4
06e62b3b40836d8a0b482ac761d9604bc6b17c10
refs/heads/master
2023-05-11T19:57:54.578727
2021-06-03T22:59:32
2021-06-03T22:59:32
373,662,392
0
0
null
null
null
null
UTF-8
Python
false
false
391
py
""" ASGI config for EquipoB project. It exposes the ASGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/3.2/howto/deployment/asgi/ """ import os from django.core.asgi import get_asgi_application os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'EquipoB.settings') application = get_asgi_application()
[ "anfelpe.0200@gmail.com" ]
anfelpe.0200@gmail.com
78f54828d3d74ae1fe1c8e49ce0b20a167f8193f
31572f880424a2d18bdf0c4846f4d1cc4f0cebe9
/Python_Challenge/PythonChallengeLevel_03.py
1e0895774884d8faabbde4f3b397a0a798a259e5
[]
no_license
fuxicoder/mepythonchallenge
e4ff3fb8d1dd14b75c1ba91ca4b8eaabfead6013
ce65a68174e3b2c0f61c1ae29dc46aca08f120f2
refs/heads/master
2021-01-18T23:08:20.203504
2016-10-31T13:08:08
2016-10-31T13:08:08
12,640,897
0
0
null
null
null
null
UTF-8
Python
false
false
349
py
# Python Challenge level 3 solver (Python 2 script): download the puzzle page
# and extract lowercase letters guarded by exactly three capitals on each side.
import urllib2

f = urllib2.urlopen('http://www.pythonchallenge.com/pc/def/equality.html')
data = f.read()

import re
# One lowercase letter with exactly three uppercase letters on each side
# (the [^A-Z] guards reject longer capital runs).
reg = re.compile('[^A-Z][A-Z]{3}([a-z])[A-Z]{3}[^A-Z]')
# The HTML comment block that holds the puzzle text.
comtextReg = re.compile('<!--[\W\w]*-->')
s_str_01 = ''.join(comtextReg.findall(data))
print s_str_01
print ''.join(reg.findall(data))
print ''.join(reg.findall(s_str_01))
[ "fuxicoder@gmail.com" ]
fuxicoder@gmail.com
98d48c1552f26fecce15de752b3f72278091020e
81986e18b51064a9debca09822a62c7a28f267a7
/bin/backupMint.py
2aa17f2c496351b5721121f10ea219ea4bdfd37a
[ "BSD-2-Clause" ]
permissive
rpaditya/dotfiles
eaa02730c37d7e71d03fed0b4b58e06c7521aaee
89af61c9c26a3e131578f5ec2a555b932cd0c9c5
refs/heads/master
2021-07-16T13:33:34.308627
2021-03-19T15:42:15
2021-03-19T15:42:15
29,360,337
0
0
null
null
null
null
UTF-8
Python
false
false
1,426
py
# Log in to mint.com and download all account transactions as a CSV backup.
# Python 2 script (urllib2/cookielib, print statement).
import os
import urllib
import urllib2
import cookielib
from optparse import OptionParser
import datetime


class mintlib():

    def __init__(self):
        self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())  # need cookies for the JSESSION ID
        urllib2.install_opener(self.opener)

    def login(self, username, password):
        # NOTE(review): locals() also contains `self`, which gets urlencoded
        # into the POST body -- confirm Mint ignores the extra field.
        request = urllib2.Request("https://wwws.mint.com/loginUserSubmit.xevent?task=L",
                                  urllib.urlencode(locals()))
        request.add_header("User-Agent", "Mozilla/5.0")  # Mint kicks to a "Upgrade to IE 7.0" page without this
        response = self.opener.open(request)

    def download(self, file):
        # write CSV file of all Mint transactions for this account to a file
        response = self.opener.open("https://wwws.mint.com/transactionDownload.event?")
        open(file, "w").write(response.read())

    def logout(self):
        response = self.opener.open("https://wwws.mint.com/logout.event")


def getOptions():
    arguments = OptionParser()
    # NOTE(review): OptionParser.add_options expects a list of Option objects,
    # not bare strings -- this likely raises at runtime; verify and consider
    # three add_option("--username") calls instead.
    arguments.add_options(["--username", "--password", "--file"])
    arguments.set_default("file", "mint_backup_%s.csv" % str(datetime.date.today()))
    return arguments.parse_args()[0]  # options


if __name__ == '__main__':
    options = getOptions()
    mint = mintlib()
    mint.login(options.username, options.password)
    mint.download(options.file)
    print "Done"
[ "aditya@grot.org" ]
aditya@grot.org
eeb5c32aeca4c54f2b5c6ffc35714485bb235f96
7174b27cd79cad398ffa1add9b59da6e9adbeae4
/python-100days/day0-15/day13/more_thread2.py
35152bd4993d043a4da4ce465dc7221aa7d7ba44
[]
no_license
UULIN/py
ddf037021afce04e46d51c133bfa06257ef7200a
a5d32597fc91fbd5ec41f54fb942c82300766299
refs/heads/master
2021-07-18T08:20:49.342072
2020-10-21T14:41:42
2020-10-21T14:41:42
222,977,134
1
0
null
null
null
null
UTF-8
Python
false
false
1,226
py
# Demo of lock-protected concurrent deposits: 100 threads each deposit 1 unit
# into a shared account; the lock prevents lost updates.
from time import sleep
from threading import Thread, Lock


class Account(object):
    """A bank account whose balance is protected by a lock.

    The lock makes :meth:`deposit` safe to call from many threads at once;
    without it the read/sleep/write sequence below would lose updates.
    """

    def __init__(self):
        self._balance = 0
        self._lock = Lock()

    def deposit(self, money):
        """Add ``money`` to the balance while holding the account lock.

        The 0.01 s sleep deliberately simulates slow transaction processing,
        widening the race window that the lock exists to close.
        """
        # `with` acquires the lock and guarantees release even on exception --
        # same semantics as the acquire/try/finally/release it replaces.
        with self._lock:
            # Compute the post-deposit balance.
            new_balance = self._balance + money
            # Simulate the deposit taking 0.01 seconds to process.
            sleep(0.01)
            # Commit the new balance.
            self._balance = new_balance

    @property
    def balance(self):
        """Current account balance."""
        return self._balance


class AddMoneyThread(Thread):
    """Thread that performs a single deposit into the given account."""

    def __init__(self, account, money):
        super().__init__()
        self._account = account
        self._money = money

    def run(self):
        self._account.deposit(self._money)


def main():
    """Run 100 concurrent 1-unit deposits and print the final balance."""
    account = Account()
    threads = []
    # Create 100 deposit threads all targeting the same account.
    for _ in range(100):
        t = AddMoneyThread(account, 1)
        threads.append(t)
        t.start()
    # Wait until every deposit thread has finished.
    for t in threads:
        t.join()
    print('账户余额为: ¥%d元' % account.balance)


if __name__ == '__main__':
    main()
[ "1036190402@qq.com" ]
1036190402@qq.com
d8bc2a7d55e802336c23e9acc4309910981c7989
220c1eb4af7b37af9574fa3765e450d3ee0a76a8
/timeclock/migrations/0004_clockinout_options.py
94400299136aa286fa152f8432feeff9a0bf6ecf
[]
no_license
jacobbexten/EmployeeTimeclock
4e63fd2e709a90dcfccc56779415f688b75b344a
a4ed6a109335bb55b2b53b4c77ffc3468339c12c
refs/heads/master
2023-01-19T11:10:08.393533
2020-11-18T05:34:12
2020-11-18T05:34:12
302,791,893
1
0
null
null
null
null
UTF-8
Python
false
false
446
py
# Generated by Django 3.0.1 on 2020-11-17 01:06 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('timeclock', '0003_calendar'), ] operations = [ migrations.AddField( model_name='clockinout', name='options', field=models.CharField(choices=[('ci', 'Clock In'), ('co', 'Clock Out')], default='ci', max_length=10), ), ]
[ "jacob.bexten@umontana.edu" ]
jacob.bexten@umontana.edu
0ca6f7dc8897396924cab918e4684a7be3e0bca5
687a2e9ba9f7e053acaff3c6bf1ad4604894c9d6
/molecule/update/tests/test_default.py
c6fd0a056e6320ace8cffbf4f9da98f93602e9eb
[ "BSD-2-Clause" ]
permissive
hostwithquantum/ansible-loki
2a823d5cae48a6d6c3640466c95823f656f9600d
c4ac5c80526b04bebe71f4293b9da257723e1aa9
refs/heads/main
2023-03-06T00:57:54.650872
2021-02-13T17:46:38
2021-02-13T17:46:38
276,343,767
17
8
BSD-2-Clause
2021-02-13T17:46:39
2020-07-01T10:09:12
HTML
UTF-8
Python
false
false
988
py
# Molecule/testinfra verification suite: checks that loki and promtail are
# configured, running, enabled, and at the expected version on all hosts
# from the molecule inventory.
import os

import pytest

import testinfra.utils.ansible_runner

testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    os.environ['MOLECULE_INVENTORY_FILE']
).get_hosts('all')


def test_hosts_file(host):
    # Sanity check that /etc/hosts exists and is owned by root.
    f = host.file('/etc/hosts')

    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'


@pytest.mark.parametrize("name", [
    ("loki"),
    ("promtail"),
])
def test_promtail_configuration(host, name):
    # Each service's YAML config must exist at /etc/<name>/<name>.yml, root-owned.
    f = host.file('/etc/' + name + '/' + name + '.yml')

    assert f.exists
    assert f.user == 'root'
    assert f.group == 'root'


@pytest.mark.parametrize("name", [
    ("loki"),
    ("promtail"),
])
def test_services_enabled(host, name):
    # Both services must be running now and enabled at boot.
    svc = host.service(name)

    assert svc.is_running
    assert svc.is_enabled


@pytest.mark.parametrize("name", [
    ("loki"),
    ("promtail"),
])
def test_version(host, name):
    # The installed binaries must report version 2.1.0.
    version_output = host.check_output("/usr/local/bin/" + name + " -version")
    assert "2.1.0" in version_output
[ "till@php.net" ]
till@php.net
113948ea9a60280b3b5d22a0ab0b747f6dcf0ec6
510cd20ec65c92a87822921bf3176ddc35a51eb0
/knackpostgres/fields/concatenation_field.py
561ce13e3dca7690d67120184774d874c5eb8a3d
[]
no_license
iskandari/knackpostgres
1fe73e07f2365e475e12f00d8f4ad5eca0b633cd
2a732b28a877e5717560fad97b34f5009510ebbd
refs/heads/master
2023-05-05T17:17:52.115910
2020-05-26T00:35:30
2020-05-26T00:35:30
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,372
py
# Translates a Knack "text formula" (concatenation) field definition into an
# equivalent PostgreSQL CONCAT(...) expression, using a Lark parse tree of
# the Knack equation string.
import re

from ._knack_field import KnackField
from knackpostgres.utils.parsers import get_parser
from knackpostgres.utils.method_handler import MethodHandler

# todo: handle many-to-many connections :/
# todo: reference views instead of tables and create "alter view" statements

# Regex search expressions
# match: field_xx or field_xx.field_xx (if it's enclosed in braces)
FIELD_SEARCH_EXCLUDE_BRACES = "(?:{)(field_\d+)(?:})|(?:{)(field_\d+.field_\d+)(?:})"
# match: {field_xx} or {field_xx.field_xx}
FIELD_SEARCH_INCLUDE_BRACES = "({field_\d+})|({field_\d+.field_\d+})"


class ConcatenationField(KnackField):
    """ Field wrapper/parser of Knack concatenation (aka `text formula`) fields.

    in the words of Gob Bluth, i didn't take `wasn't optimistic it could be
    done` for an answer
    """

    def __init__(self, data, name, table):
        super().__init__(data, name, table)
        # The raw Knack equation string, e.g. text and {field_xx} references.
        self.equation = self.format_knack.get("equation")
        """
        todo: consider when the foreign table is the host
        todo: i think you need to use alter views for all formula fields, as
        they can be cross-dependent. but you can first test if your dependency
        logic is working
        """

    def handle_formula(self, app, grammar="concatenation"):
        # Entry point: parse the equation with the named grammar and build
        # self.sql. `app` provides cross-table field lookups.
        self.app = app
        self._get_fieldmap()
        self.parser = get_parser(grammar)
        self.tree = self.parser.parse(self.equation)
        self._process_methods()
        self._gather_all_sql()
        self._to_sql()
        return self

    def _process_methods(self):
        """
        Traverse up through the Lark tree that is comprised of the contents of
        the knack formula field definition. when a method is reached, collect
        it's child components (arbitrary strings or other methods). In this
        way, we roll up each sub-component of the field definition from it's
        component parts, translating each part to SQL syntax along the way.
        """
        for node in self.tree.iter_subtrees():
            # these `.data` values are defined by the parser's grammar
            if node.data == "_values":
                continue

            elif node.data == "method":
                self._handle_method(node)

    def _parse_arg(self, arg):
        """
        Parse an argument of a Knack formula field string method. Args can be
        a combination of arbitrary strings and other string methods
        """
        if arg.data == "second_arg" and len(arg.children) == 1:
            # try to convert second arg to an int, and if so return it as a stringified number
            try:
                return str(int(arg.children[0].children[0].value))
            except:
                pass

        arg_substrings = []

        for elem in arg.children:
            arg_type = elem.data

            if arg_type == "text_content":
                text_content = elem.children[0].value
                substrings = self._parse_fieldnames(text_content)
                arg_substrings += substrings

            elif arg_type == "method":
                # nested methods have already been translated (bottom-up walk)
                arg_substrings.append(elem.sql)

        return (
            f"CONCAT({', '.join(arg_substrings)})"
            if len(arg_substrings) > 1
            else arg_substrings[0]
        )

    def _handle_method(self, method):
        """
        Translate a knack string method to sql. Because we're iterating
        """
        method.args = []

        for elem in method.children:
            name = elem.data

            if name == "method_name":
                method.name = elem.children[0].value

            elif "arg" in name:
                method.args.append(self._parse_arg(elem))

        handler = MethodHandler(method)
        method.sql = handler.handle_method()
        return None

    def _get_fieldmap(self):
        """
        Generate an index of knack fieldames and their postgres fieldnames. We
        have to look across all tables in the app to accomplish this, because
        formula fields may reference fields in other tables. Also, collect all
        the table names involved in this field, so we can include them in our
        SQL statement
        """
        self.fieldmap = {}
        self.tables = []
        self.connection_fields = []

        fieldname_matches = re.findall(FIELD_SEARCH_EXCLUDE_BRACES, self.equation)

        # and we need to unpack the results, which are touples of capturing groups. a tubple will
        # either have a value in first position (for normal field) or second position (connection field)
        fieldnames = [field[0] for field in fieldname_matches if field[0]]
        fieldnames += [field[1] for field in fieldname_matches if field[1]]

        # reduce to unique
        fieldnames = list(set(fieldnames))

        for fieldname in fieldnames:
            try:
                # attempt to handle connected field
                conn_fieldname = fieldname.split(".")[0]
                target_fieldname = fieldname.split(".")[1]
            except IndexError:
                # no "." -> plain (non-connection) field
                target_fieldname = fieldname
                conn_fieldname = None
                pass

            target_field = self.app.find_field_from_field_key(target_fieldname)

            if conn_fieldname:
                conn_field = self.app.find_field_from_field_key(conn_fieldname)
                self.connection_fields.append(conn_field)

            if target_field.table.name_postgres not in self.tables:
                self.tables.append(target_field.table.name_postgres)

            self.fieldmap[
                fieldname
            ] = f"{target_field.table.name_postgres}.{target_field.name_postgres}"

        return self

    def _parse_fieldnames(self, text_content):
        """
        Split a string into it's fieldname and non-fieldname parts. wrapping
        the non-fieldnames parts in single quotes, as SQL requires, and
        replacing fieldnames with their postgres fieldnames

        we include braces in our field search, because we must know which
        substrings are syntactical {field_xx} calls, or if for some reason
        your text formula has a non-field value like `field_99` :|
        """
        field_search = re.compile(FIELD_SEARCH_INCLUDE_BRACES)

        # fetch the known fieldnames in this formula from the fieldmap, adding braces as mentioned above
        try:
            fieldnames = [f"{{{fieldname}}}" for fieldname in self.fieldmap.keys()]
        except AttributeError:
            pass

        # split the string into it's components of fieldnames and non-fieldnames
        substrings = field_search.split(text_content)

        # remove None values and empty strings, an artecfact of regex.findall
        substrings = [sub for sub in substrings if sub != "" and sub != None]

        # replace the fieldname elements with their postgres fieldname
        for i, sub in enumerate(substrings):
            if sub in fieldnames:
                substrings[i] = self.fieldmap[sub.replace("{", "").replace("}", "")]
            else:
                # escape any single quotes in the substring, for sql
                sub = sub.replace("'", "\\'")
                # wrap non-fieldnames in single quotes, for sql
                substrings[i] = f"'{sub}'"

        return substrings

    def _gather_all_sql(self):
        """
        At this point, all of the method nodes and their children have a `sql`
        attribute, and all that's left to do is to create the sql syntax for
        the top-level elements (which can be comprised of aribitrary strings
        or Knack field names, e.g. {field_101})

        Assigns a list of the sql statements in each top-level node in the
        tree to `self.tree.sql`
        """
        self.tree.sql = []

        for elem in self.tree.children:
            if elem.data == "text_content":
                text_content = elem.children[0].value
                substrings = self._parse_fieldnames(text_content)
                self.tree.sql += substrings

            elif elem.data == "method":
                self.tree.sql.append(elem.sql)

    def _to_sql(self):
        """
        At this point, every top-level node in our tree has a `sql` attribute,
        they merely need to be concatenated.
        """
        self.sql = f"""CONCAT({', '.join(self.tree.sql)}) AS {self.name_postgres}"""
        return self.sql
[ "john.clary@austintexas.gov" ]
john.clary@austintexas.gov
8902f287b0da03aff4ae434389d184bc105712de
c188c831f7577a143d5c7b876a061b383967dc73
/apps/courses/migrations/0005_lesson_url.py
ac7dab6144590139bf2f46e28363ae0c0ecb920a
[]
no_license
ksino/Gmooc
516eab67c7e350d3472c9b2ac0bf21e426c56377
fc2d06fd3f74c2131c6782375d23f13ea6ff79f9
refs/heads/master
2021-04-18T15:22:13.582561
2018-03-29T04:33:54
2018-03-29T04:33:54
126,559,419
1
0
null
null
null
null
UTF-8
Python
false
false
489
py
# -*- coding: utf-8 -*- # Generated by Django 1.9 on 2017-10-30 15:20 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('courses', '0004_course_tag'), ] operations = [ migrations.AddField( model_name='lesson', name='url', field=models.CharField(default='', max_length=100, verbose_name='\u8bbf\u95ee\u5730\u5740'), ), ]
[ "flycat86@163.com" ]
flycat86@163.com
644d4865c9256997bf606c9d5663f84329073618
81f23f48f32b8680a861bb1a8117d0b265aa133f
/apps/code_coverage/views.py
07f6d682f6354be3071ae5460f1fa9bb38f11efe
[]
no_license
Dearin/auto_test_api
d8d63f55e64d8335b2f21fc506cc59d0252a2c72
390377fed202a17dd1accc9be0afaca9d38bfb44
refs/heads/master
2023-07-12T02:46:44.099695
2021-08-02T03:23:33
2021-08-02T03:23:33
391,800,805
0
0
null
null
null
null
UTF-8
Python
false
false
4,033
py
import os
import json
import time, datetime
from django.http.response import HttpResponse
from libs.tool import timestamp_to_str, json_response
import logging
from libs.ssh import SSH
from conf.ENV_CONF import CODE_COVERAGE_SERVER

# Create your views here.
# Module-level logging to a fixed file path on the coverage host.
logging.basicConfig(level=logging.DEBUG, filename='/root/dhcp.log',
                    format='%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s')

# Create your views here.
# A single SSH connection to the coverage server is opened at import time and
# shared by every view below.
# NOTE(review): the connection is never re-established — if it drops, every
# view fails until the process restarts; confirm this is acceptable.
ssh = SSH(hostname=CODE_COVERAGE_SERVER['host'], username=CODE_COVERAGE_SERVER['username'],
          password=CODE_COVERAGE_SERVER['password'])
client = ssh.get_client()


# Get the file extension (the text after the last ".")
def getsuffix(filename):
    suffix = filename.split(".")
    return suffix[len(suffix)-1]


def get_code_coverage_report(request):
    """List coverage runs by pairing <timestamp>.log files with their
    <branch>_... .html reports under /root/coverage/sample/report on the
    remote host, newest first."""
    path = "/root/coverage/sample/report/"
    stdin, stdout, stderr = client.exec_command('ls /root/coverage/sample/report|grep -E "log|html"')
    # [:-1] drops the trailing newline of the shell output before splitting.
    file_lists = stdout.read().decode('utf-8')[:-1].split("\n")
    file_log = []
    file_html = []
    data = []
    for file in file_lists:
        if "log" in file:
            file_log.append(file)
        elif "html" in file:
            file_html.append(file)
    for i in file_log:
        res = dict()
        # Log files are named "<unix-timestamp>.log"; the stem keys the run.
        time_flag = i.split(".")[0]
        res['key'] = time_flag
        res['time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(int(time_flag)))
        res['branch'] = ''
        # Default shown while no matching HTML report exists yet
        # (Chinese for "running...").
        res['report'] = '运行中....'
        res["log"] = i
        for j in file_html:
            if time_flag in j:
                # Consume the report so it can't match another run.
                # NOTE(review): mutating file_html while iterating it can skip
                # entries when several reports share a timestamp — confirm at
                # most one match is expected.
                file_html.remove(j)
                res['branch'] = j.split("_")[0]
                res['report'] = j
                res['url'] = path + j
        data.append(res)
    data = sorted(data, key=lambda e: e.__getitem__('time'), reverse=True)
    return json_response(data)


def handle_rake_log(request):
    """Return the content of one run log (``?log=<name>``) as a list of lines."""
    log_name = request.GET.get('log')
    # NOTE(review): log_name is interpolated into a shell command unescaped —
    # a crafted value can inject commands on the coverage host; sanitize it.
    stdin, stdout, stderr = client.exec_command('cat /root/coverage/sample/report/{0}'.format(log_name))
    log_detail = stdout.read().decode('utf-8')[:-1].split("\n")
    respone = {
        "code": 200,
        "data": log_detail
    }
    return json_response(respone)


def handle_rake_test(request):
    """Kick off a coverage run for the branch named in the JSON body.

    Checks the branch out on the remote host, refuses to start if a rake
    process is already running, records the branch/timestamp in conf.yml,
    then launches ``rake test`` with output redirected to <timestamp>.log.
    """
    data = request.body
    data = data.decode('utf-8')
    data = json.loads(data) if data else {}
    client.exec_command("cd {0} && git pull".format(CODE_COVERAGE_SERVER['zddiv3_path']))
    # NOTE(review): branch also flows into shell commands unescaped — same
    # injection hazard as handle_rake_log.
    branch = data['branch'].strip()
    stdin, stdout, stderr = client.exec_command(
        "cd {0} && git checkout -f {1}".format(CODE_COVERAGE_SERVER['zddiv3_path'], branch))
    git_res = stderr.read().decode('utf-8')
    if 'error' in git_res:
        # Chinese: "branch <name> may be wrong, please check!"
        return json_response(error="{0}分支可能有误,请检查!".format(branch))
    # Only one rake run at a time: count live rake processes on the host.
    check_rake_exist = "ps aux|grep -E 'rake'|grep -v grep|wc -l"
    stdin, stdout, stderr = client.exec_command(check_rake_exist)
    rpm_exist = stdout.read().decode('utf-8')[:-1]
    if rpm_exist != '0':
        # Chinese: "a coverage run is in progress, please retry later"
        return json_response(error="有代码覆盖率正在进行,请稍后重试")
    # Record the branch name and run timestamp in the remote conf.yml.
    # current_time = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
    current_time = int(time.time())
    # Strip any previous name/create_html entries before appending new ones.
    client.exec_command('sed -i "/name:/d" /opt/auto_test_api/conf/conf.yml')
    client.exec_command('sed -i "/create_html:/d" /opt/auto_test_api/conf/conf.yml')
    # Brief pause so the sed deletions land before the appends below.
    time.sleep(0.5)
    client.exec_command('/bin/echo "name: {0}" >> /opt/auto_test_api/conf/conf.yml'.format(branch))
    client.exec_command('/bin/echo "create_html: {0}" >> /opt/auto_test_api/conf/conf.yml'.format(current_time))
    # Fire-and-forget: the run's stdout becomes the <timestamp>.log that
    # get_code_coverage_report later lists.
    client.exec_command(
        'cd /root/coverage/sample/ && /usr/local/bin/rake test >> /root/coverage/sample/report/{0}.log'.format(
            current_time))
    data = {
        'code': 200,
        'msg': 'success'
    }
    return json_response(data)


def test(request):
    """Debug endpoint: echo any JSON body to stdout and the log, return 200."""
    if request.body:
        print(json.loads(request.body))
        logging.info(json.loads(request.body))
    return HttpResponse(json.dumps({"code": 200}), content_type='application/json')
[ "dengshuyue@zdns.cn" ]
dengshuyue@zdns.cn
da8a0a603298464ec6e44b46874ac0ad6c5ed880
8669415c4d5b8019d0f50c6ea727192c23b937c2
/test/functional/sendheaders.py
8af0d435985cc45559f2606f4e4b67787aa8cc70
[ "MIT" ]
permissive
Palem1988/ion_old
48a4d8e436691d9e05ebe44fc8811b0291093b83
2c2b532abf61e2a06231c1d3b4d9b2bd0cdb469a
refs/heads/master
2020-08-05T07:51:35.440658
2017-06-25T21:47:57
2017-06-25T21:47:57
212,453,442
0
0
NOASSERTION
2019-10-02T22:32:26
2019-10-02T22:32:19
null
UTF-8
Python
false
false
24,113
py
#!/usr/bin/env python3 # Copyright (c) 2014-2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test behavior of headers messages to announce blocks. Setup: - Two nodes, two p2p connections to node0. One p2p connection should only ever receive inv's (omitted from testing description below, this is our control). Second node is used for creating reorgs. Part 1: No headers announcements before "sendheaders" a. node mines a block [expect: inv] send getdata for the block [expect: block] b. node mines another block [expect: inv] send getheaders and getdata [expect: headers, then block] c. node mines another block [expect: inv] peer mines a block, announces with header [expect: getdata] d. node mines another block [expect: inv] Part 2: After "sendheaders", headers announcements should generally work. a. peer sends sendheaders [expect: no response] peer sends getheaders with current tip [expect: no response] b. node mines a block [expect: tip header] c. for N in 1, ..., 10: * for announce-type in {inv, header} - peer mines N blocks, announces with announce-type [ expect: getheaders/getdata or getdata, deliver block(s) ] - node mines a block [ expect: 1 header ] Part 3: Headers announcements stop after large reorg and resume after getheaders or inv from peer. 
- For response-type in {inv, getheaders} * node mines a 7 block reorg [ expect: headers announcement of 8 blocks ] * node mines an 8-block reorg [ expect: inv at tip ] * peer responds with getblocks/getdata [expect: inv, blocks ] * node mines another block [ expect: inv at tip, peer sends getdata, expect: block ] * node mines another block at tip [ expect: inv ] * peer responds with getheaders with an old hashstop more than 8 blocks back [expect: headers] * peer requests block [ expect: block ] * node mines another block at tip [ expect: inv, peer sends getdata, expect: block ] * peer sends response-type [expect headers if getheaders, getheaders/getdata if mining new block] * node mines 1 block [expect: 1 header, peer responds with getdata] Part 4: Test direct fetch behavior a. Announce 2 old block headers. Expect: no getdata requests. b. Announce 3 new blocks via 1 headers message. Expect: one getdata request for all 3 blocks. (Send blocks.) c. Announce 1 header that forks off the last two blocks. Expect: no response. d. Announce 1 more header that builds on that fork. Expect: one getdata request for two blocks. e. Announce 16 more headers that build on that fork. Expect: getdata request for 14 more blocks. f. Announce 1 more header that builds on that fork. Expect: no response. Part 5: Test handling of headers that don't connect. a. Repeat 10 times: 1. Announce a header that doesn't connect. Expect: getheaders message 2. Send headers chain. Expect: getdata for the missing blocks, tip update. b. Then send 9 more headers that don't connect. Expect: getheaders message each time. c. Announce a header that does connect. Expect: no response. d. Announce 49 headers that don't connect. Expect: getheaders message each time. e. Announce one more that doesn't connect. Expect: disconnect. 
""" from test_framework.mininode import * from test_framework.test_framework import IonTestFramework from test_framework.util import * from test_framework.blocktools import create_block, create_coinbase direct_fetch_response_time = 0.05 class TestNode(NodeConnCB): def __init__(self): super().__init__() self.block_announced = False self.last_blockhash_announced = None def clear_last_announcement(self): with mininode_lock: self.block_announced = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) # Request data for a list of block hashes def get_data(self, block_hashes): msg = msg_getdata() for x in block_hashes: msg.inv.append(CInv(2, x)) self.connection.send_message(msg) def get_headers(self, locator, hashstop): msg = msg_getheaders() msg.locator.vHave = locator msg.hashstop = hashstop self.connection.send_message(msg) def send_block_inv(self, blockhash): msg = msg_inv() msg.inv = [CInv(2, blockhash)] self.connection.send_message(msg) def on_inv(self, conn, message): self.block_announced = True self.last_blockhash_announced = message.inv[-1].hash def on_headers(self, conn, message): if len(message.headers): self.block_announced = True message.headers[-1].calc_sha256() self.last_blockhash_announced = message.headers[-1].sha256 def on_block(self, conn, message): self.last_message["block"].calc_sha256() # Test whether the last announcement we received had the # right header or the right inv # inv and headers should be lists of block hashes def check_last_announcement(self, headers=None, inv=None): expect_headers = headers if headers != None else [] expect_inv = inv if inv != None else [] test_function = lambda: self.block_announced assert(wait_until(test_function, timeout=60)) with mininode_lock: self.block_announced = False success = True compare_inv = [] if "inv" in self.last_message: compare_inv = [x.hash for x in self.last_message["inv"].inv] if compare_inv != expect_inv: success = False hash_headers = [] if "headers" in 
self.last_message: # treat headers as a list of block hashes hash_headers = [ x.sha256 for x in self.last_message["headers"].headers ] if hash_headers != expect_headers: success = False self.last_message.pop("inv", None) self.last_message.pop("headers", None) return success def wait_for_getdata(self, hash_list, timeout=60): if hash_list == []: return test_function = lambda: "getdata" in self.last_message and [x.hash for x in self.last_message["getdata"].inv] == hash_list assert(wait_until(test_function, timeout=timeout)) return def wait_for_block_announcement(self, block_hash, timeout=60): test_function = lambda: self.last_blockhash_announced == block_hash assert(wait_until(test_function, timeout=timeout)) return def send_header_for_blocks(self, new_blocks): headers_message = msg_headers() headers_message.headers = [ CBlockHeader(b) for b in new_blocks ] self.send_message(headers_message) def send_getblocks(self, locator): getblocks_message = msg_getblocks() getblocks_message.locator.vHave = locator self.send_message(getblocks_message) class SendHeadersTest(IonTestFramework): def __init__(self): super().__init__() self.setup_clean_chain = True self.num_nodes = 2 # mine count blocks and return the new tip def mine_blocks(self, count): # Clear out last block announcement from each p2p listener [ x.clear_last_announcement() for x in self.p2p_connections ] self.nodes[0].generate(count) return int(self.nodes[0].getbestblockhash(), 16) # mine a reorg that invalidates length blocks (replacing them with # length+1 blocks). # Note: we clear the state of our p2p connections after the # to-be-reorged-out blocks are mined, so that we don't break later tests. 
# return the list of block hashes newly mined def mine_reorg(self, length): self.nodes[0].generate(length) # make sure all invalidated blocks are node0's sync_blocks(self.nodes, wait=0.1) for x in self.p2p_connections: x.wait_for_block_announcement(int(self.nodes[0].getbestblockhash(), 16)) x.clear_last_announcement() tip_height = self.nodes[1].getblockcount() hash_to_invalidate = self.nodes[1].getblockhash(tip_height-(length-1)) self.nodes[1].invalidateblock(hash_to_invalidate) all_hashes = self.nodes[1].generate(length+1) # Must be longer than the orig chain sync_blocks(self.nodes, wait=0.1) return [int(x, 16) for x in all_hashes] def run_test(self): # Setup the p2p connections and start up the network thread. inv_node = TestNode() test_node = TestNode() self.p2p_connections = [inv_node, test_node] connections = [] connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], inv_node)) # Set nServices to 0 for test_node, so no block download will occur outside of # direct fetching connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node, services=0)) inv_node.add_connection(connections[0]) test_node.add_connection(connections[1]) NetworkThread().start() # Start up network handling in another thread # Test logic begins here inv_node.wait_for_verack() test_node.wait_for_verack() tip = int(self.nodes[0].getbestblockhash(), 16) # PART 1 # 1. 
Mine a block; expect inv announcements each time self.log.info("Part 1: headers don't start before sendheaders message...") for i in range(4): old_tip = tip tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(inv=[tip]), True) # Try a few different responses; none should affect next announcement if i == 0: # first request the block test_node.get_data([tip]) test_node.wait_for_block(tip) elif i == 1: # next try requesting header and block test_node.get_headers(locator=[old_tip], hashstop=tip) test_node.get_data([tip]) test_node.wait_for_block(tip) test_node.clear_last_announcement() # since we requested headers... elif i == 2: # this time announce own block via headers height = self.nodes[0].getblockcount() last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] block_time = last_time + 1 new_block = create_block(tip, create_coinbase(height+1), block_time) new_block.solve() test_node.send_header_for_blocks([new_block]) test_node.wait_for_getdata([new_block.sha256]) test_node.send_message(msg_block(new_block)) test_node.sync_with_ping() # make sure this block is processed inv_node.clear_last_announcement() test_node.clear_last_announcement() self.log.info("Part 1: success!") self.log.info("Part 2: announce blocks with headers after sendheaders message...") # PART 2 # 2. Send a sendheaders message and test that headers announcements # commence and keep working. 
test_node.send_message(msg_sendheaders()) prev_tip = int(self.nodes[0].getbestblockhash(), 16) test_node.get_headers(locator=[prev_tip], hashstop=0) test_node.sync_with_ping() # Now that we've synced headers, headers announcements should work tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(headers=[tip]), True) height = self.nodes[0].getblockcount()+1 block_time += 10 # Advance far enough ahead for i in range(10): # Mine i blocks, and alternate announcing either via # inv (of tip) or via headers. After each, new blocks # mined by the node should successfully be announced # with block header, even though the blocks are never requested for j in range(2): blocks = [] for b in range(i+1): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 if j == 0: # Announce via inv test_node.send_block_inv(tip) test_node.wait_for_getheaders() # Should have received a getheaders now test_node.send_header_for_blocks(blocks) # Test that duplicate inv's won't result in duplicate # getdata requests, or duplicate headers announcements [ inv_node.send_block_inv(x.sha256) for x in blocks ] test_node.wait_for_getdata([x.sha256 for x in blocks]) inv_node.sync_with_ping() else: # Announce via headers test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) # Test that duplicate headers won't result in duplicate # getdata requests (the check is further down) inv_node.send_header_for_blocks(blocks) inv_node.sync_with_ping() [ test_node.send_message(msg_block(x)) for x in blocks ] test_node.sync_with_ping() inv_node.sync_with_ping() # This block should not be announced to the inv node (since it also # broadcast it) assert "inv" not in inv_node.last_message assert "headers" not in inv_node.last_message tip = self.mine_blocks(1) 
assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(headers=[tip]), True) height += 1 block_time += 1 self.log.info("Part 2: success!") self.log.info("Part 3: headers announcements can stop after large reorg, and resume after headers/inv from peer...") # PART 3. Headers announcements can stop after large reorg, and resume after # getheaders or inv from peer. for j in range(2): # First try mining a reorg that can propagate with header announcement new_block_hashes = self.mine_reorg(length=7) tip = new_block_hashes[-1] assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(headers=new_block_hashes), True) block_time += 8 # Mine a too-large reorg, which should be announced with a single inv new_block_hashes = self.mine_reorg(length=8) tip = new_block_hashes[-1] assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(inv=[tip]), True) block_time += 9 fork_point = self.nodes[0].getblock("%02x" % new_block_hashes[0])["previousblockhash"] fork_point = int(fork_point, 16) # Use getblocks/getdata test_node.send_getblocks(locator = [fork_point]) assert_equal(test_node.check_last_announcement(inv=new_block_hashes), True) test_node.get_data(new_block_hashes) test_node.wait_for_block(new_block_hashes[-1]) for i in range(3): # Mine another block, still should get only an inv tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(inv=[tip]), True) if i == 0: # Just get the data -- shouldn't cause headers announcements to resume test_node.get_data([tip]) test_node.wait_for_block(tip) elif i == 1: # Send a getheaders message that shouldn't trigger headers announcements # to resume (best header sent will be too old) test_node.get_headers(locator=[fork_point], hashstop=new_block_hashes[1]) test_node.get_data([tip]) 
test_node.wait_for_block(tip) elif i == 2: test_node.get_data([tip]) test_node.wait_for_block(tip) # This time, try sending either a getheaders to trigger resumption # of headers announcements, or mine a new block and inv it, also # triggering resumption of headers announcements. if j == 0: test_node.get_headers(locator=[tip], hashstop=0) test_node.sync_with_ping() else: test_node.send_block_inv(tip) test_node.sync_with_ping() # New blocks should now be announced with header tip = self.mine_blocks(1) assert_equal(inv_node.check_last_announcement(inv=[tip]), True) assert_equal(test_node.check_last_announcement(headers=[tip]), True) self.log.info("Part 3: success!") self.log.info("Part 4: Testing direct fetch behavior...") tip = self.mine_blocks(1) height = self.nodes[0].getblockcount() + 1 last_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] block_time = last_time + 1 # Create 2 blocks. Send the blocks, then send the headers. blocks = [] for b in range(2): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 inv_node.send_message(msg_block(blocks[-1])) inv_node.sync_with_ping() # Make sure blocks are processed test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() # should not have received any getdata messages with mininode_lock: assert "getdata" not in test_node.last_message # This time, direct fetch should work blocks = [] for b in range(3): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 test_node.send_header_for_blocks(blocks) test_node.sync_with_ping() test_node.wait_for_getdata([x.sha256 for x in blocks], timeout=direct_fetch_response_time) [ test_node.send_message(msg_block(x)) for x in blocks ] test_node.sync_with_ping() # Now announce a header that forks the last two blocks tip = blocks[0].sha256 
height -= 1 blocks = [] # Create extra blocks for later for b in range(20): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Announcing one block on fork should not trigger direct fetch # (less work than tip) test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[0:1]) test_node.sync_with_ping() with mininode_lock: assert "getdata" not in test_node.last_message # Announcing one more block on fork should trigger direct fetch for # both blocks (same work as tip) test_node.send_header_for_blocks(blocks[1:2]) test_node.sync_with_ping() test_node.wait_for_getdata([x.sha256 for x in blocks[0:2]], timeout=direct_fetch_response_time) # Announcing 16 more headers should trigger direct fetch for 14 more # blocks test_node.send_header_for_blocks(blocks[2:18]) test_node.sync_with_ping() test_node.wait_for_getdata([x.sha256 for x in blocks[2:16]], timeout=direct_fetch_response_time) # Announcing 1 more header should not trigger any response test_node.last_message.pop("getdata", None) test_node.send_header_for_blocks(blocks[18:19]) test_node.sync_with_ping() with mininode_lock: assert "getdata" not in test_node.last_message self.log.info("Part 4: success!") # Now deliver all those blocks we announced. [ test_node.send_message(msg_block(x)) for x in blocks ] self.log.info("Part 5: Testing handling of unconnecting headers") # First we test that receipt of an unconnecting header doesn't prevent # chain sync. for i in range(10): test_node.last_message.pop("getdata", None) blocks = [] # Create two more blocks. for j in range(2): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 # Send the header of the second block -> this won't connect. 
with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[1]]) test_node.wait_for_getheaders() test_node.send_header_for_blocks(blocks) test_node.wait_for_getdata([x.sha256 for x in blocks]) [ test_node.send_message(msg_block(x)) for x in blocks ] test_node.sync_with_ping() assert_equal(int(self.nodes[0].getbestblockhash(), 16), blocks[1].sha256) blocks = [] # Now we test that if we repeatedly don't send connecting headers, we # don't go into an infinite loop trying to get them to connect. MAX_UNCONNECTING_HEADERS = 10 for j in range(MAX_UNCONNECTING_HEADERS+1): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 block_time += 1 height += 1 for i in range(1, MAX_UNCONNECTING_HEADERS): # Send a header that doesn't connect, check that we get a getheaders. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i]]) test_node.wait_for_getheaders() # Next header will connect, should re-set our count: test_node.send_header_for_blocks([blocks[0]]) # Remove the first two entries (blocks[1] would connect): blocks = blocks[2:] # Now try to see how many unconnecting headers we can send # before we get disconnected. Should be 5*MAX_UNCONNECTING_HEADERS for i in range(5*MAX_UNCONNECTING_HEADERS - 1): # Send a header that doesn't connect, check that we get a getheaders. with mininode_lock: test_node.last_message.pop("getheaders", None) test_node.send_header_for_blocks([blocks[i%len(blocks)]]) test_node.wait_for_getheaders() # Eventually this stops working. test_node.send_header_for_blocks([blocks[-1]]) # Should get disconnected test_node.wait_for_disconnect() self.log.info("Part 5: success!") # Finally, check that the inv node never received a getdata request, # throughout the test assert "getdata" not in inv_node.last_message if __name__ == '__main__': SendHeadersTest().main()
[ "richard@ionomy.com" ]
richard@ionomy.com
85ae61cc05563eee47e7f771d1f64d635a86292e
192dec1ea734fd67a3c3720228826cf754b2da5a
/valeo/vr/apps.py
b88f93e9775d8048cb831c38beadcdde6919dbff
[]
no_license
fafaschiavo/cpi_valeo
a4df4e64161e58e44ade276f0b6284abfb5af6d2
777ef6173bbc4bf5941098cb2ea3b13fccf490c1
refs/heads/master
2020-04-06T04:14:59.226013
2017-05-02T22:39:00
2017-05-02T22:39:00
82,980,893
0
0
null
null
null
null
UTF-8
Python
false
false
120
py
from __future__ import unicode_literals

from django.apps import AppConfig


class VrConfig(AppConfig):
    """Django application configuration for the ``vr`` app."""

    name = 'vr'
[ "fayschiavo@gmail.com" ]
fayschiavo@gmail.com
eccf709bc85d1da00c645964d906df42ca0dd0af
52b5773617a1b972a905de4d692540d26ff74926
/.history/reverseA_20200714202827.py
c8528cea3532a5e29a64703e1b1f20412489e57a
[]
no_license
MaryanneNjeri/pythonModules
56f54bf098ae58ea069bf33f11ae94fa8eedcabc
f4e56b1e4dda2349267af634a46f6b9df6686020
refs/heads/master
2022-12-16T02:59:19.896129
2020-09-11T12:05:22
2020-09-11T12:05:22
null
0
0
null
null
null
null
UTF-8
Python
false
false
282
py
'''
Given array A consisting of N integers, return the reversed array
'''


def array(arr):
    """Reverse *arr* in place with a two-pointer swap and return it.

    Fixes two defects in the original: the loop condition
    ``i < len(arr)-2 and j > 0`` let the pointers cross and re-swap the
    middle elements (producing a partially un-reversed list), and the
    module-level call referenced the undefined name ``arr`` (NameError).

    NOTE: the function name shadows the stdlib ``array`` module; kept for
    interface compatibility with existing callers.
    """
    i = 0
    j = len(arr) - 1
    # Stop exactly when the pointers meet or cross — one swap per pair.
    while i < j:
        arr[i], arr[j] = arr[j], arr[i]
        i += 1
        j -= 1
    return arr


print(array([1, 2, 3, 4, 5, 6]))
[ "mary.jereh@gmail.com" ]
mary.jereh@gmail.com
ed1db940ecdaeaba4082db5a0a4be025dab832ab
43d322687c22743bed050a7e0096815dd28d8319
/utils/kill_job.py
7a1cf36efacc261ea4a2625e9d18fd37815e6e8e
[]
no_license
dongli/gsi-scripts
1f41af0dd5148631ee1d533460418dfa71879332
4198667c87c9ca4f0a66e2a1ca0b5a6134caf5c1
refs/heads/master
2022-06-11T01:42:55.092954
2020-05-01T13:17:40
2020-05-01T13:17:40
260,460,546
1
0
null
null
null
null
UTF-8
Python
false
false
251
py
from job_running import job_running
import cli
from run import run


def kill_job(args, job_id):
    """Cancel *job_id* on the scheduler in use, if the job is still running.

    Chooses ``scancel`` when ``args.slurm`` is set, else ``qdel`` when
    ``args.pbs`` is set; does nothing for a job that is no longer running.
    """
    if not job_running(args, job_id):
        return
    cli.warning(f'Kill job {job_id}.')
    if args.slurm:
        run(f'scancel {job_id}')
    elif args.pbs:
        run(f'qdel {job_id}')
[ "dongli@lasg.iap.ac.cn" ]
dongli@lasg.iap.ac.cn
d29de614e9a8d0a96f901684004e73b06d75f9f3
4123e93b12fb8a179cc04536c2935239d5905dd9
/src/iterative_sorting/insertion_sort.py
622c5291f9489336d3f44c4a07ad70e588d34969
[]
no_license
MicahJank/cs-module-project-iterative-sorting
82764ee55def4413de6943ba8d6392789152d514
0786d2db5ba3e02ad51f689d7a666a557cc5edf5
refs/heads/master
2022-11-05T16:40:28.726000
2020-06-08T16:51:41
2020-06-08T16:51:41
269,215,896
0
0
null
2020-06-08T16:51:43
2020-06-03T23:28:25
null
UTF-8
Python
false
false
1,220
py
def simple_implementation(arr):
    """Sort *arr* in place with insertion sort and return it.

    Defect fixed: the original source was littered with U+200B
    zero-width-space characters between statements, which are a
    SyntaxError in Python; this is the same logic with clean whitespace.
    """
    # Grow a sorted prefix one element at a time.
    for i in range(1, len(arr)):
        temp = arr[i]
        j = i
        # Shift larger elements right until temp's slot is found.
        while j > 0 and temp < arr[j - 1]:
            arr[j] = arr[j - 1]
            j -= 1
        arr[j] = temp
    return arr


# Sorting books!

class Book:
    """A book with an author, title and genre."""

    def __init__(self, author, title, genre):
        self.author = author
        self.title = title
        self.genre = genre

    # __repr__ (not __str__) so a list of books also prints readably.
    def __repr__(self):
        return f'{self.title} by {self.author} in {self.genre}'


books = [Book("Melville", "Moby Dick", "Whale stories"),
         Book("Immortal William", "Hamlet", "emo Danish princes")]


def insertion_sort(books):
    """Sort *books* in place by genre (case-sensitive) and return them."""
    for i in range(1, len(books)):
        temp = books[i]
        j = i
        # While not at the front and these two are out of genre order.
        while j > 0 and temp.genre < books[j - 1].genre:
            books[j] = books[j - 1]
            j -= 1
        books[j] = temp
    return books
[ "MicahJank@users.noreply.github.com" ]
MicahJank@users.noreply.github.com
1898f53db1e53665c6f69f9ef8b54411b060dd23
75983ccc6e1eba55890429baace2bf716ac4cf33
/python/tvm/relay/ir_pass.py
84189c840d71a5dccdc08b92a22eb837b2fb5405
[ "Apache-2.0" ]
permissive
clhne/tvm
49c8be30c87791d5e8f13eea477620a829573d1c
d59320c764bd09474775e1b292f3c05c27743d24
refs/heads/master
2020-03-29T21:16:30.061742
2018-09-25T19:15:15
2018-09-25T19:15:15
150,358,639
1
0
Apache-2.0
2018-09-26T02:41:46
2018-09-26T02:41:45
null
UTF-8
Python
false
false
372
py
# pylint: disable=no-else-return, # pylint: disable=unidiomatic-typecheck """The set of passes for Relay. Exposes an interface for configuring the passes and scripting them in Python. """ from . import _ir_pass # Expose checking expression, should rename to infer_type. # pylint: disable=invalid-name check_expr = _ir_pass.check_expr well_formed = _ir_pass.well_formed
[ "tqchen@users.noreply.github.com" ]
tqchen@users.noreply.github.com
d7bd3622606e45ed5e769ce1df0a83d3c78fd893
ca74ab5f803f207a70fd67d3e74b896033fc6bd0
/lib/roi_data_layer/layer.py
a8b188624d895e430b00dd380ed8ac36ef1929df
[ "LicenseRef-scancode-generic-cla", "MIT", "BSD-2-Clause" ]
permissive
ShuangLI59/person_search
aae00ca2430b1d6507bbbdc656e9c2afa61fac7d
007fa6ea4aedaad6f510772d8c65c47a58ce9e63
refs/heads/master
2022-11-18T04:27:05.390110
2022-11-06T15:55:01
2022-11-06T15:55:01
55,614,709
807
271
null
2016-10-25T06:18:28
2016-04-06T14:51:39
Jupyter Notebook
UTF-8
Python
false
false
7,522
py
# -------------------------------------------------------- # Fast R-CNN # Copyright (c) 2015 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Ross Girshick # -------------------------------------------------------- """The data layer used during training to train a Fast R-CNN network. RoIDataLayer implements a Caffe Python layer. """ import caffe from fast_rcnn.config import cfg from roi_data_layer.minibatch import get_minibatch import numpy as np import yaml from multiprocessing import Process, Queue class RoIDataLayer(caffe.Layer): """Fast R-CNN data layer used for training.""" def _shuffle_roidb_inds(self): """Randomly permute the training roidb.""" if cfg.TRAIN.ASPECT_GROUPING: widths = np.array([r['width'] for r in self._roidb]) heights = np.array([r['height'] for r in self._roidb]) horz = (widths >= heights) vert = np.logical_not(horz) horz_inds = np.where(horz)[0] vert_inds = np.where(vert)[0] inds = np.hstack(( np.random.permutation(horz_inds), np.random.permutation(vert_inds))) inds = np.reshape(inds, (-1, 2)) row_perm = np.random.permutation(np.arange(inds.shape[0])) inds = np.reshape(inds[row_perm, :], (-1,)) self._perm = inds else: self._perm = np.random.permutation(np.arange(len(self._roidb))) self._cur = 0 def _get_next_minibatch_inds(self): """Return the roidb indices for the next minibatch.""" if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb): self._shuffle_roidb_inds() db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH] self._cur += cfg.TRAIN.IMS_PER_BATCH return db_inds def _get_next_minibatch(self): """Return the blobs to be used for the next minibatch. If cfg.TRAIN.USE_PREFETCH is True, then blobs will be computed in a separate process and made available through self._blob_queue. 
""" if cfg.TRAIN.USE_PREFETCH: return self._blob_queue.get() else: db_inds = self._get_next_minibatch_inds() minibatch_db = [self._roidb[i] for i in db_inds] return get_minibatch(minibatch_db, self._num_classes) def set_roidb(self, roidb): """Set the roidb to be used by this layer during training.""" self._roidb = roidb self._shuffle_roidb_inds() if cfg.TRAIN.USE_PREFETCH: self._blob_queue = Queue(10) self._prefetch_process = BlobFetcher(self._blob_queue, self._roidb, self._num_classes) self._prefetch_process.start() # Terminate the child process when the parent exists def cleanup(): print 'Terminating BlobFetcher' self._prefetch_process.terminate() self._prefetch_process.join() import atexit atexit.register(cleanup) def setup(self, bottom, top): """Setup the RoIDataLayer.""" # parse the layer parameter string, which must be valid YAML layer_params = yaml.load(self.param_str) self._num_classes = layer_params['num_classes'] self._name_to_top_map = {} # data blob: holds a batch of N images, each with 3 channels idx = 0 top[idx].reshape(cfg.TRAIN.IMS_PER_BATCH, 3, max(cfg.TRAIN.SCALES), cfg.TRAIN.MAX_SIZE) self._name_to_top_map['data'] = idx idx += 1 if cfg.TRAIN.HAS_RPN: top[idx].reshape(1, 3) self._name_to_top_map['im_info'] = idx idx += 1 top[idx].reshape(1, 4) self._name_to_top_map['gt_boxes'] = idx idx += 1 else: # not using RPN # rois blob: holds R regions of interest, each is a 5-tuple # (n, x1, y1, x2, y2) specifying an image batch index n and a # rectangle (x1, y1, x2, y2) top[idx].reshape(1, 5) self._name_to_top_map['rois'] = idx idx += 1 # labels blob: R categorical labels in [0, ..., K] for K foreground # classes plus background top[idx].reshape(1) self._name_to_top_map['labels'] = idx idx += 1 if cfg.TRAIN.BBOX_REG: # bbox_targets blob: R bounding-box regression targets with 4 # targets per class top[idx].reshape(1, self._num_classes * 4) self._name_to_top_map['bbox_targets'] = idx idx += 1 # bbox_inside_weights blob: At most 4 targets per roi are active; 
# thisbinary vector sepcifies the subset of active targets top[idx].reshape(1, self._num_classes * 4) self._name_to_top_map['bbox_inside_weights'] = idx idx += 1 top[idx].reshape(1, self._num_classes * 4) self._name_to_top_map['bbox_outside_weights'] = idx idx += 1 print 'RoiDataLayer: name_to_top:', self._name_to_top_map assert len(top) == len(self._name_to_top_map) def forward(self, bottom, top): """Get blobs and copy them into this layer's top blob vector.""" blobs = self._get_next_minibatch() for blob_name, blob in blobs.iteritems(): top_ind = self._name_to_top_map[blob_name] # Reshape net's input blobs top[top_ind].reshape(*(blob.shape)) # Copy data into net's input blobs top[top_ind].data[...] = blob.astype(np.float32, copy=False) def backward(self, top, propagate_down, bottom): """This layer does not propagate gradients.""" for i in xrange(len(bottom)): bottom[i].diff.fill(0) pass def reshape(self, bottom, top): """Reshaping happens during the call to forward.""" pass class BlobFetcher(Process): """Experimental class for prefetching blobs in a separate process.""" def __init__(self, queue, roidb, num_classes): super(BlobFetcher, self).__init__() self._queue = queue self._roidb = roidb self._num_classes = num_classes self._perm = None self._cur = 0 self._shuffle_roidb_inds() # fix the random seed for reproducibility np.random.seed(cfg.RNG_SEED) def _shuffle_roidb_inds(self): """Randomly permute the training roidb.""" # TODO(rbg): remove duplicated code self._perm = np.random.permutation(np.arange(len(self._roidb))) self._cur = 0 def _get_next_minibatch_inds(self): """Return the roidb indices for the next minibatch.""" # TODO(rbg): remove duplicated code if self._cur + cfg.TRAIN.IMS_PER_BATCH >= len(self._roidb): self._shuffle_roidb_inds() db_inds = self._perm[self._cur:self._cur + cfg.TRAIN.IMS_PER_BATCH] self._cur += cfg.TRAIN.IMS_PER_BATCH return db_inds def run(self): print 'BlobFetcher started' while True: db_inds = self._get_next_minibatch_inds() 
minibatch_db = [self._roidb[i] for i in db_inds] blobs = get_minibatch(minibatch_db, self._num_classes) self._queue.put(blobs)
[ "st.cysu@gmail.com" ]
st.cysu@gmail.com
ae02b14171429a5182162ab7f4da4271b917afb0
5f6c16e89cf58304c2e70f1e34f14110fcec636c
/python-swagger-sdk/swagger_client/models/inline_response2006.py
07fbec9fdc5ad9c1c909603b3c658606843c2559
[]
no_license
mohammedpatla/secretapi
481c97901a5e92ca02e29470ab683df80ea0f26a
df420498bd0ae37fd1a152c3877a1342275a8f43
refs/heads/master
2022-12-25T01:55:18.038954
2020-10-04T23:13:54
2020-10-04T23:13:54
null
0
0
null
null
null
null
UTF-8
Python
false
false
8,041
py
# coding: utf-8 """ API for Secret Network by ChainofSecrets.org A REST interface for state queries, transaction generation and broadcasting. # noqa: E501 OpenAPI spec version: 3.0 Generated by: https://github.com/swagger-api/swagger-codegen.git """ import pprint import re # noqa: F401 import six class InlineResponse2006(object): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'inflation_rate_change': 'str', 'inflation_max': 'str', 'inflation_min': 'str', 'goal_bonded': 'str', 'unbonding_time': 'str', 'max_validators': 'int', 'bond_denom': 'str' } attribute_map = { 'inflation_rate_change': 'inflation_rate_change', 'inflation_max': 'inflation_max', 'inflation_min': 'inflation_min', 'goal_bonded': 'goal_bonded', 'unbonding_time': 'unbonding_time', 'max_validators': 'max_validators', 'bond_denom': 'bond_denom' } def __init__(self, inflation_rate_change=None, inflation_max=None, inflation_min=None, goal_bonded=None, unbonding_time=None, max_validators=None, bond_denom=None): # noqa: E501 """InlineResponse2006 - a model defined in Swagger""" # noqa: E501 self._inflation_rate_change = None self._inflation_max = None self._inflation_min = None self._goal_bonded = None self._unbonding_time = None self._max_validators = None self._bond_denom = None self.discriminator = None if inflation_rate_change is not None: self.inflation_rate_change = inflation_rate_change if inflation_max is not None: self.inflation_max = inflation_max if inflation_min is not None: self.inflation_min = inflation_min if goal_bonded is not None: self.goal_bonded = goal_bonded if unbonding_time is not None: self.unbonding_time = unbonding_time if max_validators is not None: self.max_validators = max_validators if bond_denom is not None: 
self.bond_denom = bond_denom @property def inflation_rate_change(self): """Gets the inflation_rate_change of this InlineResponse2006. # noqa: E501 :return: The inflation_rate_change of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._inflation_rate_change @inflation_rate_change.setter def inflation_rate_change(self, inflation_rate_change): """Sets the inflation_rate_change of this InlineResponse2006. :param inflation_rate_change: The inflation_rate_change of this InlineResponse2006. # noqa: E501 :type: str """ self._inflation_rate_change = inflation_rate_change @property def inflation_max(self): """Gets the inflation_max of this InlineResponse2006. # noqa: E501 :return: The inflation_max of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._inflation_max @inflation_max.setter def inflation_max(self, inflation_max): """Sets the inflation_max of this InlineResponse2006. :param inflation_max: The inflation_max of this InlineResponse2006. # noqa: E501 :type: str """ self._inflation_max = inflation_max @property def inflation_min(self): """Gets the inflation_min of this InlineResponse2006. # noqa: E501 :return: The inflation_min of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._inflation_min @inflation_min.setter def inflation_min(self, inflation_min): """Sets the inflation_min of this InlineResponse2006. :param inflation_min: The inflation_min of this InlineResponse2006. # noqa: E501 :type: str """ self._inflation_min = inflation_min @property def goal_bonded(self): """Gets the goal_bonded of this InlineResponse2006. # noqa: E501 :return: The goal_bonded of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._goal_bonded @goal_bonded.setter def goal_bonded(self, goal_bonded): """Sets the goal_bonded of this InlineResponse2006. :param goal_bonded: The goal_bonded of this InlineResponse2006. 
# noqa: E501 :type: str """ self._goal_bonded = goal_bonded @property def unbonding_time(self): """Gets the unbonding_time of this InlineResponse2006. # noqa: E501 :return: The unbonding_time of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._unbonding_time @unbonding_time.setter def unbonding_time(self, unbonding_time): """Sets the unbonding_time of this InlineResponse2006. :param unbonding_time: The unbonding_time of this InlineResponse2006. # noqa: E501 :type: str """ self._unbonding_time = unbonding_time @property def max_validators(self): """Gets the max_validators of this InlineResponse2006. # noqa: E501 :return: The max_validators of this InlineResponse2006. # noqa: E501 :rtype: int """ return self._max_validators @max_validators.setter def max_validators(self, max_validators): """Sets the max_validators of this InlineResponse2006. :param max_validators: The max_validators of this InlineResponse2006. # noqa: E501 :type: int """ self._max_validators = max_validators @property def bond_denom(self): """Gets the bond_denom of this InlineResponse2006. # noqa: E501 :return: The bond_denom of this InlineResponse2006. # noqa: E501 :rtype: str """ return self._bond_denom @bond_denom.setter def bond_denom(self, bond_denom): """Sets the bond_denom of this InlineResponse2006. :param bond_denom: The bond_denom of this InlineResponse2006. 
# noqa: E501 :type: str """ self._bond_denom = bond_denom def to_dict(self): """Returns the model properties as a dict""" result = {} for attr, _ in six.iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value if issubclass(InlineResponse2006, dict): for key, value in self.items(): result[key] = value return result def to_str(self): """Returns the string representation of the model""" return pprint.pformat(self.to_dict()) def __repr__(self): """For `print` and `pprint`""" return self.to_str() def __eq__(self, other): """Returns true if both objects are equal""" if not isinstance(other, InlineResponse2006): return False return self.__dict__ == other.__dict__ def __ne__(self, other): """Returns true if both objects are not equal""" return not self == other
[ "lauraweindorf@gmail.com" ]
lauraweindorf@gmail.com
c2901094b0c4b4a53907e0010cd8c43666a720bb
c9500ad778b8521aaa85cb7fe3239989efaa4799
/plugins/get_url/unit_test/test_get_file.py
1b7ecf108a88e562d02711af4289979fc0778ff2
[ "MIT" ]
permissive
rapid7/insightconnect-plugins
5a6465e720f114d71b1a82fe14e42e94db104a0b
718d15ca36c57231bb89df0aebc53d0210db400c
refs/heads/master
2023-09-01T09:21:27.143980
2023-08-31T10:25:36
2023-08-31T10:25:36
190,435,635
61
60
MIT
2023-09-14T08:47:37
2019-06-05T17:05:12
Python
UTF-8
Python
false
false
37,536
py
import os import sys from unit_test.util import Util sys.path.append(os.path.abspath("../")) from unittest import TestCase from komand_get_url.actions.get_file import GetFile from komand_get_url.actions.get_file.schema import Input from unittest.mock import patch from insightconnect_plugin_runtime.exceptions import PluginException sys.path.append(os.path.abspath("../")) @patch("urllib.request.urlopen", side_effect=Util.mocked_request) @patch("insightconnect_plugin_runtime.helper.open_cachefile", side_effect=Util.mock_for_cache_creation) @patch("komand_get_url.util.utils.Utils.create_url_meta_file") class TestGetFile(TestCase): @classmethod def setUpClass(cls) -> None: cls.action = Util.default_connector(GetFile()) def test_get_pdf_file(self, mock_get, mock_create_url, mock_cach): actual = self.action.run({Input.URL: "https://test.com/v1/test.pdf", Input.IS_VERIFY: False}) expected = { "bytes": "%PDF-1.5
%����
3 0 obj
<< /Linearized 1 /L 15007 /H [ 678 125 ] /O 7 /E 14477 /N 1 /T 14726 >>
endobj
                                                                                                                 
4 0 obj
<< /Type /XRef /Length 50 /Filter /FlateDecode /DecodeParms << /Columns 4 /Predictor 12 >> /W [ 1 2 1 ] /Index [ 3 14 ] /Info 1 0 R /Root 5 0 R /Size 17 /Prev 14727                 /ID [<4dac181eb10e569cb7930abd3bbd36e1><f8f4a6b9f7562a333372614367963140>] >>
stream
x�cbd�g`b`8	$��XF@���*��	=��.�w 	F4 �.
endstream
endobj
                                                                     
5 0 obj
<< /Pages 14 0 R /Type /Catalog >>
endobj
6 0 obj
<< /Filter /FlateDecode /S 36 /Length 48 >>
stream
x�c```e``Z� �Yp e31B�����R���v�a  ���
endstream
endobj
7 0 obj
<< /Contents 8 0 R /MediaBox [ 0 0 595.276 841.89 ] /Parent 14 0 R /Resources 12 0 R /Type /Page >>
endobj
8 0 obj
<< /Filter /FlateDecode /Length 118 >>
stream
x�mͻ�0F�=O�b��iV$��C����P���S.#�w��1�ڡP��KO6t��3CY�Cw[�2tO�=E
 �����Bu��M���4����!$ePH�^�� or[s/��"�
endstream
endobj
9 0 obj
<< /Filter /FlateDecode /Length1 1578 /Length2 10778 /Length3 0 /Length 11818 >>
stream
x���T�۲-�5�KC�������@h�qN�	�����]��#{�{�>���xo0F�ͪY��j�Z��Lb��@i0����� ����
`e�`feeG���A��2#Sk�]@`�$��f�W��䕧� Ȼ��8 l��l<��� vVV��!:8�$��@� %f���L-���������?� :z �_� 1{�3��P2�� �_O�0�h8X����JA'h�8򳰸��3�ٻ0;8[�3�A�:�����i�lf��3fdj����o������x5؁,�`��W�%��z8@CN���MV�����l l�l�N���?�@࿂�,,���� �5�
d�H+2C< � 3�������k���������W�f i15��k��j����qav��i��O��)K�-%��`���$A�@�ױ{��}���`�+���O���,Z`��+PN�_�W�l�@������� t =,lX����t��d�c~������`���d|����b�@�]����t�7BfcX�,  s�5����f������A �W��X������U^�`;�����_i9=u��;��O\������
`b�b������Q5����ʁ� |W�:�����_���r��;��ëj� ���ܐ��������Y����)�O�����wAҮvv������f� ;�^E�
y] %��5 �o���UZ�\���Wb��b`k���"� Z�� 6��o�֟-����.�?�
���j���u�,>�>.����|ݜ�>R
l�`�g�ع�f��f�ȯ���� �l��h	��K� f��5�ڞ/����ύ�� X����B*`��d� ��,V���^ ��? '���ߐ��y:ڼ>&�a����`��|�0�Ȝ| ��G�����_Os�Gi��{��kZ��ί�_z~���^' �h��8�`!l[�r[%F�δ=&4M���J�����z���D_���|-�4؅��)Ew%�D��}�X��)A�����$N}r�ya�o<�P�{/)	���ϓ��v��F�vy�\'W^t�/ط�=2�{K�GB��v*�PK����"��P�g�P�C�H�c�y�������!��c@�=��(��_g����Z-�dw� �"�' �������K�ǟ�.*X�[�h, �AcLYa�`�cϨ�G�뻫�F�ضr� ���}�[	���8&Δ*���&ؐj�@ҽ6+�Ֆk�N+��%���/�4=�C��N�Z����>��Д������N_�o"�"$��l�l�W��3HS�J �o:t�r8�KnI��pf!�ށ���Dl�����p��#$'\�>�Ѿ�ug�m�3��v�XFA/A��/��)gg��6R���tyR���~�Ъ	� �f�JX����%�=��7����
8�d���+�=m���2�h�k5�r}U��:�`ݫ
�Ỷ�۸.-7��wо;+w�������M�?��E%MF,�M���l7���cMq7
����:�B������%r�Դ�J�
���|�$�	��E�>_���״���A�fb�FSg�,ü3i��>&'�м���k�{~jt�bxr��ۮ��gO@�+2�.��b~�?�rM�.2��*�᧖�X�Iu�Lp�h`V�B++��ؑ, ��6�b��\B��W��=yL�P��a�O�+��R!�=h�5I�0����G)�<5p)qz8���z.1�b~Y�
�� ;��f�=E�U�n ��f"Ԧ�,-�@ML�:�*�TE05&-������Jf�Ş	�1(yH	M�7|,w5� ���9�Y����!t�ykwJi���s�0*�ʗ 5��ݱ�Iӏ?���X�}�^���Ok��2ڜ��^+��G�����$4��I>�C1Ʌ3Hj���P�d�%�����B-�E�Nkl�uVpe��xHג������B�k&�}�=	��?Z�Q�9��65�p�p�Qd���5}�:�^��۷cAĺFZz�$����Z6��ͽF�8gx�x��4�N�A(t���9�����@p0H�Ɉ/3��m&�/.�݊LF�L��@�ͮ�:��R�-�JEe��_�����n�Ap8���'B$T��a��N4���t#?B�����Q������k|Z�������z��J'_��Ǌ�Dz�*$�Y����2<�΁�ӈoF���~ �k��&�P�Z*J4d�-�\ύ��{���:Q(ZNUl;~�b�f���}L�ߩ���uGu|7q��O�S�>9�����/v#���5@�ɓ����[׽<q�ix|�:6tądK��O����� Ri��`E0�ƅ;M1��tUe���m��� m���S�ܣ��}���
 =�F��b�TW�A3@�tt`�#Z�3ZN�_n�bNͺ�
���4��'/a-���;ˮ�ZU�=��L[�^C\�eG���D"[���M��@��p
ݻ�]8�4|<%bT*T0����C<fg�&>ݖ���s�g?%��̵g��V4�-��n���j��3H69��
�aK��9y�O�wi�o���'dz��V`��^���#������J�J)��+����1:�$������+� BƼӀ��Nձ��
Z����aJZU���Za'�=na	L�t��0��/��V�i��\���!��������l�\��c�#M켉o�j7s�&�V��D�k�fn�^�Z��&U9�8}��ld�K����*c����C�WKF���8�����ZFæ�Ѱ*T�U��A�-���sdAGݡ!�9B�B뢔���
ҷ�y��qm��vtv��YK�S�Q�{%�ɳÆ����8���F>J�6���� �]�?q��� �{�u""����#Q��"4��<+qF	/�R�,��$��G%�z�I[�&������$�#�Rw!���}�:��-&F0����s���nݭ��|-������vM�� �3!n:	�8�d�M�%�����%&[0�����|a��Ѹ�
as-/�Bug6�����n��`��(�lH$GjVQz)�(�>n�
0�8�{b�m� �rIU���%�b�䘬4&����:���#C��Ǟ<a3]�ue�te8�\���8c�����Fvh�\���`Mu	Ĝ ��a��?��KYX?��E�yh{^�-aNfQ���H������-�44.�z����Y��3.�AO�\���Rb)�j���*��i��3	v��t�(�ʗ���
,�˺����1�޷���%m�bc���t�	��ĠIH��h��Yo��UK�.pPm�٘��*�m����h�Q��;ҏr��Mk�YN�v伤sǯ�Xp4�2}՟XO.
o��iL��B���Ul煲C5îs�]��w��g��u�R�0{hT���4��*K��gI�Z5R�`��ZR�����&�yԞ�2���䫒iP~���w�&��JD���#炌���^�c='�oK�	jA�͂�'����c��ZR_�.�#Ϧ�:f��F���	C뫉���[\�P�zI��0(���i{̵g�V����,�u���>��u��YKc���`���P�E�kvs�'ґ��I���֖ό?d���|�;6U��j��l���������q���Ҵ�3�7[*�����ˉKNW��X�}Z�P�͐�ՙC���w��
"��y�7�"������S�V|0ʺ�c���j�%Ӥ�u��L���c;,V�[;lT�Ə��{�̃=�?����<�܂��Ԉ{U}�	��.�6ӂ����j�S�.��W
^��|{���O��Ȉ�<��Hz��F�Crޤ���9���% �'g�Q���ՇK'���ݟK���Zݪzp�ˆ��� �<�
,ː��`�aSr���Z�|ܹ�%kH9]j ˖<�vl��|�W�(��#�,�tY��s�Ú�w��#:Dؠb�w4Aƥ�!��\��ψ�&��޳�TG�6�o�u�S�أ.nL�.nf�~���$w^/���K�[%����2�i8vts��:b��������<
ߟ��'��4:$�%[sj��T�k�vߎ@���[��f��H�E�����&���+]�hm*��1e8&E4prD���O<c��ek�����x}1Q�dU�E
�~�q{~t��W�5 '�ٵ�9ƅ�~����uWWp�%��AH�j�xſ����=��(�$E�*3$0ք����&��?�&m�Y6�@�`�1xa�5 b�(�����%JAv���q�*ي��b��`� ������8�F�}��T>Yii(8Ɏ�|��
��	8���b�̥������^^�i.�\���8]������۳���G0I1���tt uG1-�8��]4�C�@�gQ U�pD��!$�8"gD��vxw9݄i�>b��hS�Ǵ�Q���ӌ���'S<;!@�Eb[<���"����6IG�|�'���T?()k��Ʀ�3n��J2�s����\ee�����%����w��K�U�Xz��-i����z�/�rP��4ɂ/p-�'�@70/v@�t ��vu	G�Y���g�&��4N��W�3��@�q��ϿX�|9֒3��7)e��1�wi��\&f�uu��|����zꢌQ�g�mG�)d�N#Td_G��M�~D�}����ېop�C�	0Zy�L��P�v�V�F$�+M0X�����x���\4�� )ݬ�֟	'M��"<t��A�V��"���ۻe��	�����)���7�
�&Z9��'��j���Ǽ�iծ�#��/�R���Bc�f{�(6�*`L��O26o�Q�y]=W��/�W/�\��|-���:=?R<��f��v�k1�C�Ar�w@���m*!=�t&@��}Ri��g�	x<�2׽�&��_�=^A�!����1Z�nPdh^�����Q���ʚ?8�*t�EZ�q��o�9�oHA��+�V�AOԙ3���h��홱hG56�$�G�<�	x��]�I|/�߯3�Q)��e"��p[��V���9��5�q�������f� )��ϰcUb�gCEz_�m$z�tg�`<�(�p-�U
�����%��t*V�a%���	r-��Z��h�<=���s��'���揉Z�$CP/�Y$�A0Q�Jbq�7��J��l6��f��� j��)�c�h*��Rm�[&�P	2�x�A����V�.����K�[�\�E���YU�����xW�{�^C�S�>���n�x~o&[�E���"������V��l�9��J�^mz��7�	K��䃩�YV� Z�A�X8��TۈT��.w�N�#32b�<�Ѕ�Y0>���P������m�^?��;�:?�Ҷ��\�SY��ѽ�+�D�.#���"�vy���'i�>��*�
Lk���L��������4�7�>~~�;��6��H��*�a���gp�%R��7-���K��Q���X�Z���4�nl�{�Ն���7��[�X_�p��u�rɦ�~ɘ�H7�pR:�d�j_~D�)����>��˿L��f�i**����Uda�<)����s�.���'��;��kq�$������b���ѷd��_ݔ��5x�;���*I��Nm��Hi����QJw����������󶩸�j4�4�0?�[&�PLy��v�+�5N{}t�}�'�o�-�8�Ȱ��[&�n��P�T�;xm�����N���+X�"��q�J���SZ��Ա1�Rj	~��2���p�X[R~�'��I����7	���>���=+��7�_�U}��)�[��ߍ�i�֣K���=��$�����!u��${�d]g�_�z�&Ri��8�<�5D�s\�k��ؚc/S7@��X���T^N2I��tzU5·�o��t���e��p9�a��MH`0*��b������:�.dgFO�w�h��.H���*�� h�v��V��7۬����rZ5JK���,3/|�|Co�7�Dv�}bD	�Wn����]y������ꚹ>�h�2D�� 9~~���e�/��$ã3*F+�'�V7���S���[�9n9C��ݕk�G�Am��7�³�|��X}��)�pO9�����]��3�`F�C�T׊��b�x�>jd޻�W�Yc��:��[ھ+h�\��Ap�G)�lqq@cj�GV���٤,�Q���f��~�������Gq�V;���ߪd־�[��i�ܙ
{�aܜ�w����G�LÕ�xd�$xGK��wo}��!m�l�}b�����τ#�n]�
�>�H�!�I�?cc3�S3ԂkV~
ju���.-�X0\S�W#+7��kp��u~��*ɦ����n���߲�el^�Ӌ�LW�
�_v�G���b��L�\ޭ�U XJ��" f)'Al����d���2��x;����I�~4@��p~U�@c[5b�B�D�6��g����Ԉ�)��*O'~%ٱޖ� S�� ��RܼQ����*,(�svjA�Ɲ��;06ƿ�6�G������[���,tt�&�݃���F���^þ�F�B�&��x��4-j��j����oT�tT�K�-�~��R�Χ�U��*�T�H�PCVF�B�Y�v4t:��� h���d�ߙO�"���C��.!H�UN��n����e�p{g�+��	-Q/ˡ��F��bA�1���AC�ћ��y~��׺q�I���42X"��}�����%�R8'Kb<cQ?`o��&�T@���D_��Wk��X�J���g�Gm�=�
�PDEh
]�v��m�췱����e⤊�s>��t��*�br�*�=�Yh�PQ��m�A_	W�)��`�i��<ܪx+]�M.j�����;Y�!zCCU�&��OM�^a�E��TM��*U�v�N�RHLM�s�t� n<6|�n���NU ��kID��x/��1+��V+���D�`��b��n��v�'O@~�#�Ǆ��g^]˚a�5���N�i|���)~�"�j.�����/O���7�hTkmD��"C�,��+Zb]��Vq���{���[�,TQ�]�.�
��H��Du��m�"?��:m�T��/�Ķ��~u��/�n��[��ZĂe�c��ٱ�;U��$ �۔��s����.�C*�?GA�Q�0T�M{��cԲ9S���9��2�3�����L���\��]ĸ�>:͡�F��Y�j��i���ΔoWU|6�B)f�
�����V}9,ĺ����~C�N������ ��p��jo�L�sX����[�uwa��Z����N�F�8��!;�m"=~��)�I79����:�k ])=�<k���U���V��U`��O;�"́UD���w�c�uz%��M�?s~#|�~�i��'@�� �C���8z�җ�4ԒV9�p��r�P�5�Y�|���ԭ�Y"�%F���:���l�� .a/`�㘂9����tR��h�����	�%D*���K����';��c=�����U�o��ʔ��ϙ��?��һy��═��l\�a�����p���ٷQ�`�m�$�Wgq�У(L6 @2�M�[ό�͋��~�<���֎E..��F�c��N���5ȵ��쯖~�>ҭ��T��Ѝ���1�h*�e}@LQ�#�R��y����o�ϮM�~Ę���<Y�L���}HE�h~�D(�г(��L���iw)t��vm�6��t�A3�*T�1������偞�Jw�<����+n�@\4Q�uX^:�OD�����>#|�n�q~�~�$�fќ��9�#�AdЍ�C�RV���t�����n|��7�y�o
֢��\�q��eE1�����߻�a���f��!U���n�_rD���D#=�;����n�(% ��^VCy�T����-y���h�c������ �$�&m��am9�,�݈��А�<���Ɍ
I�p�z�c�Ĕ9���=]n��1=�؜y���h!�&�8�Hn�:�τR��S������9�x_�q6&ߪo��ٷ/ܽ&��gT�ĵ �I���P49$��IE��ۨ�`i-�fo��|-�TRntC$E��[���0�^?�NDJ�˴��N�ة}�i� ;�K O\�;K^
�������{m�cz53c!�?*�0��5�}'*RH��3�U`��J�[١���`��9��?�Bj�F�����0HzB=U/���,Wh�gQj��X�ڥ<A��
�x"\��s���8�6)�*�ړ}4	�ˁc����;>|��U6\�J6J�j��L�45e2Q�,�#��]$����HU!�3-b�kJ����US�<���������+rZ`􇻖Z#��
|�}��{�f*1��@�i��ID~�B�UA�Ɛ�b��<�)��g��ܾ2�,Z��;WH���bGiI��,�7�&ͦ�Yds�M���X��$F̯q7c�~o3s�6�ְպTdb�_�G$fxF[��Y�	�fsx
��k5^�? v;����c�]�z����o�\���߉��|�2+�I�]JZ
���̵���U�a����|����ڟ���i�W��U�rR���\0�B�/��M��@H�,��@j�Y�;va���n#j-s�+Y|�P���K��=C�h��V���ۛ��u�e��ā\TS椉5>-R�s�gش���?WǮ-��Ha9H*���3��wD��"�K��K�σ���_Ҝ)9j pv�eE�j;Z�J~=@�ŉ��d��j�X���(��#�˭K`T�Ş�x�F&� zD��pGz� �N������d��Ȩ`��PX�;Į4�-'wH+��Dt	J�aM��!f7<��[ ��Y���w�w�H+Dc�I�l~���j����=���Rǵ\�C�Ȥ�;���8h�H�K��[nV�fP)��\'kJ��Mc���S�r}w�~>xf�F�5c���aC\���t�	�q��p�b����+�So5ȸ������+(Z�դ��b�M����h�F� 9�Β�� �5���:� ��1)�͏�܈�I"��>�u�K%��fH����`�oT8�8�M	�Z��fW�X}�Vf,)�Ԍ�2;+�}s��*^�ƻk�ʠF]<IIbD��
K�����]:�'X{o5g�L=B��pHr{b�Zd��>��G*����%���`�]��O�>��#�D�*�Y&��W�~�i�|$�4p&
9�)K�o��pZ�ς�r���kT;��R��+��@�w����tQ��͚�pĢ���E�u��b�n6-ԋ�� RkY~�oB�m��Q�ߥkD����D��·40���;`��peNg�f��,.�k��E��Qg����z_^
ѯ�q-���Gi�}r�+���S���W�� \�����m
�`����U���g��+onA�6P�bAhN2ݵbC����G Ws�M���:�ß��B߯��|�O��f��;��Yݲ\���P��ŜP<Q$zo�<Z�W�G�:�e�6e����6!���\�oѫц 9ƷC��i����ڦ���}��X(�(O4n���(�Rt7vM���َ�|Z9��P�7Q��F8��z8��gd�TQB��N�Q��{���F��B��N#׊�	+��$3�c��
�Tf�zִ��(_���X�e��Y����Ǐ�}cr$���v�4�s�G�l����� ]���Zi;e0�=EL��݆Zh���=[0H�B:9ʒGpw�Lϟ�b˜Ga�)^����v��0�|I���!�G��M<x�*%@���2�vs��ǩiW9��WD��^�-\��P)�t_V�<ɍw�h`d�Rv�gˠ�J�Ð���:�N*Ș7�?`2W������'�)����~�Ap�~[\�
�e����gD��R�ZT�c�O'��1��ρ*����f�q���ZyG*�5�����2:���A.|C��s6�^��6���R��7�"Qz�:�yKW���E9�a�o��}93�'���ŷ����Y<5�M�0��o�C{+�q�𥰬���}qf�u���K�N���W��k�vv��dd�Q�� Yߊ?"�	�yhj��RZ�'���W�P@q~~2�K�o���%8)�G��w��@�d*quB5xn�i�{V���g����j�L�n�$�@�У�J-�c�����x��v�ҋ�wX��G������P�DS�*��D�FoRE;�����`��f�tY�,��i||��U}��-� ["������$��ay�4@hcP�ّ��ذ������͵��o��]]vv;�V<`��r�$�H�g�N��{*�(I�le]F"�b�e�R0������ׅMi�A�ت�?�G�˹���N�Ep�O4�6�X�ї��?����qѳŗUv8y~B�� ��e���/��c(,���xX~N�ĩ����,%�:�CX�R�4xzS�)ƻ���A+� L���H���:G����r�C���fhI�{���ܦ���6a�ַ1P��b���sSR��W悏�i��b��y˻Jܗ��}���;�&RL���D;��q�PL�F��֘��\�3�x0��?oa%�����/�yQ�������LӠ�6"�����6�}��}6�bih@8���c��+�����|�:�}Z]�5�Ӵdސ�e\է	���w�?�T8U�����/�Qe���7V�Wg��
O���\�Ss��,���7��V�zbh
�}�L|�i$|�7��%"�/B*9H��Z��G֍��P9�z�P����"�$0�ԿCu���Ǧ�YCBTV�I߽��W) d�+�b�����ӏn{��,麱��i�vqu����e��ǐCER�J.�ٹԷ$e�	����|WD�Xb�{ͽk�{�>�m�K�G�DNh,��X1b4�������'TfE��x�4l�$���oK�ss���S����B��]n,��)~��_��[7�@��������-��{�����F�Ы�u=�s�h~�4���:a-�-5���
�	�@���W^ߡ(��Q�%M�BRKeL��<���Y��a%#���,Ml��s���ܵđ�GP��X$�[��S���5�F#P5����w���>v	��biT̵��rV-��Y3E�7��h:�N�o����!������W�P��ᙛ�$��YA�J�I��^w]OO��#��0Fm�;��|c�N�*�)̴�K�di�n]��V"�A:?@������PCd	��*px����_���M���e⁉�ل8g����(��d<	A&E�Ż����hO�Sտg\�l�r����t-JGE0,�N��z������� �?G1כ�QC�+ȽC�k�k8۵�/�c$�#��H��=�KN�F���Ʉq8^\���������پd���O�ӳ%i��=���+�7H��Ĩ�Y1ٖ�Xǵ�3�+�f��KC:�}�aWM�}z���N��U��F�P �d�>��X�p����t��y�d$�\#�㪫�s������(*WeMo��z�Lg��X�E3��ޞ�������M�u�qq��%̇�6|<Ɖ��`]���������w����5��թ��F��ػ��Һ�P�^�'�e^P_�����3�m�t,@���P�{ᗗ�Y]�����΋��J�t�̒-�eDgM������9]oqg�*9��$���okH���O��4�RZzI��tCV��l��ͧ-st_He��80��:gj��[Hʥ���8Lh�?]G�:
endstream
endobj
10 0 obj
<< /Filter /FlateDecode /Length 740 >>
stream
x�mU�n�0��+��J�b;$�
!�	�8�*�j���n$H�$����#�l{ ����3�`�~�l'�l�f>r�j���f��ܵ��]�瓩�_Ɣ�g�'��5���>�d��,yS�siF��$mޫ�S��3&ũ|�?Wǡ�'ܷj8Z�w����M��%�M�WM���#���u�6'x��E���U]v1li������2r���o?���6��	�K6}����}8����+MW����F��ٞ��h���`�b�9؆����ɰ�w�����0�ƂTMi�vW�nW��`���-�|���o.���HM,����h,eh��Q��&CM��-���,���8Q�`q�L0��h�z(�P��.Vר �������,�h,%��%ա���5���8�8pL������B���$q�Ʃ/0��8�x��?r��x�y!�B��=�X������y���82�VAנp�"����Z�q�x�8tkxΛ��_�����S�8k�H`�����n���k̀��RONH=CpB:#=�%8��88њ�BC��/�9�!ɨ~B�}���Rq҉�T��FI��ܨ�ύ�|nT�s���|neEA��xw���I}�Ɵ����I��y��k�t��g>O:�yұϓN|����I/|���y���I�>O:�y�k�'��<���)>O��yJg�;s�|�K�ۄw���箳�{l�C�'����=n����=���F�y���P
endstream
endobj
11 0 obj
<< /Type /ObjStm /Length 522 /Filter /FlateDecode /N 5 /First 32 >>
stream
x�uSYk�@~ﯘǖb�ޒ b�!m	�{���I�HFR ����U�M҂4��ͱ3\ .A+�����Hg �����kG�ϝ/�����]��#�{�����d�Q�b�>��W�?�>.��8#��qQ=p�/�P��a�z�ա��ه���ǃ�DI�t�ۦ�v��o�9�����zp}�B}5��l{���8����ț�����ub&�I�ȉ�4M2��[{󧚁R*QA"��'�L��I>�ED�KHs�b/�e���mQy�*��xSE@U�+=Рq��,s5�����d�Fa�k,�T�z?ix�AG}�!�f�O:;�K�"��'|�əxNO�����k.��^#����o*N�S���C�Z���YC�t���b(--���bq�l��&1�$Ҟ�3z�����5Xa���P��c�]k�=? ��qh3�����@���ϻGZ�]�L���o��fo!G��oŽ}}�Wc�oʋ�"Wף��A��W�,���Q��z���_
endstream
endobj
1 0 obj
<< /CreationDate (D:20211216143257Z) /Creator (TeX) /ModDate (D:20211216143257Z) /PTEX.Fullbanner (This is pdfTeX, Version 3.141592653-2.6-1.40.23 \(TeX Live 2021\) kpathsea version 6.3.3) /Producer (pdfTeX-1.40.23) /Trapped /False >>
endobj
2 0 obj
<< /Type /XRef /Length 21 /Filter /FlateDecode /DecodeParms << /Columns 4 /Predictor 12 >> /W [ 1 2 1 ] /Size 3 /ID [<4dac181eb10e569cb7930abd3bbd36e1><f8f4a6b9f7562a333372614367963140>] >>
stream
x�cb &F�^&�_ ��
endstream
endobj
               
startxref
216
%%EOF
", "status_code": 200, } self.assertEqual(actual, expected) def test_get_txt_file(self, mock_get, mock_create_url, mock_cach): actual = self.action.run({Input.URL: "https://test.com/v1/test.txt", Input.IS_VERIFY: False}) expected = { "bytes": "dGVzdAp0ZXN0IGZpbGUKc29tZSB0ZXN0IGRhdGE=", "status_code": 200, } self.assertEqual(actual, expected) def test_get_txt_file_with_checksum(self, mock_get, mock_create_url, mock_cach): actual = self.action.run( { Input.URL: "https://test.com/v1/test.txt", Input.CHECKSUM: "5084335576ea9ec4e9d1dcd7536dec3713b3a57a", Input.IS_VERIFY: False, } ) expected = { "bytes": "dGVzdAp0ZXN0IGZpbGUKc29tZSB0ZXN0IGRhdGE=", "status_code": 200, } self.assertEqual(actual, expected) def test_get_txt_file_with_bad_checksum(self, mock_get, mock_create_url, mock_cach): with self.assertRaises(PluginException) as context: self.action.run( { Input.URL: "https://test.com/v1/test.txt", Input.CHECKSUM: "5084335576ea9ec4e9d1dcd7536dec3713b3a57aa", Input.IS_VERIFY: False, } ) self.assertEqual( "Checksums between the downloaded file and provided checksum did not match.", context.exception.cause ) self.assertEqual( "Verify the file you meant to download and the checksum you provided are correct.", context.exception.assistance, ) @patch("insightconnect_plugin_runtime.helper.open_url", side_effect=Util.mocked_url_open) def test_is_verify(self, mock_get, mock_request, mock_create_url_meta, mock_open_cache): actual = self.action.run({Input.URL: "https://test.com/v1/test.txt", Input.IS_VERIFY: True}) self.assertTrue(mock_get.call_args_list[0][1].get("verify"))
[ "noreply@github.com" ]
noreply@github.com
224de5824678703e1aca1afb898f6e2a2f618dd5
c463d53ad1c0e345dd2f2704bbc55a9c02c1ae3b
/TLR_lidar/xml_to_csv_txt_all_class.py
4989376cfb464594a11ff6ba5a2c221844dca9a6
[]
no_license
Rsysz/TLR
ffa254ddd946ac275c068c15cb2a99aed5c19f84
e2a9c35c72108d09ea2fb31fc5b1da321f1d7605
refs/heads/master
2020-07-04T04:51:12.271793
2019-08-13T18:06:02
2019-08-13T18:06:02
202,161,734
0
0
null
null
null
null
UTF-8
Python
false
false
3,456
py
import os import glob import pandas as pd import xml.etree.ElementTree as ET def transfer_class(name): if (name == "Traffic Light"): return 0 elif (name == "Red"): return 1 elif (name == "Yellow"): return 2 elif (name == "Green"): return 3 elif (name == "Straight"): return 4 elif (name == "Left"): return 5 elif (name == "Right"): return 6 else: return 'error' def generate_csv(): xml_path = "Annotations" xml_list = [] for xml_file in glob.glob(xml_path + '/*.xml'): tree = ET.parse(xml_file) root = tree.getroot() for member in root.findall('object'): status_num = len(member.findall('status')) filename = root.find('filename').text classes = transfer_class(member[0].text) width = int(root.find('size')[0].text) height = int(root.find('size')[1].text) xmin = int(member[1][0].text) ymin = int(member[1][1].text) xmax = int(member[1][2].text) ymax = int(member[1][3].text) value = (filename, width, height, classes, xmin, ymin, xmax, ymax) xml_list.append(value) if (status_num): for i in range(status_num): classes = transfer_class(member[2+i][0].text) xmin = int(member[2+i][1][0].text) ymin = int(member[2+i][1][1].text) xmax = int(member[2+i][1][2].text) ymax = int(member[2+i][1][3].text) value = (filename, width, height, classes, xmin, ymin, xmax, ymax) xml_list.append(value) column_name = ['filename', 'width', 'height', 'class', 'xmin', 'ymin', 'xmax', 'ymax'] xml_df = pd.DataFrame(xml_list, columns=column_name) xml_df.to_csv('data/train_labels.csv', index=None) def xml_to_txt(): train = pd.read_csv(r'data/train_labels.csv', dtype=str) data = pd.DataFrame() data['format'] = [] # as the images are in train_images folder, add train_images before the image name # for i in range(data.shape[0]): # #data['format'][i] = 'D:/Datasets/' + data['format'][i] # data['format'][i] = data['format'][i] + ' ' # add xmin, ymin, xmax, ymax and class as per the format required index = 0 i = 0 tmp = train['filename'][0] data['format'][index] = train['filename'][0]+ ' '+ train['xmin'][i] + ',' + 
train['ymin'][i] + ',' + train['xmax'][i] + ',' + train['ymax'][i] + ',' + train['class'][i] for i in range(train.shape[0]): if (i==0): continue if (train['filename'][i] == tmp): data['format'][index] = data['format'][index] + ' '+ train['xmin'][i] + ',' + train['ymin'][i] + ',' + train['xmax'][i] + ',' + train['ymax'][i] + ',' + train['class'][i] else: index = index + 1 tmp = train['filename'][i] data['format'][index] = train['filename'][i] + ' '+ train['xmin'][i] + ',' + train['ymin'][i] + ',' + train['xmax'][i] + ',' + train['ymax'][i] + ',' + train['class'][i] data['format'].to_csv('annotate.txt', header=False, index=False) def main(): #generate_csv() xml_to_txt() main()
[ "noreply@github.com" ]
noreply@github.com
9f269a4fff9077b960987f031b39e54d5b25bf32
62b773144075571e53c18ba362a15cd098ed589f
/data/category.py
bb0adb8fdb61311d1c4b6f781c112f9fbcd0610e
[]
no_license
Viktorrya/web_progect
a0c884cb5fab4c13aaf550a62cad13528f765d92
750d0e3ab82a5121eefda6758ce86b415f616aad
refs/heads/master
2023-04-15T01:37:39.917568
2021-04-21T20:23:41
2021-04-21T20:23:41
356,915,984
0
0
null
null
null
null
UTF-8
Python
false
false
606
py
import sqlalchemy from .db_session import SqlAlchemyBase association_table = sqlalchemy.Table( 'association', SqlAlchemyBase.metadata, sqlalchemy.Column('news', sqlalchemy.Integer, sqlalchemy.ForeignKey('news.id')), sqlalchemy.Column('category', sqlalchemy.Integer, sqlalchemy.ForeignKey('category.id')) ) class Category(SqlAlchemyBase): __tablename__ = 'category' id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True, autoincrement=True) name = sqlalchemy.Column(sqlalchemy.String, nullable=True)
[ "vika_nazarova_05@inbox.ru" ]
vika_nazarova_05@inbox.ru
d3a903414652662f91ef2a9f09ed1a87342d49bf
15f321878face2af9317363c5f6de1e5ddd9b749
/solutions_python/Problem_201/436.py
78e4a99556ff805c431b31596155fa8617440523
[]
no_license
dr-dos-ok/Code_Jam_Webscraper
c06fd59870842664cd79c41eb460a09553e1c80a
26a35bf114a3aa30fc4c677ef069d95f41665cc0
refs/heads/master
2020-04-06T08:17:40.938460
2018-10-14T10:12:47
2018-10-14T10:12:47
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,143
py
f = open('C:\\Users\\djspence\\Downloads\\C-large.in', 'r') tries = int(f.readline()) for case in range(0, tries): lengths = {} vals = f.readline().strip().split(' ') n = int(vals[0]) remaining = int(vals[1]) lengths[n] = 1 small = 0 large = 0 while remaining > 0: lk = lengths.keys() maxLen = max(lk) num = lengths[maxLen] del lengths[maxLen] if maxLen%2 == 1: small = maxLen/2 large = maxLen/2 if small in lk: lengths[small]=lengths[small]+2*num else: lengths[small]=2*num else: small = maxLen/2-1 large = maxLen/2 if small in lk: lengths[small]=lengths[small]+num else: lengths[small]=num if large in lk: lengths[large]=lengths[large]+num else: lengths[large]=num remaining = remaining - num print("Case #" + str(case+1)+": " + str(large) + " " + str(small))
[ "miliar1732@gmail.com" ]
miliar1732@gmail.com
801d1fa1debdb15ffa16da844379ec376718c842
ac157f9f17356b6ff1fa31ae3051b2622f1e2365
/main/views.py
ddbe75a155bb6f0be5b6cb35d92306e5d7d2e81a
[]
no_license
Charu-mathi-N/Django
6b8bf0c0ce9a1acf8c14a5850499fe9c2f665e3b
5b9898d3caead1ab1d5b310904f4a6643cf95c8d
refs/heads/main
2022-12-26T13:33:19.973416
2020-10-11T15:29:07
2020-10-11T15:29:07
291,931,965
0
0
null
null
null
null
UTF-8
Python
false
false
2,620
py
# Django views for the "main" app: homepage, registration, login/logout
# and a (stubbed) password-reset page.
from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.shortcuts import render
from .models import tutorial
from django.contrib.auth.forms import UserCreationForm, AuthenticationForm
from django.contrib.auth import login, logout, authenticate
from django.contrib import messages
from .forms import NewUserForm
from django.contrib.auth import views as auth_views
from . import views
from django.db import models
from django.db.models import Model


# Create your views here.
def homepage(request):
    """Render the home page with every tutorial object."""
    return render(request = request,
                  template_name = "main/home.html",
                  context = {"tutorial": tutorial.objects.all}
                  )


def register(request):
    """Create a new user account; log the user in on success.

    On invalid POST the bound form is re-rendered with error messages.
    NOTE(review): this iterates Form.error_messages (a class-level dict of
    message templates), not form.errors — confirm that is intended.
    """
    if request.method == "POST":
        Form = NewUserForm(request.POST)
        if Form.is_valid():
            user = Form.save()
            username = Form.cleaned_data.get('username')
            messages.success(request, f"Successfully Registered: {username}")
            login(request, user)
            return redirect("main:Homepage")
        else:
            for msg in Form.error_messages:
                messages.error(request, f"{msg}: {Form.error_messages[msg]}")
            return render(request = request,
                          template_name = "main/register.html",
                          context = {"form" : Form})
    # GET: show a blank registration form (the class itself is passed).
    Form = NewUserForm
    return render(request = request,
                  template_name = "main/register.html",
                  context = {"form" : Form})


def login_request(request):
    """Authenticate a user from the standard AuthenticationForm."""
    if request.method == "POST":
        Form = AuthenticationForm(request, data = request.POST)
        if Form.is_valid():
            username = Form.cleaned_data.get('username')
            password = Form.cleaned_data.get('password')
            user = authenticate(username = username, password = password)
            if user is not None:
                messages.success(request, f"Successfully Loggin: {username}")
                login(request, user)
                return redirect("main:Homepage")
            else:
                messages.error(request, "Invalid username or password")
        else:
            messages.error(request, "Invalid username or password")
    # GET or failed POST: show a fresh login form.
    Form = AuthenticationForm()
    return render(request, "main/Login.html", {"form": Form})


def logout_request(request):
    """Log the current user out and return to the home page."""
    logout(request)
    messages.info(request, "Logged out Successfully")
    return redirect("main:Homepage")


def Reset(request):
    """Password-reset page (stub).

    NOTE(review): models.EmailField is a model *field*, not a form —
    constructing it with (request, data=...) will raise at runtime.
    This branch appears unfinished; a proper Form class is needed.
    """
    if request.method == 'POST':
        Form = models.EmailField(request, data = request.POST)
        if Form.is_valid():
            email = Form.cleaned_data.get('email')
            if email is not None:
                messages.success(request, f"Sending reset link to: {email}")
                return render(request, "main/Reset.html", {'form': Form})
            else:
                messages.error(request, "Invalid email_id")
        else:
            messages.error(request, "Invalid email_id")
    return render(request, 'main/Reset.html')
[ "70507486+Charu-mathi-N@users.noreply.github.com" ]
70507486+Charu-mathi-N@users.noreply.github.com
d43bf3fb05aa60d9b398ff7d573dab52da0c20d0
0fdbd907e952abbec7dfb20a4c3fd14587856067
/Que1.py
1c60c7fe539a1e10ec527c6c1eb5436370744a3b
[]
no_license
BYEONGSEOKKIM/algorithm
3de3cf76d746b728a398303cfaa54c2cf3195175
bd42475306f6e6e6600ded3817009157bf58b863
refs/heads/master
2023-07-07T10:13:52.377212
2021-08-09T08:54:17
2021-08-09T08:54:17
388,131,304
0
0
null
null
null
null
UTF-8
Python
false
false
1,788
py
## Queue demo: fixed-size list with a shift-on-full compaction step.

def isQueueFull() :
    """Report whether another item can NOT be enqueued.

    Side effect: when slots have been freed at the front by earlier
    dequeues, the surviving items are slid one position toward index 0
    (reclaiming one slot) and front/rear are adjusted, so the caller can
    still enqueue.
    """
    global SIZE, queue, front, rear
    if rear != SIZE - 1:
        return False                 # rear has not reached the end yet
    if front == -1:
        return True                  # rear at the end, nothing dequeued: truly full
    # Compact: move each live item one slot to the left.
    for pos in range(front + 1, SIZE):
        queue[pos - 1] = queue[pos]
        queue[pos] = None
    front -= 1
    rear -= 1
    return False


def enQueue(data):
    """Append data at the rear; print a notice when the queue is full."""
    global SIZE, queue, front, rear
    if isQueueFull():
        print('큐 꽉!')
        return
    rear += 1
    queue[rear] = data


def isQueueEmpty() :
    """True when no items remain between front and rear."""
    global SIZE, queue, front, rear
    return front == rear


def deQueue() :
    """Remove and return the front item; print a notice when empty."""
    global SIZE, queue, front, rear
    if isQueueEmpty():
        print('큐 텅!')
        return None
    front += 1
    data = queue[front]
    queue[front] = None
    return data


def peek() :
    """Return (without removing) the item deQueue would yield next."""
    global SIZE, queue, front, rear
    if isQueueEmpty():
        print('큐 텅!')
        return None
    return queue[front + 1]


## globals
SIZE = 5
queue = [None] * SIZE
front, rear = -1, -1

## demo
enQueue('화사')
enQueue('솔라')
enQueue('문별')
enQueue('휘인')
enQueue('선미')
print('출구<---', queue, '<--입구')
retData = deQueue()
print('입장 손님-->', retData)
retData = deQueue()
print('입장 손님-->', retData)
print('출구<---', queue, '<--입구')
enQueue('재남')
print('출구<---', queue, '<--입구')
enQueue('혜리')
print('출구<---', queue, '<--입구')
enQueue('산적')
print('출구<---', queue, '<--입구')

# queue = ['화사', '솔라', '문별', '휘인', None]
# front = -1
# rear = 3
# print(isQueueFull())
[ "kqw38598@gmail.com" ]
kqw38598@gmail.com
493a6991de254cce82c5fe80d81477fc8f7d330c
8d5c0a7c33b4ba6cfb34199a5f3bc3dafebc5068
/sistema/bu/migrations/0046_gallery.py
11067b7e097fabe3ceb93f45bcad881c92b3e1aa
[]
no_license
MauMaster/Pet-Aqui
b50ce6517c49eea20944f9aa3f2b41b837fa391e
e87efca8becc65a65c2a85fe3cc666aa5ba6652d
refs/heads/master
2022-12-19T08:12:54.980563
2019-06-23T16:42:17
2019-06-23T16:42:17
162,726,760
0
1
null
2022-11-22T02:23:44
2018-12-21T15:01:30
Python
UTF-8
Python
false
false
764
py
# Generated by Django 2.1.7 on 2019-03-09 23:16 from django.db import migrations, models import django.db.models.deletion import stdimage.models class Migration(migrations.Migration): dependencies = [ ('sistema', '0045_auto_20190308_1912'), ] operations = [ migrations.CreateModel( name='Gallery', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('gallery', stdimage.models.StdImageField(upload_to='')), ('titulo', models.CharField(max_length=50)), ('usuario_id', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='sistema.Usuario')), ], ), ]
[ "mauvirtual@gmail.com" ]
mauvirtual@gmail.com
8377094d248757f5f3f93eae38600898a3c2c105
3a4c1f2ca351b08608fcf9760dca996fd6a913bb
/ejemplo1.py
c3dc60717adef36c08710669265e48258f1329d0
[]
no_license
doppelsito/Octubre1-001
dace87bb1d32c2328d66d304b8166cec9e10f5e5
c83d090c81ac3ed70aeb59354002e2def37d057b
refs/heads/master
2022-12-28T23:20:15.223350
2020-10-05T13:02:16
2020-10-05T13:02:16
300,278,598
0
0
null
null
null
null
UTF-8
Python
false
false
509
py
# Console demo: read two integers, print their sum and product, then
# report which one is larger (or that they are equal).
a = int(input("Ingrese un numero: "))
b = int(input("Ingrese otro numero: "))

# -- arithmetic --
suma = a + b
multi = a * b
print(f"La suma de {a} con el numero {b} es igual a:{suma}")
print(f"La Multiplicacion de {a} con el numero {b} es igual a:{multi}")

# -- comparison (equal case first, then strict orderings) --
if a == b:
    print("Los numeros son iguales")
elif a > b:
    print(f"El numero {a} Es mayor que {b}")
else:
    print(f"El numero {b} Es mayor que {a}")
[ "maur.barriga@alumnos.duoc.cl" ]
maur.barriga@alumnos.duoc.cl
0514757ebf52548a774661c0e89dd759fca5ad93
ec221df89ecddfb38a3a1887152f3b4acdfdf259
/comments/views.py
c728e5e3d884a4ec8bbfe5880844a493699da617
[]
no_license
ThinkHao/Django-Blog
caedd39db70515a6256dea43b85351fea1906fbb
a58a19731e0048efa2c04ba0fd8aef2a6f4e7b6e
refs/heads/master
2020-04-25T14:48:25.498667
2019-02-27T08:34:12
2019-02-27T08:34:12
172,854,967
0
0
null
2019-02-27T08:34:13
2019-02-27T06:13:37
CSS
UTF-8
Python
false
false
799
py
# Comment-submission view for blog posts.
from django.shortcuts import render, get_object_or_404, redirect

from blog.models import Post
from .models import Comment
from .forms import CommentForm


def post_comment(request, post_pk):
    """Handle a comment submission for the post with pk ``post_pk``.

    Valid POST: save the comment attached to the post, then redirect to
    the post (Django resolves the model instance via get_absolute_url).
    Invalid POST: re-render the detail page with the bound form so the
    validation errors are shown.  Any other method: redirect to the post.
    """
    post = get_object_or_404(Post, pk = post_pk)
    if request.method =='POST':
        form = CommentForm(request.POST)
        if form.is_valid():
            # commit=False so the post FK can be set before the real save.
            comment = form.save(commit=False)
            comment.post = post
            comment.save()
            return redirect(post)
        else:
            comment_list = post.comment_set.all()
            context = {'post': post,
                       'form': form,
                       'comment_list': comment_list
                       }
            return render(request, 'blog/detail.html', context=context)
    return redirect(post)
[ "qianqi611x@hotmail.com" ]
qianqi611x@hotmail.com
db166c5dcc339e356cf775d43a928a65440502ce
7130a96ef7c2199cdb52406069fdc5e015760d70
/components/docker/block/SPResnetBlockV2.py
858733a371f31bb60c735dd0184b8db52d6b793f
[]
no_license
yanqinghao/AiLab-Pytorch
c37e8f47241d7f1a003226b2a19b9406ff7f6f9b
ceea8a1196dca4d219a099cbaedcecf7c3f96564
refs/heads/master
2021-07-08T07:15:29.801492
2020-10-23T06:14:34
2020-10-23T06:14:34
198,990,470
0
0
null
2019-08-14T09:23:00
2019-07-26T09:40:58
Python
UTF-8
Python
false
false
734
py
# coding=utf-8 from __future__ import absolute_import, print_function import suanpan from suanpan.app.arguments import Int from suanpan.app import app from args import PytorchLayersModel from utils import getLayerName, net @app.input(PytorchLayersModel(key="inputModel")) @app.param(Int(key="inplanes", default=64)) @app.param(Int(key="planes", default=64)) @app.output(PytorchLayersModel(key="outputModel")) def SPResnetBlockV2(context): args = context.args model = args.inputModel name = getLayerName(model.layers, "ResnetBlockV2") setattr(model, name, net.ResnetBlockV2(args.inplanes, args.planes)) model.layers[name] = getattr(model, name) return model if __name__ == "__main__": suanpan.run(app)
[ "woshiyanqinghao@gmail.com" ]
woshiyanqinghao@gmail.com
2461531d4bea637f7d5827743cb3e450faf361d6
d310b6c5f3cfa1fd26cea3a4c257e5c1f8fa7f5d
/testDjango/urls.py
c70dcdb06772ecb03bb0deec39607499428a9bab
[ "MIT" ]
permissive
yxys01/testDjango
fe42c9368b2822e5b5571e7afe7c8127f67fea03
4facfa3b5958bf9f7276237dedeea2382de0a6e8
refs/heads/master
2022-04-24T03:06:22.592524
2020-04-25T03:46:45
2020-04-25T03:46:45
258,681,180
0
0
null
null
null
null
UTF-8
Python
false
false
1,294
py
"""testDjango URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/3.0/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path from firstWEB import views urlpatterns = [ # 前一个参数:访问的地址 # admin.site.urls 你要调用的映射到views里面的文件 path('admin/', admin.site.urls), # 映射到views的index功能上 # index是访问网页的路径:http://127.0.0.1:8000/index/ 可以访问到目标网页 path('index/', views.index), # http://127.0.0.1:8000/abc 可以访问到目标网页 # path('abc', views.index) path('calpage/', views.CalPage), # 路由部分 path('cal', views.Cal), path('list', views.CalList), path('del', views.DelData) ]
[ "420393969@qq.com" ]
420393969@qq.com
95b7481abd5da44b653139b6e671965a8b6bc81e
2f98aa7e5bfc2fc5ef25e4d5cfa1d7802e3a7fae
/python/python_24692.py
165e8518e2ba9cad5538a7ef480b9d654979df4a
[]
no_license
AK-1121/code_extraction
cc812b6832b112e3ffcc2bb7eb4237fd85c88c01
5297a4a3aab3bb37efa24a89636935da04a1f8b6
refs/heads/master
2020-05-23T08:04:11.789141
2015-10-22T19:19:40
2015-10-22T19:19:40
null
0
0
null
null
null
null
UTF-8
Python
false
false
63
py
# Python NameError: name 'self' is not defined Why? python -tt
[ "ubuntu@ip-172-31-7-228.us-west-2.compute.internal" ]
ubuntu@ip-172-31-7-228.us-west-2.compute.internal
d66bf0bdc8c0d57ac20c68440808e207cb9dde0e
37072be52ef0235667ad39b0fc732c4735a95a10
/supervisor/app/Require_value.py
295f7210ecee1ebc8d108eb01f0b150a51636e63
[]
no_license
zyt325/DailyDocker
6af8b50f19b8eb957d6214f52e1b408446afdfd2
a54038f3dbdeec2eb9bc746d77b0215dd7fd3781
refs/heads/master
2023-04-25T03:26:32.973288
2021-05-20T01:23:28
2021-05-20T01:23:28
369,043,767
0
0
null
null
null
null
UTF-8
Python
false
false
2,982
py
# -*- encoding:utf-8 -*-


def Credentials(class_type='type', class_host='host', class_key='key'):
    """Look up one field of a stored credential entry.

    ``class_type`` selects the credential family (e.g. 'DB_z', 'LDAP_z'),
    ``class_host`` the machine entry within it, ``class_key`` the field
    wanted ('host', 'user', 'passwd', ...).

    When ``class_host`` is not a known entry: asking for 'host' simply
    echoes ``class_host`` back; any other key is served from the family's
    'all' defaults (KeyError if the family has no 'all' entry).
    """
    # Static credential store: family -> host entry -> fields.
    credentials = {
        'DB_z': {
            'test': {'host': '127.0.0.1', 'user': 'zyt', 'passwd': '325',
                     'dbase': 'mysql', 'port': '3306'},
            'db': {'host': 'db.base-fx.com', 'user': 'root', 'passwd': 'basefx12',
                   'dbase': 'mysql', 'port': '3306'},
            'db08': {'host': 'db08.base-fx.com', 'user': 'root', 'passwd': 'basefx12',
                     'dbase': 'mysql', 'port': '3306'},
            'all': {'host': '', 'user': 'root', 'passwd': 'basefx12',
                    'dbase': 'mysql', 'port': '3306'},
            'vps': {'host': 'note.personer.tech', 'user': 'zyt', 'passwd': '325',
                    'dbase': 'mysql', 'port': '3306'},
            'localhost': {'host': 'localhost', 'user': 'root', 'passwd': '',
                          'dbase': 'mysql', 'port': '3306'},
            'docker03': {'host': 'docker03.base-fx.com', 'user': 'root', 'passwd': 'mysql325',
                         'dbase': 'mysql', 'port': '3306'},
        },
        'PostgreSQL': {
            'sg-db01': {'host': 'sg-db01.base-fx.com',
                        'user': 'com_base_fx_shotgun_prod',
                        'passwd': 'CHYlYIAWphYkjKl76Pj0',
                        'dbase': 'com_base_fx_shotgun_prod',
                        'port': '5432'},
        },
        'LDAP_z': {
            'test': {'host': '10.14.6.170',
                     'user': 'cn=zyt_ad,ou=Basers,dc=ad,dc=bfx,dc=com',
                     'passwd': 'zyt_ad#325',
                     'root_dn': 'ou=Basers,dc=ad,dc=bfx,dc=com'},
            'dc09': {'host': 'dc09.base-fx.com',
                     'user': 'cn=zhangyt,ou=NON,ou=BJ,ou=Basers,dc=ad,dc=base-fx,dc=com',
                     'passwd': 'b!onpJ32',
                     'root_dn': 'ou=Basers,dc=ad,dc=base-fx,dc=com'},
        },
        'SSH_z': {
            'vps': {'host': 'personer.tech', 'user': 'root', 'passwd': 'zyt#vps325'},
        },
        'WIN_z': {
            'test': {'host': '10.14.6.194', 'user': 'administrator', 'passwd': 'Base.f17d'},
            'all': {'host': '', 'user': 'administrator', 'passwd': 'Base.f17d'},
        },
    }

    entry = credentials[class_type].get(class_host)
    if entry:
        return entry[class_key]
    if class_key == 'host':
        # Unknown entry but the caller asked for a host: echo the name back.
        return class_host
    # Fall back to the family-wide defaults.
    return credentials[class_type]['all'][class_key]
[ "zhangyt@base-fx.com" ]
zhangyt@base-fx.com
f98db3544d549e03a0ff413e118f2bd620ddf2ee
cb38b170cc716d812822c8fdf64da99e154e7e77
/Daily Coding Problem/venv/Scripts/pip3-script.py
feb8e8c5c62025fbba1729354a447c379ef50e7d
[]
no_license
ayush-sah/Python
9227b2819083d0c1fce4fa60a62b167c74a14172
e17b43d2f4d53f4490630fc13a7defaafcf9ea28
refs/heads/master
2021-07-08T21:29:35.031374
2021-04-21T07:59:17
2021-04-21T07:59:17
228,924,078
0
0
null
null
null
null
UTF-8
Python
false
false
428
py
#!"C:\Users\Ayush\PycharmProjects\Daily Coding Problem\venv\Scripts\python.exe" # EASY-INSTALL-ENTRY-SCRIPT: 'pip==19.0.3','console_scripts','pip3' __requires__ = 'pip==19.0.3' import re import sys from pkg_resources import load_entry_point if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit( load_entry_point('pip==19.0.3', 'console_scripts', 'pip3')() )
[ "ayush.sah@spit.ac.in" ]
ayush.sah@spit.ac.in
aa76333dce449dddad0902a8573b49da71de2bc9
fa1810962cdb5304f32fbb869731638c02974f5d
/pattern_count.py
a0e57f9cf4bafd85bbd980cfbeb1c2287c126b1c
[]
no_license
ahishsujay/Biology_Meets_Programming_Bioinformatics_for_Beginners
f347df646d9e8b943ff486a7a5b1bcf02833f29d
c99c3660a41327670df1c717db202974462c13be
refs/heads/master
2022-04-01T16:04:10.009108
2019-12-30T05:28:16
2019-12-30T05:28:16
230,821,750
0
0
null
null
null
null
UTF-8
Python
false
false
280
py
def PatternCount(Text, Pattern):
    """Count the (possibly overlapping) occurrences of Pattern in Text."""
    window = len(Pattern)
    # Slide a window of len(Pattern) over every start position (inclusive
    # of the final full window) and tally the exact matches.
    return sum(
        1
        for start in range(len(Text) - window + 1)
        if Text[start:start + window] == Pattern
    )
[ "ahish278@gmail.com" ]
ahish278@gmail.com
2a04c078859847f83b2a810252c0bd0a2a0367e9
da052c0bbf811dc4c29a83d1b1bffffd41becaab
/core/web_debranding/__manifest__.py
2626a321be85b590c2375e95e0b69f7ad52c0bfc
[]
no_license
Muhammad-SF/Test
ef76a45ad28ac8054a4844f5b3826040a222fb6e
46e15330b5d642053da61754247f3fbf9d02717e
refs/heads/main
2023-03-13T10:03:50.146152
2021-03-07T20:28:36
2021-03-07T20:28:36
null
0
0
null
null
null
null
UTF-8
Python
false
false
824
py
# -*- coding: utf-8 -*- { 'name': "Backend debranding", 'version': '1.1.1', 'author': 'IT-Projects LLC, Ivan Yelizariev', 'license': 'LGPL-3', 'category': 'Debranding', 'images': ['images/web_debranding.png'], 'website': 'https://twitter.com/yelizariev', 'price': 150.00, 'currency': 'EUR', 'depends': [ 'web', 'mail', 'web_planner', 'access_apps', 'access_settings_menu', 'mail_base', ], 'data': [ 'security/web_debranding_security.xml', 'security/ir.model.access.csv', 'data.xml', 'views.xml', 'js.xml', 'pre_install.yml', ], 'qweb': [ 'static/src/xml/web.xml', ], 'auto_install': False, 'uninstall_hook': 'uninstall_hook', 'installable': True }
[ "jbalu2801@gmail.com" ]
jbalu2801@gmail.com
08dfdc25dd5b6e725862d224f73a2f026bbb10bd
8da2a3f13cf5051275c38ecf2c5a22f71e8a7ebf
/Yield Curve Animation/yield_curve.py
702fb7937c78d0e668578526086efc6518913dda
[]
no_license
letsgoexploring/fredclass
fa615e84edae13cb36f73e73dd166f0d8afa6ca3
4fb9657c8909224d088c194e77560cda8e1b11c9
refs/heads/master
2020-12-12T21:00:53.353516
2015-06-23T20:58:12
2015-06-23T20:58:12
23,480,045
5
0
null
null
null
null
UTF-8
Python
false
false
929
py
import fredclass, urllib, dateutil
import dateutil.parser  # fix: 'import dateutil' alone does not load the parser submodule
import matplotlib.pyplot as plt
import matplotlib.dates as dts
import numpy as np


# date example: '1981-01-14'
def yc(date, all_yields=None):
    """Build a yield-curve snapshot for a single date.

    Parameters
    ----------
    date : str
        Date string, e.g. '1981-01-14'.
    all_yields : list, optional
        Pre-fetched fredclass.fred series (4wk..30y).  When None, the
        eight Treasury series are downloaded from FRED.

    Returns
    -------
    (d_str, mat, yields, ymask) :
        human-readable date string, x-axis positions for the maturities,
        the raw yields array (with 'NaN' strings where a series has no
        observation on that date), and a boolean mask of finite values.
    """
    yields = []
    if all_yields is None:  # fix: compare to None with 'is', not '=='
        y1m = fredclass.fred('DTB4WK')
        y3m = fredclass.fred('DTB3')
        y6m = fredclass.fred('DTB6')
        y1 = fredclass.fred('DGS1')
        y5 = fredclass.fred('DGS5')
        y10 = fredclass.fred('DGS10')
        y20 = fredclass.fred('DGS20')
        y30 = fredclass.fred('DGS30')
        all_yields = [y1m, y3m, y6m, y1, y5, y10, y20, y30]
    for x in all_yields:
        # Look up this date's observation; series without one get 'NaN'.
        try:
            index = x.dates.index(date)
            yields.append(x.data[index])
        except ValueError:
            yields.append('NaN')
    yields = np.array(yields)
    y2 = yields.astype(np.double)   # 'NaN' strings parse to float nan here
    ymask = np.isfinite(y2)
    # Unevenly spaced x positions used when plotting the eight maturities.
    mat = np.array([0, 1, 2, 2.5, 4, 5, 5.5, 6])
    d1 = dateutil.parser.parse(date)
    d_str = d1.strftime('%B %d, %Y')
    return d_str, mat, yields, ymask
[ "letsgoexploring@gmail.com" ]
letsgoexploring@gmail.com
6997ba18d8ad2fb05c77cb9cbd2942726bf65798
fd4aba49cbd4042a95e7376eac245df0e95b72d3
/auto-generated/python/test/test_margin.py
a5287ac7cde2c798af31194cd8a629e51b3cef2c
[]
no_license
bretton/api-connectors
47755e7ec4701a600b3bf6a541c618573e97e365
e8b9de34ff941c3edae2b094f6ab0eb1c24bf8bb
refs/heads/master
2020-04-14T20:01:38.746415
2019-12-20T11:43:05
2019-12-20T11:43:05
164,079,343
2
2
null
2019-12-20T11:43:06
2019-01-04T08:21:45
C++
UTF-8
Python
false
false
2,277
py
# coding: utf-8 """ BitMEX API ## REST API for the BitMEX Trading Platform [View Changelog](/app/apiChangelog) #### Getting Started Base URI: [https://www.bitmex.com/api/v1](/api/v1) ##### Fetching Data All REST endpoints are documented below. You can try out any query right from this interface. Most table queries accept `count`, `start`, and `reverse` params. Set `reverse=true` to get rows newest-first. Additional documentation regarding filters, timestamps, and authentication is available in [the main API documentation](/app/restAPI). *All* table data is available via the [Websocket](/app/wsAPI). We highly recommend using the socket if you want to have the quickest possible data without being subject to ratelimits. ##### Return Types By default, all data is returned as JSON. Send `?_format=csv` to get CSV data or `?_format=xml` to get XML data. ##### Trade Data Queries *This is only a small subset of what is available, to get you started.* Fill in the parameters and click the `Try it out!` button to try any of these queries. * [Pricing Data](#!/Quote/Quote_get) * [Trade Data](#!/Trade/Trade_get) * [OrderBook Data](#!/OrderBook/OrderBook_getL2) * [Settlement Data](#!/Settlement/Settlement_get) * [Exchange Statistics](#!/Stats/Stats_history) Every function of the BitMEX.com platform is exposed here and documented. Many more functions are available. ##### Swagger Specification [⇩ Download Swagger JSON](swagger.json) ## All API Endpoints Click to expand a section. 
# noqa: E501 OpenAPI spec version: 1.2.0 Contact: support@bitmex.com Generated by: https://github.com/swagger-api/swagger-codegen.git """ from __future__ import absolute_import import unittest import swagger_client from swagger_client.models.margin import Margin # noqa: E501 from swagger_client.rest import ApiException class TestMargin(unittest.TestCase): """Margin unit test stubs""" def setUp(self): pass def tearDown(self): pass def testMargin(self): """Test Margin""" # FIXME: construct object with mandatory attributes with example values # model = swagger_client.models.margin.Margin() # noqa: E501 pass if __name__ == '__main__': unittest.main()
[ "samuel.trace.reed@gmail.com" ]
samuel.trace.reed@gmail.com
222c27fed7d206044563f38816d6d87261e0f34b
aeb69456c4e6f2238c947ae426d346aad033d598
/python/268.缺失数字.py
782b23d882a2f414dfc1c490783634a33db502ec
[]
no_license
ElonXie/LeetCode-Practice
f2c345cadce8d60515343ee94f52de5f34477d81
7a54fc8f85e3e7f937bb504a8f4c6de6dd7da3e2
refs/heads/master
2021-05-16T21:09:11.231951
2020-06-21T03:39:12
2020-06-21T03:39:12
250,470,089
0
0
null
null
null
null
UTF-8
Python
false
false
915
py
#
# @lc app=leetcode.cn id=268 lang=python3
#
# [268] Missing Number
#
# Given nums holding n distinct values from 0..n, return the single value
# in that range that does not appear.  Required: linear time, O(1) space.
#
from typing import List


# @lc code=start
class Solution:
    def missingNumber(self, nums: List[int]) -> int:
        """Gauss sum: (expected sum of 0..n) minus the actual sum."""
        n = len(nums)
        return n * (n + 1) // 2 - sum(nums)
# @lc code=end
[ "sdaxdh@163.com" ]
sdaxdh@163.com
60356fe0fc529813a65d6b737ae74b0c6145361b
973207c11590e932d0868a2b6dbe8c081efaa452
/ejemplo4.py
a93731b89eda7af364bbbcb616e1051f39e105e8
[]
no_license
samfiner/ejemplopython
41bd7af4d600f55970cf351c40eb6c81c1eafdd6
18bba74c51603b9235d796132803a5658110fd9b
refs/heads/master
2020-06-23T19:13:20.425216
2019-08-08T22:39:53
2019-08-08T22:39:53
198,728,089
0
0
null
null
null
null
UTF-8
Python
false
false
522
py
# Console calculator: reads two integers and an operation code, then
# prints the result (1=add, 2=subtract, 3=multiply, 4=divide).
num1 = int(input("dijite el primer numero: "))
num2 = int(input("dijite el segundo numero: "))
operacion = int(input("dijite 1 para suma, 2 para resta, 3 para multiplicacion y 4 para division: "))

# Dispatch table: code -> (label, lazy computation).  The lambdas keep
# the division from being evaluated unless option 4 is actually chosen.
operaciones = {
    1: (" la suma es:", lambda: num1 + num2),
    2: (" la resta es:", lambda: num1 - num2),
    3: (" la multiplicacion es:", lambda: num1 * num2),
    4: (" la division es:", lambda: num1 / num2),
}

if operacion in operaciones:
    etiqueta, calcular = operaciones[operacion]
    print(etiqueta, calcular())
else:
    print(" por favo digite un valor correcto para la operacion")
[ "juanmc32145@gmail.com" ]
juanmc32145@gmail.com
3c4576f5ddbaa3c48af78c475ce38b2bd8b8f920
9561e483f18e1c42f1e6d53c3f3e9634ae6010fd
/ChapterV/Quiz/V.3-1-Random-Code.py
45cd508576e9ecf29563d1fc3b177b85bd93936f
[]
no_license
china-university-mooc/Python-Basics
e90f0bbf20e448e29264883ca50b32d1ef388514
60b47d8e60788c798593e5cbbdcf5bb8931bc3b8
refs/heads/master
2021-03-14T00:00:51.804219
2020-04-24T16:15:36
2020-04-24T16:15:36
246,722,336
0
1
null
null
null
null
UTF-8
Python
false
false
239
py
import random


def genpwd(length):
    """Return a random numeric password of exactly ``length`` digits.

    The first digit is never zero: the value is drawn uniformly from
    [10**(length-1), 10**length - 1] and stringified.
    """
    start = pow(10, length - 1)
    end = pow(10, length) - 1
    num = random.randint(start, end)
    return str(num)


def main():
    """Quiz driver: read the length, fix the seed, print 3 passwords."""
    # NOTE(review): eval() on user input is kept only because the grader
    # feeds a bare integer; int(input()) would be the safe choice.
    length = eval(input())
    random.seed(17)
    for _ in range(3):
        print(genpwd(length))


# Guarding the driver makes the module importable (and genpwd testable)
# without blocking on stdin; behavior when run as a script is unchanged.
if __name__ == "__main__":
    main()
[ "zhaozhang@thoughtworks.com" ]
zhaozhang@thoughtworks.com
92bcfa6ce67739e1cc047aaee172267236b38d3e
40aa9e67ca10c39b7d2646c28b7dc1ec31da8f3e
/VCAnalytics/bin/wheel
0d146325f0acc7177dff192cbb11e73255ac5fef
[]
no_license
christopdr/venture_capital
096daed052d54293fbc55a0aa5f9bc7094e45f44
3d7412153a9849c4e647d04260d15775eb0ba126
refs/heads/master
2020-05-04T19:43:32.775771
2019-05-18T06:29:19
2019-05-18T06:29:19
179,405,280
0
2
null
2019-05-18T06:29:20
2019-04-04T02:18:29
Python
UTF-8
Python
false
false
277
#!/Users/Christopher/Documents/GitHub/venture_capital/venture_capital_app/bin/python # -*- coding: utf-8 -*- import re import sys from wheel.cli import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "christopdr@users.noreply.github.com" ]
christopdr@users.noreply.github.com
f820276f18fc4c046f94b5b1dc6f7996facd91e2
0380b8b08838128f57e2eed713a7e67578538d27
/1.Flexible movement policy/4.Corr_random_50a/1.Corr_rand_50a_30cm/funct_30cm_full_mov_50a.py
5340de3b68dfb5b18a392bd953bfa20ec29822bd
[ "MIT" ]
permissive
GustavoHdezM/Supermarket-environment-for-COVID-19-mitigation
edf3291a7f6dba8c66a89712b941fb14aa968300
61d08151d4d6522eeab327b76b693dcd0f0e70c7
refs/heads/master
2022-11-30T04:42:46.296290
2020-08-06T07:55:24
2020-08-06T07:55:24
279,936,577
0
1
null
null
null
null
UTF-8
Python
false
false
64,704
py
# -*- coding: utf-8 -*- """ Functions Code Platform for testing potential COVID-19 spread in Supermarkets @author: Gustavo Hernandez Mejia """ import matplotlib.pyplot as plt import math import random prop_cycle = plt.rcParams['axes.prop_cycle'] #Colors colors = prop_cycle.by_key()['color'] s1=1000 # 3500 s2=1900 alpha1=0.20 #_____________________ Scene 1 GENERAL MODEL___________________________________ """ Agents and input parameters """ #I1 = [2,2] # Infected # #P1 = [5,2] # Persons #P2 = [15,2] #P3 = [22,2] #P4 = [27,2] #P5 = [27,6.5] #P6 = [7.5,14] #fix #P7 = [8.5,14] #fix #P8 = [27,12] #P9 = [15,10] #P10 = [18,6] #P11 = [17,14] #P12 = [20,14] #P13 = [23,10] # #P14 = [8.5,11] #fix #P15 = [8.5,6] #fix # #CO1 = [6,15] # Check Out (CO) #CO2 = [6,12] #CO3 = [6,9] # Trajectories #T1 = [[1,2],[2,2],[3,2],[4,2],[5,2],[6,2],[7,2],[8,2],[9,2],[10,2], # [11,2],[12,2]] addi = 0.3 ext = 4 T1 = [] sis_1 = 12 * ext for i in range(0, 40): T1.append([(i*addi)+addi,2]) T2 = [] sis_2 = 15 * ext for i in range(1, 50): T2.append([(i*addi)+12,2]) for i in range(0, 4*2+6): T2.append([27,(i*addi)+2]) T3 = [] sis_3 = 4 * ext for i in range(0, 13): T3.append([27,(i*addi)+6.2]) #T4 = [[26,6],[25,6],[24,6],[23,6],[22,6],[21,6],[20,6]] T4 = [] sis_4 = 7 * ext for i in range(24, 0, -1): T4.append([(i*addi)+19.7,6]) #T5 = [[19.5,6],[18,6],[17,6],[16,6],[15,6],[14,6],[13,6],[12,6]] T5 = [] sis_5 = 8 * ext for i in range(27, 0,-1): T5.append([(i*addi)+11.7,6]) #T6 = [[27,10.5],[27,12],[27,13],[27,14],[26,14],[25,14],[24,14],[23,14],[22,14], # [21,14],[20,14]] T6 = [] sis_6 = 4 * ext for i in range(0, 13): T6.append([27,(i*addi)+10.2]) for i in range(24, 0, -1): T6.append([(i*addi)+19.7,14]) #T7 = [[26.5,10],[25,10],[24,10],[23,10],[22,10],[21,10],[20,10]] T7 = [] sis_7 = 7 * ext for i in range(sis_7-4, 0,-1): T7.append([(i*addi)+19.7,10]) #T8 = [[20,10.5],[20,12],[20,13.5]] T8 = [] sis_8 = 4 * ext for i in range(0, sis_8-3): T8.append([20,(i*addi)+10.3]) #T9 = [[20,6.5 7],[20,8],[20,9.5]] T9 
= [] sis_9 = 4 * ext for i in range(0, sis_9-3): T9.append([20,(i*addi)+6.3]) #T10= [[19.5,10],[18,10],[17,10],[16,10],[15,10],[14,10],[13,10],[12,10]] T10 = [] sis_10 = 8 * ext for i in range(sis_10-5, 0,-1): T10.append([(i*addi)+11.7,10]) #T11= [[19,14],[18,14],[17,14],[16,14],[15,14],[14,14],[13,14],[12,14]] T11 = [] sis_11 = 8 * ext for i in range(sis_11-5, 0,-1): T11.append([(i*addi)+11.7,14]) #T12= [[12,11],[12,12],[12,13]] T12 = [] sis_12 = 3 * ext for i in range(0, sis_12+1): T12.append([12,(i*addi) + 10.3]) #T13= [[12,7],[12,8],[12,9]] T13 = [] sis_13 = 3 * ext for i in range(0, sis_13+1): T13.append([12,(i*addi) + 6.3]) #T14= [[12,3],[12,4],[12,5]] T14 = [] sis_14 = 3 * ext for i in range(0, sis_14+1): T14.append([12,(i*addi) + 2.2]) #T15= [[11,6],[10,6],[9,6],[8,6],[7,6],[6,6],[5,6]] T15 = [] sis_15 = 8 * ext T15.append([11,7]) T15.append([10.5,7.5]) T15.append([11.5,6.5]) for i in range(sis_15-12, 0,-1): T15.append([(i*addi)+4,8]) #T16= [[11,10.5],[10,11],[9,11],[8,11],[7,11],[6,11],[5,11]] T16 = [] sis_16 = 6 * ext T16.append([11.7,10.2]) T16.append([11.4,10.5]) T16.append([11.1,10.8]) for i in range(sis_16-1, 0,-1): T16.append([(i*addi)+4,11]) #T17= [[11,14],[10,14],[9,14],[8,14],[7,14],[6,14],[5,14]] T17 = [] sis_17 = 7 * ext for i in range(sis_17-2, 0,-1): T17.append([(i*addi)+4,14]) T18 = [] sis_18 = 8 * ext for i in range(sis_18-6, 0,-1): T18.append([4,(i*addi)+6]) for i in range(13, 0,-1): T18.append([(i*addi)+0,6]) Min_dist = 1.4 # 1 m No_content_prob = 0.5 # Bigger circle original 0.5 content_prob = 0.35 # Smaller circle #U_Pop = [P1, P2, P3, P4, P5, P6, P7, P8, P9, P10, P11, P12, P13, P14, P15, # CO1, CO2, CO3] #I_Pop = [I1] Agent_j = [] def T_traj(traj,U_Pop,I_Pop,Per): paso = 1 if traj == T1: ind = T1.index(Per) + paso if ind <= (len(T1)-1): Per = T1[T1.index(Per) + paso] next_traj = T1 # print(f'T1:{Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and 
(random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T2 Per = T2[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T14 Per = T14[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j # for i in range(len(traj)): # for j in range(len(U_Pop)): # h = (traj[i][0]- U_Pop[j][0])**2 + (traj[i][1]- U_Pop[j][1])**2 # Dist = math.sqrt(h) # if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): # I_Pop.append(U_Pop[j]) # Agent_j.append(j) # indices1 = Agent_j # U_Pop = [i for j, i in enumerate(U_Pop) if j not in indices] # Normal # if traj == T1: ## print('T1') # if (random.random() < 0.5): # next_traj = T2 # Per = T2[0] # else: # next_traj = T14 # Per = T14[0] if traj == T2: # print('T2') ind = T2.index(Per) + paso if ind <= (len(T2)-1): Per = T2[T2.index(Per) + paso] next_traj = T2 # print(f'T2: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T3 Per = T3[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T4 Per = T4[0] for j 
in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T3: # print('T3') ind = T3.index(Per) + paso if ind <= (len(T3)-1): Per = T3[T3.index(Per) + paso] next_traj = T3 # print(f'T3: {Per}') # print(f'T1, {P}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T6 Per = T6[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T7 Per = T7[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T4: # print('T4') ind = T4.index(Per) + paso if ind <= (len(T4)-1): Per = T4[T4.index(Per) + paso] next_traj = T4 for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T5 Per = T5[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T9 Per = 
T9[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T5: # print('T5') ind = T5.index(Per) + paso if ind <= (len(T5)-1): Per = T5[T5.index(Per) + paso] next_traj = T5 # print(f'T5: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T14[::-1] Per = T14[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) elif (random.random() < 0.5): next_traj = T13 Per = T13[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) # indices1 = Agent_j else: next_traj = T15 Per = T15[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T6: # print('T6') ind = T6.index(Per) + paso if ind <= (len(T6)-1): Per = T6[T6.index(Per) + paso] next_traj = T6 # print(f'T6: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if 
(random.random() < 0.5): next_traj = T11 Per = T11[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T8[::-1] Per = T8[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T7: # print('T7') ind = T7.index(Per) + paso if ind <= (len(T7)-1): Per = T7[T7.index(Per) + paso] next_traj = T7 # print(f'T7: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T9[::-1] Per = T9[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j elif (random.random() < 0.5): next_traj = T8 Per = T8[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T10 Per = T10[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T8: # print('T8') ind = T8.index(Per) + paso if 
ind <= (len(T8)-1): Per = T8[T8.index(Per) + paso] next_traj = T8 # print(f'T8: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T11 Per = T11[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T9: # print('T9') ind = T9.index(Per) + paso if ind <= (len(T9)-1): Per = T9[T9.index(Per) + paso] next_traj = T9 # print(f'T9: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T7[::-1] Per = T7[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j elif (random.random() < 0.5): next_traj = T8 Per = T8[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T10 Per = T10[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T10: # print('T10') ind 
= T10.index(Per) + paso if ind <= (len(T10)-1): Per = T10[T10.index(Per) + paso] next_traj = T10 # print(f'T10: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T13[::-1] Per = T13[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j elif (random.random() < 0.5): next_traj = T16 Per = T16[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T12 Per = T12[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T11: # print('T11') ind = T11.index(Per) + paso if ind <= (len(T11)-1): Per = T11[T11.index(Per) + paso] next_traj = T11 # print(f'T11: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T17 Per = T17[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) 
Agent_j.append(j) indices1 = Agent_j else: next_traj = T12[::-1] Per = T12[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T12: # print('T12') ind = T12.index(Per) + paso if ind <= (len(T12)-1): Per = T12[T12.index(Per) + paso] next_traj = T12 # print(f'T12: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T17 Per = T17[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T11[::-1] Per = T11[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T13: # print('T13') ind = T13.index(Per) + paso if ind <= (len(T13)-1): Per = T13[T13.index(Per) + paso] next_traj = T13 # print(f'T13: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T16 Per = T16[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < 
No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T10[::-1] Per = T10[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T14: # print('T14') ind = T14.index(Per) + paso if ind <= (len(T14)-1): Per = T14[T14.index(Per) + paso] next_traj = T14 # print(f'T14: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T13 Per = T13[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j elif (random.random() < 0.5): next_traj = T5[::-1] Per = T5[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T15 Per = T15[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T15: # print('T15') ind = T15.index(Per) + paso if ind <= (len(T15)-1): Per = T15[T15.index(Per) + paso] next_traj = T15 # print(f'T15: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = 
math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T18 Per = T18[19] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T16: # print('T16') ind = T16.index(Per) + paso if ind <= (len(T16)-1): Per = T16[T16.index(Per) + paso] next_traj = T16 # print(f'T16: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T18 Per = T18[9] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T17: # print('T17') ind = T17.index(Per) + paso if ind <= (len(T17)-1): Per = T17[T17.index(Per) + paso] next_traj = T17 # print(f'T17: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T18 Per = T18[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T18: ind = T18.index(Per) + paso if ind <= (len(T18)-1): Per = T18[T18.index(Per) + paso] next_traj = T18 # print(f'T18: {Per}') for 
j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: # print('T17') next_traj = 0 # Inverted direction if traj == T3[::-1]: # print('T3-') ind = T3[::-1].index(Per) + paso if ind <= (len(T3[::-1])-1): Per = T3[::-1][T3[::-1].index(Per) + paso] next_traj = T3[::-1] # print(f'T3-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T4 Per = T4[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T4[::-1]: # print('T4-') ind = T4[::-1].index(Per) + paso if ind <= (len(T4[::-1])-1): Per = T4[::-1][T4[::-1].index(Per) + paso] next_traj = T4[::-1] # print(f'T4-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T3 Per = T3[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T5[::-1]: # print('T5-') ind = T5[::-1].index(Per) + paso if ind <= (len(T5[::-1])-1): Per = T5[::-1][T5[::-1].index(Per) + paso] next_traj = T5[::-1] # print(f'T5-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- 
U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T9 Per = T9[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T4[::-1] Per = T4[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T7[::-1]: # print('T7-') ind = T7[::-1].index(Per) + paso if ind <= (len(T7[::-1])-1): Per = T7[::-1][T7[::-1].index(Per) + paso] next_traj = T7[::-1] # print(f'T7-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T6 Per = T6[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T3[::-1] Per = T3[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T8[::-1]: # print('T8-') ind = T8[::-1].index(Per) + paso if ind <= (len(T8[::-1])-1): Per = 
T8[::-1][T8[::-1].index(Per) + paso] next_traj = T8[::-1] # print(f'T8-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T7[::-1] Per = T7[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j elif (random.random() < 0.5): next_traj = T9[::-1] Per = T9[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T10 Per = T10[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T9[::-1]: # print('T9-') ind = T9[::-1].index(Per) + paso if ind <= (len(T9[::-1])-1): Per = T9[::-1][T9[::-1].index(Per) + paso] next_traj = T9[::-1] # print(f'T9-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T5 Per = T5[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) 
Agent_j.append(j) indices1 = Agent_j else: next_traj = T4[::-1] Per = T4[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T10[::-1]: # print('T10-') ind = T10[::-1].index(Per) + paso if ind <= (len(T10[::-1])-1): Per = T10[::-1][T10[::-1].index(Per) + paso] next_traj = T10[::-1] # print(f'T10-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T7[::-1] Per = T7[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j elif (random.random() < 0.5): next_traj = T9[::-1] Per = T9[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T8 Per = T8[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T11[::-1]: # print('T11-') ind = T11[::-1].index(Per) + paso if ind <= (len(T11[::-1])-1): Per = T11[::-1][T11[::-1].index(Per) + paso] next_traj = T11[::-1] # print(f'T11-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- 
U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T8[::-1] Per = T8[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T12[::-1]: # print('T12-') ind = T12[::-1].index(Per) + paso if ind <= (len(T12[::-1])-1): Per = T12[::-1][T12[::-1].index(Per) + paso] next_traj = T12[::-1] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if (random.random() < 0.5): next_traj = T16 Per = T16[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T10[::-1] Per = T10[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j if traj == T13[::-1]: # print('T13-') ind = T13[::-1].index(Per) + paso if ind <= (len(T13[::-1])-1): Per = T13[::-1][T13[::-1].index(Per) + paso] next_traj = T13[::-1] # print(f'T13-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: if 
(random.random() < 0.5): next_traj = T15 Per = T15[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) indices1 = Agent_j else: next_traj = T5[::-1] Per = T5[::-1][0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) # indices1 = Agent_j if traj == T14[::-1]: # print('T14-') ind = T14[::-1].index(Per) + paso if ind <= (len(T14[::-1])-1): Per = T14[::-1][T14[::-1].index(Per) + paso] next_traj = T14[::-1] # print(f'T14-: {Per}') for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) # indices1 = Agent_j else: next_traj = T2 Per = T2[0] for j in range(len(U_Pop)): h = (Per[0]- U_Pop[j][0])**2 + (Per[1]- U_Pop[j][1])**2 Dist = math.sqrt(h) if (Dist <= Min_dist) and (random.random() < No_content_prob) and (U_Pop[j] not in I_Pop): I_Pop.append(U_Pop[j]) Agent_j.append(j) # indices1 = Agent_j return next_traj, I_Pop, Per def U_traj(traj,P): paso = 1 if traj == T1: ind = T1.index(P) + paso if ind <= (len(T1)-1): P = T1[T1.index(P) + paso] next_traj = T1 # print(f'T1, {P}') else: if (random.random() < 0.5): next_traj = T2 P = T2[0] # print(f'T2, {P}') else: next_traj = T14 P = T14[0] # print(f'T14, {P}') if traj == T2: ind = T2.index(P) + paso if ind <= (len(T2)-1): P = T2[T2.index(P) + paso] next_traj = T2 # print(f'T2, {P}') else: if (random.random() < 0.5): next_traj = T3 P = T3[0] # print(f'T3, {P}') else: next_traj = T4 P = T4[0] # print(f'T4, {P}') if traj == T3: ind = T3.index(P) + 3 if ind <= (len(T3)-1): P = 
T3[T3.index(P) + paso] next_traj = T3 # print(f'T3, {P}') else: if (random.random() < 0.5): next_traj = T6 P = T6[0] # print(f'T6, {P}') else: next_traj = T7 P = T7[0] # print(f'T7, {P}') if traj == T4: ind = T4.index(P) + paso if ind <= (len(T4)-1): P = T4[T4.index(P) + paso] next_traj = T4 # print(f'T4, {P}') else: if (random.random() < 0.5): next_traj = T5 P = T5[0] # print(f'T5, {P}') else: next_traj = T9 P = T9[0] # print(f'T9, {P}') if traj == T5: ind = T5.index(P) + paso if ind <= (len(T5)-1): P = T5[T5.index(P) + paso] next_traj = T5 # print(f'T5, {P}') else: if (random.random() < 0.5): next_traj = T14[::-1] P = T14[0] # print(f'T14, {P}') else: # (random.random() < 0.5): next_traj = T13 P = T13[0] # print(f'T13, {P}') # else: # next_traj = T15 # P = T15[0] # print(f'T15, {P}') if traj == T6: ind = T6.index(P) + paso if ind <= (len(T6)-1): P = T6[T6.index(P) + paso] next_traj = T6 # print(f'T6, {P}') else: if (random.random() < 0.5): next_traj = T11 P = T11[0] # print(f'T11, {P}') else: next_traj = T8[::-1] P = T8[0] # print(f'T8, {P}') if traj == T7: ind = T7.index(P) + paso if ind <= (len(T7)-1): P = T7[T7.index(P) + paso] next_traj = T7 # print(f'T7, {P}') else: if (random.random() < 0.5): next_traj = T9[::-1] P = T9[0] # print(f'T9, {P}') elif (random.random() < 0.5): next_traj = T8 P = T8[0] # print(f'T8, {P}') else: next_traj = T10 P = T10[0] # print(f'T10, {P}') if traj == T8: # ind = T8.index(P) + 3 # if ind <= (len(T8)-1): # P = T8[T8.index(P) + 3] # next_traj = T8 ## print(f'T8, {P}') # else: next_traj = T11 P = T11[0] # print(f'T11, {P}') if traj == T9: # ind = T9.index(P) + 3 # if ind <= (len(T9)-1): # P = T9[T9.index(P) + 3] # next_traj = T9 ## print(f'T9, {P}') # else: if (random.random() < 0.5): next_traj = T7[::-1] P = T7[0] # print(f'T7, {P}') elif (random.random() < 0.5): next_traj = T8 P = T8[0] # print(f'T8, {P}') else: next_traj = T10 P = T10[0] # print(f'T10, {P}') if traj == T10: ind = T10.index(P) + paso if ind <= (len(T10)-1): P = 
T10[T10.index(P) + paso] next_traj = T10 # print(f'T10, {P}') else: if (random.random() < 0.5): next_traj = T13[::-1] P = T13[::-1][0] # print(f'T13, {P}') elif (random.random() < 0.5): next_traj = T16 P = T16[0] # print(f'T16, {P}') else: next_traj = T12 P = T12[0] # print(f'T12, {P}') if traj == T11: ind = T11.index(P) + paso if ind <= (len(T11)-1): P = T11[T11.index(P) + paso] next_traj = T11 # print(f'T11, {P}') else: # next_traj = T12[::-1] # P = T12[0] # print(f'T12, {P}') if (random.random() < 0.5): next_traj = T17 P = T17[0] # print(f'T17, {P}') else: next_traj = T12[::-1] P = T12[::-1][0] # print(f'T12, {P}') if traj == T12: # ind = T12.index(P) + 3 # if ind <= (len(T12)-1): # P = T12[T12.index(P) + 3] # next_traj = T12 ## print(f'T12, {P}') # else: # next_traj = T11[::-1] # P = T11[0] # print(f'T11, {P}') if (random.random() < 0.5): next_traj = T17 P = T17[0] # print(f'T17, {P}') else: next_traj = T11[::-1] P = T11[::-1][0] # print(f'T11, {P}') if traj == T13: # ind = T13.index(P) + 3 # if ind <= (len(T13)-1): # P = T13[T13.index(P) + 3] # next_traj = T13 ## print(f'T13, {P}') # else: # next_traj = T10[::-1] # P = T10[0] # print(f'T10, {P}') if (random.random() < 0.5): next_traj = T16 P = T16[0] # print(f'T16, {P}') elif (random.random() < 0.5): next_traj = T12 P = T12[0] else: next_traj = T10[::-1] P = T10[::-1][0] # print(f'T10, {P}') if traj == T14: # ind = T14.index(P) + 3 # if ind <= (len(T14)-1): # P = T14[T14.index(P) + 3] # next_traj = T14 ## print(f'T14, {P}') # else: if (random.random() < 0.5): next_traj = T13 P = T13[0] # print(f'T13, {P}') elif(random.random() < 0.5): next_traj = T5[::-1] P = T5[::-1][0] # print(f'T5, {P}') else: next_traj = T15 P = T15[0] # print(f'T15, {P}')1 if traj == T15: ind = T15.index(P) + paso if ind <= (len(T15)-1): P = T15[T15.index(P) + paso] next_traj = T15 else: next_traj = T18 P = T18[19] if traj == T16: ind = T16.index(P) + paso if ind <= (len(T16)-1): P = T16[T16.index(P) + paso] next_traj = T16 else: 
next_traj = T18 P = T18[9] if traj == T17: ind = T17.index(P) + paso if ind <= (len(T17)-1): P = T17[T17.index(P) + paso] next_traj = T17 else: next_traj = T18 P = T18[0] if traj == T18: ind = T18.index(P) + paso if ind <= (len(T18)-1): P = T18[T18.index(P) + paso] next_traj = T18 else: next_traj = T1 P = T1[0] # print('T1') # Inverted direction if traj == T3[::-1]: ind = T3[::-1].index(P) + paso if ind <= (len(T3[::-1])-1): P = T3[::-1][T3[::-1].index(P) + paso] next_traj = T3[::-1] # print(f'T3-, {P}') else: next_traj = T4 P = T4[0] # print(f'T4, {P}') if traj == T4[::-1]: ind = T4[::-1].index(P) + paso if ind <= (len(T4[::-1])-1): P = T4[::-1][T4[::-1].index(P) + paso] next_traj = T4[::-1] # print(f'T4-, {P}') else: next_traj = T3 P = T3[0] # print(f'T3, {P}') if traj == T5[::-1]: ind = T5[::-1].index(P) + paso if ind <= (len(T5[::-1])-1): P = T5[::-1][T5[::-1].index(P) + paso] next_traj = T5[::-1] # print(f'T5-, {P}') else: if (random.random() < 0.5): next_traj = T9 P = T9[0] # print(f'T9, {P}') else: next_traj = T4[::-1] P = T4[::-1][0] # print(f'T4-, {P}') if traj == T7[::-1]: ind = T7[::-1].index(P) + paso if ind <= (len(T7[::-1])-1): P = T7[::-1][T7[::-1].index(P) + paso] next_traj = T7[::-1] # print(f'T7-, {P}') else: if (random.random() < 0.5): next_traj = T6 P = T6[0] # print(f'T6, {P}') else: next_traj = T3[::-1] P = T3[::-1][0] # print(f'T3-, {P}') if traj == T8[::-1]: # ind = T8[::-1].index(P) + 3 # if ind <= (len(T8[::-1])-1): # P = T8[::-1][T8[::-1].index(P) + 3] # next_traj = T8[::-1] ## print(f'T8-, {P}') # else: if (random.random() < 0.5): next_traj = T7[::-1] P = T7[::-1][0] # print(f'T7-, {P}') elif (random.random() < 0.5): next_traj = T9[::-1] P = T9[::-1][0] # print(f'T9-, {P}') else: next_traj = T10 P = T10[::-1][0] # print(f'T10-, {P}') if traj == T9[::-1]: # ind = T9[::-1].index(P) + 3 # if ind <= (len(T9[::-1])-1): # P = T9[::-1][T9[::-1].index(P) + 3] # next_traj = T9[::-1] ## print(f'T9-, {P}') # else: if (random.random() < 0.5): 
next_traj = T5 P = T5[0] # print(f'T5, {P}') else: next_traj = T4[::-1] P = T4[::-1][0] # print(f'T4-, {P}') if traj == T10[::-1]: ind = T10[::-1].index(P) + paso if ind <= (len(T10[::-1])-1): P = T10[::-1][T10[::-1].index(P) + paso] next_traj = T10[::-1] # print(f'T10-, {P}') else: if (random.random() < 0.5): next_traj = T7[::-1] P = T7[::-1][0] # print(f'T7-, {P}') elif (random.random() < 0.5): next_traj = T9[::-1] P = T9[::-1][0] # print(f'T9-, {P}') else: next_traj = T8 P = T8[::-1][0] # print(f'T8-, {P}') if traj == T11[::-1]: ind = T11[::-1].index(P) + paso if ind <= (len(T11[::-1])-1): P = T11[::-1][T11[::-1].index(P) + paso] next_traj = T11[::-1] # print(f'T11-, {P}') else: next_traj = T8[::-1] P = T8[::-1][0] # print(f'T8-, {P}') if traj == T12[::-1]: # ind = T12[::-1].index(P) + 3 # if ind <= (len(T12[::-1])-1): # P = T12[::-1][T12[::-1].index(P) + 3] # next_traj = T12[::-1] ## print(f'T12-, {P}') # else: # next_traj = T10[::-1] # P = T10[::-1][0] # print(f'T10-, {P}') if (random.random() < 0.5): next_traj = T16 P = T16[0] # print(f'T16, {P}') elif (random.random() < 0.5): next_traj = T10[::-1] P = T10[::-1][0] # print(f'T10-, {P}') else: next_traj = T13[::-1] P = T13[::-1][0] if traj == T13[::-1]: # ind = T13[::-1].index(P) + 3 # if ind <= (len(T13[::-1])-1): # P = T13[::-1][T13[::-1].index(P) + 3] # next_traj = T13[::-1] ## print(f'T13-, {P}') # else: # next_traj = T5[::-1] # P = T5[::-1][0] # print(f'T5-, {P}') if (random.random() < 0.5): next_traj = T14[::-1] P = T14[::-1][0] # print(f'T10-, {P}') elif (random.random() < 0.5): next_traj = T5[::-1] P = T5[::-1][0] # print(f'T10-, {P}') else: next_traj = T15 P = T15[0] if traj == T14[::-1]: # ind = T14[::-1].index(P) + 3 # if ind <= (len(T14[::-1])-1): # P = T14[::-1][T14[::-1].index(P) + 3] # next_traj = T14[::-1] ## print(f'T14-, {P}') # else: next_traj = T2 P = T2[0] # print(f'T2, {P}') return next_traj, P
[ "noreply@github.com" ]
noreply@github.com
bf9b4c55e0e0b67ded0e6452ab8893a773b3fb88
d469de9070628b7c56e283066d9122eb73c42dd2
/algorithms/data_structures/binary_tree.py
7dad06d856241373ca5e8bfd012d65a0b853afdc
[]
no_license
Rowing0914/Interview_Prep_Python
af26369ccb92c623fc2ac44e62d3f61e94046df6
a77a9b2342fbc9fc87b9f3670b0f3ab36f47eac7
refs/heads/master
2022-11-26T10:22:44.564728
2020-08-07T12:06:54
2020-08-07T12:06:54
269,878,434
2
0
null
null
null
null
UTF-8
Python
false
false
923
py
class Node: def __init__(self, value): self.l = None self.r = None self.v = value class BinaryTree: def __init__(self): self.root = None def add(self, item): if self.root == None: self.root = Node(value=item) else: self._add(item, self.root) def _add(self, item, node): if item > node.v: print("right: ", item) if node.r == None: node.r = Node(value=item) else: self._add(item, node.r) else: print("lefft: ", item) if node.l == None: node.l = Node(value=item) else: self._add(item, node.l) def printTree(self): if self.root == None: print("Nothing") else: self._printTree(self.root) def _printTree(self, node): if node != None: self._printTree(node.l) print(str(node.v) + " ") self._printTree(node.r) if __name__ == '__main__': tree = BinaryTree() tree.add(3) tree.add(4) tree.add(0) tree.add(8) tree.add(2) tree.printTree()
[ "kosakaboat@gmail.com" ]
kosakaboat@gmail.com
d531ac6b14b28efdbcaa7dbcc9edad4029ab4ccf
0ff562277646000e7f05c68e18133466effeb962
/seq2seq/evaluate.py
9356c281bfea4c511ab9d95e5d84048c069e162c
[]
no_license
zyxue/bio-seq2seq-attention
708fd8a73f69c8564d488c185dba792e3570cbed
692614f4d025c78800ecd6c104c430e2bff11edf
refs/heads/master
2020-04-16T21:34:59.626246
2019-02-22T00:42:40
2019-02-22T00:42:40
165,930,778
3
0
null
null
null
null
UTF-8
Python
false
false
1,839
py
import random import torch from seq2seq.plot import plot_attn # from seq2seq.utils import tensor_from_sentence, get_device def evaluate(src_lang, tgt_lang, enc, dec, tgt_sos_index, src_seq, seq_len): with torch.no_grad(): # shape: S X B X 1 src_tensor = tensor_from_sentence(src_lang, src_seq).view(-1, 1, 1) enc_hid = enc.init_hidden(batch_size=1) enc_outs, enc_hid = enc(src_tensor, enc_hid) if enc.bidirectional: # as the enc_outs has a 2x factor for hidden size, so reshape hidden to # match that enc_hid = torch.cat([ enc_hid[:enc.num_layers, :, :], enc_hid[enc.num_layers:, :, :] ], dim=2) device = get_device() dec_in = torch.tensor([[tgt_sos_index]], device=device).view(-1, 1) dec_hid = enc_hid dec_outs = [] dec_attns = torch.zeros(seq_len, seq_len) for di in range(seq_len): dec_out, dec_hid, dec_attn = dec(dec_in, dec_hid, enc_outs) dec_attns[di] = dec_attn.view(-1) topv, topi = dec_out.data.topk(1) dec_outs.append(tgt_lang.index2word[topi.item()]) dec_in = topi.detach() return dec_outs, dec_attns[:di + 1] def evaluate_randomly(src_lang, tgt_lang, enc, dec, tgt_sos_index, num, iter_idx): for i in range(num): src_seq, tgt_seq, seq_len = random.choice(pairs) print('>', src_seq) print('=', tgt_seq) prd_tokens, attns = evaluate( src_lang, tgt_lang, enc, dec, tgt_sos_index, src_seq, seq_len) prd_seq = ''.join(prd_tokens) print('<', prd_seq) acc = U.calc_accuracy(tgt_seq, prd_seq) print('acc: {0}'.format(acc)) plot_attn(attns, src_seq, prd_seq, acc, iter_idx)
[ "alfred532008@gmail.com" ]
alfred532008@gmail.com
87503f32f0ebd1aa3c6acc09980ebdaeb4ed6a34
0438cb6726cd47f17b75cc960d457e433beeed95
/tests/test_cli.py
7e6cc9f4c08378936ae125b5e9812674ea17fbb7
[ "MIT" ]
permissive
boydgreenfield/metasort
3071aa4600f6b5f0ba9eeb431b1cbcc7c1399102
27622d75f36b1dde959c269cb90b57f4110d813b
refs/heads/master
2021-01-22T20:39:08.266721
2015-04-10T18:57:12
2015-04-10T18:57:12
33,745,227
0
0
null
2015-04-10T18:53:23
2015-04-10T18:53:23
null
UTF-8
Python
false
false
49
py
from nose.tools import * def test_base(): pass
[ "wave@phel.im" ]
wave@phel.im
1dbec0cd8d756ebeae9a779507e72fa0e3c38631
3d06eeebdd598efba25d29d7e3d03d90ede1bfbd
/18_lesson(django)/video-shop/videostore/courses/forms.py
25df6a10b202d97a7c1598c18ec17325dee5ec84
[]
no_license
duk1edev/itproger
58bdd16088dec7864585d318935b118ce584874d
786f94fff6d816f3f978bd8c24c3d985ffd5ffb2
refs/heads/master
2021-01-02T02:43:32.684100
2020-03-28T18:10:25
2020-03-28T18:10:25
239,443,309
0
1
null
null
null
null
UTF-8
Python
false
false
571
py
from django import forms from .models import Course class CreateCourseForm(forms.ModelForm): def __init__(self, *args, **kwargs): super(CreateCourseForm, self).__init__(*args, **kwargs) self.fields['slug'].label = 'Название URL' self.fields['title'].label = 'Название курса' self.fields['description'].label = 'Описание курса' self.fields['img'].label = 'Изображение профиля' class Meta: model = Course fields = ['slug', 'title', 'description', 'img']
[ "duk1e.ptc.ua@yandex.ru" ]
duk1e.ptc.ua@yandex.ru
8af6656adc1248aaec6594d9ca08c2d83462f6ee
bd03fd4503b9249ca93dcae8b4e6d392039affff
/Desafios/desafio51.py
a5c3afb0130abda8898a0192219f1ecbc7101b78
[ "MIT" ]
permissive
ArthurBrito1/MY-SCRIPTS-PYTHON
f40e81cada60fb498249c65a444e8d8ca9be6c0a
86967fe293715a705ac50e908d3369fa3257b5a2
refs/heads/master
2020-09-14T07:18:33.061121
2020-08-05T18:00:33
2020-08-05T18:00:33
223,062,133
1
0
null
null
null
null
UTF-8
Python
false
false
229
py
print('='*20) print('10 TERMOS DE UMA PA') print('='*20) pt = int(input('Primeiro termo:')) r = int(input('Razão:')) dec = pt + (10-1)*r for c in range(pt, dec+1, r): print('{}'.format(c), end=' >> ') print('ACABOU')
[ "noreply@github.com" ]
noreply@github.com
647577be7019d95438e3a5c1aa3b2dcbafb93134
c6053ad14e9a9161128ab43ced5604d801ba616d
/Public/Public_zqxt_99/__init__.py
4f5ee4f58760d9dfb875cffb3773d9d9dbf5771b
[]
no_license
HesterXu/Home
0f6bdace39f15e8be26031f88248f2febf33954d
ef8fa0becb687b7b6f73a7167bdde562b8c539be
refs/heads/master
2020-04-04T00:56:35.183580
2018-12-25T02:48:51
2018-12-25T02:49:05
155,662,403
0
0
null
null
null
null
UTF-8
Python
false
false
164
py
# -*- coding: utf-8 -*- # @Time : 2018/12/11/10:55 # @Author : Hester Xu # Email : xuruizhu@yeah.net # @File : __init__.py.py # @Software : PyCharm
[ "xuruizhu@yeah.net" ]
xuruizhu@yeah.net
27577e7db85523739cc0108bcaeaa2e2bcf01f28
f1fb0181fe5b3d1ea7445f532cb5cce5e20a0f70
/ecomplexity/ComplexityData.py
9f249d33754bba6d68ee417b7e73ae955759134a
[ "MIT" ]
permissive
complexly/py-ecomplexity
bb8a0775415d70f5305645d2e29af5b7db206e6b
772fbae9eaa4f995be725d05eed9a6fa6a7c7156
refs/heads/master
2022-04-17T01:03:26.217566
2020-04-17T12:50:43
2020-04-17T12:50:43
256,500,518
0
0
MIT
2020-04-17T12:45:23
2020-04-17T12:45:22
null
UTF-8
Python
false
false
6,481
py
# Complexity calculations import numpy as np import pandas as pd import warnings import sys from functools import wraps import time import datetime class ComplexityData(object): """Calculate complexity and other related results Args: data: pandas dataframe containing production / trade data. Including variables indicating time, location, product and value cols_input: dict of column names for time, location, product and value. Example: {'time':'year', 'loc':'origin', 'prod':'hs92', 'val':'export_val'} val_errors_flag: {'coerce','ignore','raise'}. Passed to pd.to_numeric *default* coerce. Attributes: data: clean data with standardized column names """ def __init__(self, data, cols_input, val_errors_flag): self.data = data.copy() # Standardize column names based on input self.rename_cols(cols_input) # Clean data to handle NA's and such self.clean_data(val_errors_flag) def rename_cols(self, cols_input): """Standardize column names""" cols_map_inv = {v: k for k, v in cols_input.items()} self.data = self.data.rename(columns=cols_map_inv) self.data = self.data[['time', 'loc', 'prod', 'val']] def clean_data(self, val_errors_flag_input): """Clean data to remove non-numeric values, handle NA's and duplicates""" # Make sure values are numeric self.data.val = pd.to_numeric( self.data.val, errors=val_errors_flag_input) self.data.set_index(['time', 'loc', 'prod'], inplace=True) if self.data.val.isnull().values.any(): warnings.warn('NaN value(s) present, coercing to zero(es)') self.data.val.fillna(0, inplace=True) # Remove duplicates dups = self.data.index.duplicated() if dups.sum() > 0: warnings.warn( 'Duplicate values exist, keeping the first occurrence') self.data = self.data[~self.data.index.duplicated()] def create_full_df(self, t): """Rectangularize, but remove rows with diversity or ubiquity zero Rows with zero diversity / ubiquity lead to dividebyzero errors and incorrect values during normzalization """ self.data_t = self.data.loc[t].copy() diversity_check = 
self.data_t.reset_index().groupby( ['loc'])['val'].sum().reset_index() ubiquity_check = self.data_t.reset_index().groupby( ['prod'])['val'].sum().reset_index() diversity_check = diversity_check[diversity_check.val != 0] ubiquity_check = ubiquity_check[ubiquity_check.val != 0] self.data_t = self.data_t.reset_index() self.data_t = self.data_t.merge( diversity_check[['loc']], on='loc', how='right') self.data_t = self.data_t.merge( ubiquity_check[['prod']], on='prod', how='right') self.data_t.set_index(['loc','prod'], inplace=True) data_index = pd.MultiIndex.from_product( self.data_t.index.levels, names=self.data_t.index.names) self.data_t = self.data_t.reindex(data_index, fill_value=0) def calculate_rca(self): """Calculate RCA""" # Convert data into numpy array loc_n_vals = len(self.data_t.index.levels[0]) prod_n_vals = len(self.data_t.index.levels[1]) data_np = self.data_t.values.reshape((loc_n_vals, prod_n_vals)) # Calculate RCA, disable dividebyzero errors with np.errstate(divide='ignore', invalid='ignore'): num = (data_np / np.nansum(data_np, axis=1)[:, np.newaxis]) loc_total = np.nansum(data_np, axis=0)[np.newaxis, :] world_total = np.nansum(loc_total, axis=1)[:, np.newaxis] den = loc_total / world_total self.rca_t = num / den def calculate_rpop(self, pop, t): """Calculate RPOP""" # After constructing df with all combinations, convert data into ndarray loc_n_vals = len(self.data_t.index.levels[0]) prod_n_vals = len(self.data_t.index.levels[1]) data_np = self.data_t.values.reshape( (loc_n_vals, prod_n_vals)) pop.columns = ['time', 'loc', 'pop'] pop_t = pop[pop.time == t].copy() pop_t = pop_t.drop(columns="time") pop_t = pop_t.reset_index(drop=True).set_index('loc') pop_index = self.data_t.index.unique('loc') pop_t = pop_t.reindex(pop_index) pop_t = pop_t.values # print(pop_t.shape, data_np.shape) num = data_np / pop_t # print("Num done. 
Num shape {}".format(num.shape)) loc_total = np.nansum(data_np, axis=0)[np.newaxis, :] world_pop_total = np.nansum(pop_t) den = loc_total / world_pop_total # print("Den done. Den shape {}".format(den.shape)) rpop = num / den self.rpop_t = rpop def calculate_mcp(self, rca_mcp_threshold_input, rpop_mcp_threshold_input, presence_test, pop, t): """Calculate MCP based on RCA / RPOP / both""" def convert_to_binary(x, threshold): x = np.nan_to_num(x) x = np.where(x >= threshold, 1, 0) return(x) if presence_test == "rca": self.mcp_t = convert_to_binary(self.rca_t, rca_mcp_threshold_input) elif presence_test == "rpop": self.calculate_rpop(pop, t) self.mcp_t = convert_to_binary(self.rpop_t, rpop_mcp_threshold_input) elif presence_test == "both": self.calculate_rpop(pop, t) self.mcp_t = convert_to_binary(self.rca_t, rca_mcp_threshold_input) + \ convert_to_binary(self.rpop_t, rpop_mcp_threshold_input) def calculate_manual_mcp(self): """If pre-computed MCP supplied, check validity and reshape""" # Test to see if indeed MCP if np.any(~np.isin(self.data_t.values, [0, 1])): error_val = self.data_t.values[~np.isin( self.data_t.values, [0, 1])].flat[0] raise ValueError( "Manually supplied MCP column contains values other than 0 or 1 - Val: {}".format(error_val)) # Convert data into numpy array loc_n_vals = len(self.data_t.index.levels[0]) prod_n_vals = len(self.data_t.index.levels[1]) data_np = self.data_t.values.reshape( (loc_n_vals, prod_n_vals)) self.mcp_t = data_np
[ "shreyas.gm61@gmail.com" ]
shreyas.gm61@gmail.com
d6d72188452f31c22020a56e6b82f84f7b7910e4
041d242a0eb55495cf657c6832b5ed23628c1548
/step/최단경로/9370_미확인 도착지/지언.py
eb2fac63e7f1071bfc42bd393a9f964de72d9441
[]
no_license
easyearn77/Algorithm_baekjoon
4ba17f8435f116ee83be70edb93f3cb8f555361b
3d4d1f60c7564865683ac09624e2381611124ebc
refs/heads/main
2023-07-20T02:16:46.972552
2021-08-28T15:02:27
2021-08-28T15:02:27
362,725,267
0
1
null
2021-06-12T15:38:45
2021-04-29T07:17:31
Jupyter Notebook
UTF-8
Python
false
false
1,136
py
import sys from heapq import heappush,heappop input = sys.stdin.readline INF=100000000 def dijkstra(start): heap = [] dist = [INF for _ in range(n+1)] dist[start]=0 heappush(heap, [0, start]) while heap: distance, node = heappop(heap) for cn in graph[node]: i_cn_d = distance + cn[1] if dist[cn[0]] > i_cn_d: dist[cn[0]] = i_cn_d heappush(heap, [i_cn_d, cn[0]]) return dist T = int(input()) for _ in range(T): n, m, t = map(int, input().split()) s, g, h = map(int, input().split()) graph = [[] for _ in range(n + 1)] for _ in range(m): a, b, d = map(int, input().split()) graph[a].append([b, d]) graph[b].append([a, d]) dest = [] for _ in range(t): dest.append(int(input())) stoe=dijkstra(s) gtoe=dijkstra(g) htoe=dijkstra(h) answer=[] for e in dest: if stoe[g] + gtoe[h] + htoe[e] == stoe[e] or stoe[h] + htoe[g] + gtoe[e] ==stoe[e]: answer.append(e) answer.sort() for ans in answer: print(ans,end=' ') print()
[ "noreply@github.com" ]
noreply@github.com
125eac8a5e1310b4e67608eba3a5b4ceb227c24f
b8dd5bc2231d692cbfd647ab4b22258a220af2dd
/test/test_integration.py
5eeebfd39535acccc8712ad4de49e37ce400bfe7
[]
no_license
federicorenda/ctr-design-and-path-plan
94d5c2873b3eb0cec2a1b60beef508c5c0055d7b
12a573a93334038699c164d84dee02eb5dbcd0c1
refs/heads/master
2022-04-19T20:00:10.270571
2020-03-30T23:43:52
2020-03-30T23:43:52
null
0
0
null
null
null
null
UTF-8
Python
false
false
3,333
py
import pathlib import unittest import pyvista as pv from ctrdapp.config.parse_config import parse_config from ctrdapp.model.model import create_model from ctrdapp.solve.visualize_utils import visualize_curve_single, add_single_curve, add_objects from ctrdapp.heuristic.heuristic_factory import create_heuristic_factory from ctrdapp.collision.collision_checker import CollisionChecker from ctrdapp.solve.solver_factory import create_solver class VisualizeUtilsTest(unittest.TestCase): def test_visualize(self): # need config path = pathlib.Path().absolute() file = path / "configuration" / "config_integration.yaml" configuration, dictionaries = parse_config(file) objects_file = path / "configuration" / configuration.get("collision_objects_filename") # need model this_model = create_model(config=configuration, q=[[0.01, 0.0005], [0.02, 0.0007]]) # need to visualize g_out = this_model.solve_g(indices=[0, 0]) visualize_curve_single(g_out, objects_file, configuration.get("tube_number"), configuration.get("tube_radius")) def test_RRT(self): path = pathlib.Path().absolute() file = path / "configuration" / "config_integration.yaml" configuration, dictionaries = parse_config(file) objects_file = path / "configuration" / configuration.get("collision_objects_filename") this_model = create_model(config=configuration, q=[[-0.01391], [0.02875]]) # heuristic factory heuristic_factory = create_heuristic_factory(configuration, dictionaries.get("heuristic")) # collision detector collision_detector = CollisionChecker(objects_file) # rrt this_solver = create_solver(this_model, heuristic_factory, collision_detector, configuration) # call get_best_cost cost, best_ind = this_solver.get_best_cost() this_solver.visualize_best_solution(objects_file) this_solver.visualize_best_solution_path(objects_file) def test_visualize_solve_once(self): # create model path = pathlib.Path().absolute() file = path / "configuration" / "config_integration.yaml" configuration, dictionaries = parse_config(file) 
objects_file = path / "configuration" / configuration.get("collision_objects_filename") configuration["strain_bases"] = "linear, linear, quadratic" this_model = create_model(config=configuration, q=[[-0.02, 0.001], [0.03, 0.002], [0.01, 0.0001]]) # get g previous (using solve_g) insert_indices = [100, 100, 100] prev_g = this_model.solve_g(indices=insert_indices) # try small step size + visualize delta_theta_s = [0.2, 0.5, 0.4] delta_ins_s = [6, 5, 30] prev_ins = [10, 10, 10] g_out, eta_out, indices, true_insertion = this_model.solve_integrate(delta_theta_s, delta_ins_s, prev_ins, prev_g, invert_insert=False) plotter = pv.Plotter() # g_trunc = truncate_g(g_out, indices) add_single_curve(plotter, g_trunc, 3, configuration.get("tube_radius"), None) add_objects(plotter, objects_file) plotter.show() # try large step size + visualize if __name__ == '__main__': unittest.main()
[ "conorsmesser@gmail.com" ]
conorsmesser@gmail.com
2e3830270c2f597bdc8eabd24a620e474ff45bf7
ed76db3a268a9253837e85130c0f221bd904bff0
/BFS DFS Tree Trie/[x] 951. Flip Equivalent Binary Trees.py
977dc67a611809853b31324c4280f92d03a16f05
[]
no_license
jay-joo-code/leetcode
f54db01f195f35d436e524d6e257ad755525eb69
349bd6d54a3f463499b9f59d7aec01c9dd1fc9d0
refs/heads/master
2022-11-30T21:17:34.602100
2020-08-09T05:55:37
2020-08-09T05:55:37
null
0
0
null
null
null
null
UTF-8
Python
false
false
525
py
# attempt (AC) after mistakes # misunderstood question at first def flipEquiv(self, root1: TreeNode, root2: TreeNode) -> bool: if not root1 and not root2: return True if not root1 or not root2: return False flip = self.flipEquiv(root1.left, root2.right) and self.flipEquiv(root1.right, root2.left) noflip = self.flipEquiv(root1.right, root2.right) and self.flipEquiv(root1.left, root2.left) return root1.val == root2.val and (flip or noflip)
[ "jj534@cornell.edu" ]
jj534@cornell.edu
843d02469e85866f10c030b14a8b34b1ddb154ba
cfcd117378664e4bea080b3c1011a25a575b3d51
/hawc/apps/vocab/migrations/0004_term_uid.py
f894ab0af5c902c93c900e051fb9821419084ebb
[ "MIT" ]
permissive
shapiromatron/hawc
9d3a625da54d336334da4576bd5dac6915c18d4f
51177c6fb9354cd028f7099fc10d83b1051fd50d
refs/heads/main
2023-08-03T13:04:23.836537
2023-08-01T18:39:16
2023-08-01T18:39:16
25,273,569
25
15
NOASSERTION
2023-09-14T17:03:48
2014-10-15T21:06:33
Python
UTF-8
Python
false
false
348
py
from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("vocab", "0003_load_v1"), ] operations = [ migrations.AddField( model_name="term", name="uid", field=models.PositiveIntegerField(blank=True, null=True, unique=True), ), ]
[ "noreply@github.com" ]
noreply@github.com
c8cfb5d4514a6c437d9a1641968d93669810379e
7cd0848c38bfc8edabdd4ea71311ba62dde6920a
/DL_ICP1/Source/basicOP.py
768bc7b4561be5d325a5ca0e44591ea25d8ac518
[]
no_license
koushikkatakam1995/Python-DeepLearning_ICP
e500c717d754a4c337bc4562716c9520d3c1a35e
0cffb30d30e0fc13fb3ff8a173eabefe502a3e7e
refs/heads/master
2020-06-02T05:14:53.284445
2019-07-24T03:10:43
2019-07-24T03:10:43
191,043,773
0
1
null
null
null
null
UTF-8
Python
false
false
1,139
py
import pandas from keras.models import Sequential from keras.layers.core import Dense, Activation # load dataset from sklearn.model_selection import train_test_split import pandas as pd dataset = pd.read_csv("diabetes.csv", header=None).values # print(dataset) import numpy as np X_train, X_test, Y_train, Y_test = train_test_split(dataset[:,0:8], dataset[:,8], test_size=0.25, random_state=87) np.random.seed(155) my_first_nn = Sequential() # create model my_first_nn.add(Dense(20, input_dim=8, activation='relu')) # hidden layer my_first_nn.add(Dense(30, activation='sigmoid')) # hidden layer my_first_nn.add(Dense(10, activation='relu')) # hidden layer my_first_nn.add(Dense(15, activation='tanh')) # hidden layer my_first_nn.add(Dense(9, activation='relu')) # hidden layer my_first_nn.add(Dense(1, activation='sigmoid')) # output layer my_first_nn.compile(loss='binary_crossentropy', optimizer='adam') my_first_nn_fitted = my_first_nn.fit(X_train, Y_train, epochs=100, verbose=0, initial_epoch=0) print(my_first_nn.summary()) print(my_first_nn.evaluate(X_test, Y_test, verbose=0))
[ "koushik.katakam@gmail.com" ]
koushik.katakam@gmail.com
607b6c4c1e427642a1161ae9205245f6d6679a3a
bf1275ed5aaac8635acbdbe65ac90b9158c88b22
/src/candles.py
811e79e3ba98f21499fefc2b899077a2b75d26d8
[]
no_license
SpearVFX/Paramaker
e07c7371499e9794db94c3dd3949f21d2657eb81
15a981ae159c1df5b800ecd43a91d4d5df5510bb
refs/heads/master
2023-04-18T01:48:04.855565
2021-05-07T13:16:22
2021-05-07T13:16:22
339,559,820
0
0
null
null
null
null
UTF-8
Python
false
false
4,180
py
import cryptowatch as cw from termcolor import colored from datetime import datetime import colorama colorama.init() # Simple candle used for easier code readability class Candle: def __init__(self, timestamp, opened, high, low, closed, volume, volume_quote): self.timestamp = timestamp self.opened = opened self.high = high self.low = low self.closed = closed self.volume = volume self.volume_quote = volume_quote # Currently I'm using this only for convinience - in later stages this should be the __str__ represantion def __repr__(self): if self.opened > self.closed: color = 'red' else: color = 'green' return colored('O: {}; H: {}; L: {} C: {}'.format(self.opened, self.high, self.low, self.closed), color) # def __iter__ # This is pretty much a wrapper for an array of Candles with extra functionalities, but mainly for better readability class CandleChart: def __init__(self, exchange, pair, period): self.data = [] # This current implementation is for the cryptowatch sdk # If you want to use another API you should implement your own # init method and replace with this one self.init_cryptowatch(exchange, pair, period) self.exchange = exchange self.pair = pair self.period = period def closed_values(self): return [value.closed for value in self.data] def __setitem__(self, key): # We don't want setting custom candles, tis market manipulation pass def __getitem__(self, key): return self.data[key] def __iter__(self): return iter(self.data) def __len__(self): return len(self.data) """-------------------------------These are specific for Cryptowatch API-----------------------------------""" # Each candle is a list of [close_timestamp, opened, high, low, closed, volume, volume_quote] def init_cryptowatch(self, exchange, pair, period): #ohlc stands for opened,high,low,closed; ohlc_data = self.get_candles_cryptowatch(exchange, pair, period) for ohlc in ohlc_data: self.data.append(Candle(ohlc[0], ohlc[1], ohlc[2], ohlc[3], ohlc[4], ohlc[5], ohlc[6], )) def 
get_candles_cryptowatch(self, exchange, pair, period): with open("credentials.txt", 'r') as f: cw.api_key = f.readline() ticker = "{}:{}".format(exchange, pair).upper() try: candles = cw.markets.get(ticker, ohlc=True, periods=[period]) except: print(ticker) print("""An Error occurred trying to get the candle data for \n {} {} {}""".format(str(exchange), str(ticker), str(period))) if period == "1m": return candles.of_1m if period == "3m": return candles.of_3m if period == "5m": return candles.of_5m if period == "15m": return candles.of_15m if period == "30m": return candles.of_30m if period == "1h": return candles.of_1h if period == "2h": return candles.of_2h if period == "4h": return candles.of_4h if period == "6h": return candles.of_6h if period == "12h": return candles.of_12h if period == "1d": return candles.of_1d if period == "3d": return candles.of_3d if period == "1w": return candles.of_1w if period == "1w_monday": return candles.of_1w_monday """-------------------------------------------------------------------------------------------------------"""
[ "alexander.kostov37@gmail.com" ]
alexander.kostov37@gmail.com
c0feccb9c2fb708155fac9d7ec92d68d6dfbe30c
65a6dc575655fbd668066b78b24f4a2c95c161b0
/World.py
4c63e1cb7b49d15f887f2628b9aa55b7afeb0a7a
[]
no_license
Coaaal/MechWarrior_Game
13965d26d5a29e0c94452ca57bda77dc60f4b3e6
9e22c0ce6a13d2671d4df15f53956313b07e9175
refs/heads/master
2020-07-14T15:48:58.188644
2017-05-26T05:23:44
2017-05-26T05:23:44
73,873,580
0
0
null
2017-05-26T05:23:45
2016-11-16T02:00:07
Python
UTF-8
Python
false
false
4,366
py
from Pawns import * from Entity import * vec = pg.math.Vector2 class World: def __init__(self): self.world_coords_origin = SCREEN_WIDTH/2, SCREEN_HEIGHT/2 self.world_coords_offset = (0, 0) self.player_1 = None self.player_sprite = None self.sprite_sheet = None self.all_sprites = None self.offset_x = None self.offset_y = None self.floor_tile = None self.current_tiles = [] self.render_surface = RenderSurface(self) def update_coords(self, vector_object): self.world_coords_offset = vector_object def retrieve_coords(self): return self.world_coords_offset def generate_additional_world(self): pass def update(self): self.player_1.update() previous_sprite_count = -1 next_sprite_count = 1 currCount = 0 amount_tiles_wide = int(self.render_surface.rect.width / TILE_SIZE) for current_sprite in self.all_sprites.sprites(): current_sprite.rect.x += int(self.player_1.vel.x + 0.5 * self.player_1.acc.x) current_sprite.rect.y += int(self.player_1.vel.y + 0.5 * self.player_1.acc.y) for current_sprite in self.all_sprites.sprites(): position_modifier1 = currCount position_modifier2 = currCount if next_sprite_count == len(self.all_sprites): next_sprite_count = 0 previous_sprite_x_right = self.all_sprites.sprites()[previous_sprite_count].rect.right next_sprite_x_left = self.all_sprites.sprites()[next_sprite_count].rect.left if current_sprite.rect.x < self.render_surface.rect.x: current_sprite.rect.x = previous_sprite_x_right if current_sprite.rect.right > self.render_surface.rect.x + self.render_surface.rect.width: current_sprite.rect.right = next_sprite_x_left if current_sprite.rect.top - 1 < self.render_surface.rect.y: for a in range(amount_tiles_wide): position_modifier1 -= 1 if position_modifier1 == -1: position_modifier1 = len(self.all_sprites.sprites()) - 1 previous_sprite_y_bot = self.all_sprites.sprites()[position_modifier1].rect.bottom current_sprite.rect.top = previous_sprite_y_bot if current_sprite.rect.bottom > self.render_surface.rect.y + self.render_surface.rect.height: for b 
in range(amount_tiles_wide): position_modifier2 += 1 if position_modifier2 == len(self.all_sprites): position_modifier2 = 0 next_sprite_y_top = self.all_sprites.sprites()[position_modifier2].rect.top current_sprite.rect.bottom = next_sprite_y_top previous_sprite_count += 1 next_sprite_count += 1 currCount += 1 def new(self): self.all_sprites = pg.sprite.OrderedUpdates() self.player_sprite = pg.sprite.OrderedUpdates() for a in range(int(self.render_surface.rect.height / TILE_SIZE) + 1): for b in range(int(self.render_surface.rect.width / TILE_SIZE)): self.floor_tile = GameEntity(self, TILE_SIZE, TILE_SIZE, DESSERT) self.offset_x = (self.render_surface.rect.x + TILE_SIZE * b) self.offset_y = (self.render_surface.rect.y + TILE_SIZE * a) self.floor_tile.rect.x = self.offset_x self.floor_tile.rect.y = self.offset_y # self.render_surface.blit(self.floor_tile.image, (self.offset_x, self.offset_y), None) self.all_sprites.add(self.floor_tile) self.player_1 = Player(self, TILE_SIZE, TILE_SIZE) self.player_1.rect.center = self.render_surface.rect.center self.player_sprite.add(self.player_1) class RenderSurface(pg.Surface): def __init__(self, game, width=SURFACE_WIDTH, height=SURFACE_HEIGHT): pg.Surface.__init__(self, (width, height)) self.game = game self.width = width self.height = height self.image = pg.Surface((self.width, self.height)) self.rect = self.image.get_rect() self.rect.center = [SCREEN_WIDTH/2, SCREEN_HEIGHT/2] # self.game.screen.blit(self.image, self.rect)
[ "coaaal@gmail.com" ]
coaaal@gmail.com
4e995985f87985f9f0b38c60bb6d5e64cebe60d5
f867688397d14ed17d7bdf36a3333f7ecb7515a7
/ncaambb/teamtwitters.py
0661e172499b299b823cc5bd215a55cc551c733e
[]
no_license
jlweichen/sportstwitter
9204710fbc0b5f3b4af6f9011b413d13d42e7877
f4fb77d3fd79830bad5573aacd1504d95d58c5fe
refs/heads/master
2021-05-14T03:52:40.200876
2020-03-01T21:09:51
2020-03-01T21:09:51
116,629,996
0
0
null
null
null
null
UTF-8
Python
false
false
10,178
py
teams = { 'Gonzaga':'@ZagMBB', 'Auburn': '@AuburnMBB', 'Kentucky':'@KentuckyMBB', 'Colorado':'@CUBuffsMBB', 'Oregon':'@OregonMBB', 'LSU':'@LSUBasketball', 'BYU':'@BYUbasketball', 'Arizona':'@APlayersProgram', 'SFA':'@SFA_MBB', 'ETSU':'@ETSU_MBB', 'Colgate':'@ColgateMBB', 'Prairie View A&M':'@PVAMUPanthers', 'South Dakota St.': '@GoJacksMBB', 'Little Rock':'@LittleRockMBB', 'Stanford':'@StanfordMBB', 'Alabama':'@AlabamaMBB', 'Loyola(MD)': '@LoyolaMBB', 'Lehigh':'@LehighMBB', 'Arizona St.': '@SunDevilHoops', 'Southern California': '@USC_Hoops', 'UCLA': '@UCLAMBB', 'Missouri': '@MizzouHoops', 'Saint Mary\'s (CA)': '@saintmaryshoops', 'Florida': '@GatorsMBK', 'Arkansas': '@RazorbackMBB', 'Mississippi St.': '@HailStateMBK', 'UNC Greensboro': '@UNCGBasketball', 'Tennessee': '@Vol_Hoops', 'South Carolina': '@GamecockMBB', 'Washington': '@UW_MBB', 'Furman': '@FurmanHoops', 'Oregon St.': '@BeaverMBB', 'Ole Miss': '@OleMissMBB', 'Utah': '@UtahMBB', 'San Francisco': '@USFDonsMBB', 'Georgia': '@UGABasketball', 'Vanderbilt': '@VandyMBB', 'Washington St.': '@WSUCougarMBB', 'Pacific': '@PacificMensBB', 'Georgia St.': '@GeorgiaStateMBB', 'UT Arlington': '@UTAMavsMBB', 'Texas St.': '@TXStateMBB', 'Texas A&M':'@aggiembk', 'Pepperdine': '@PeppBasketball', 'New Mexico State': '@NMStateMBB' } a10 = { 'Davidson':'@DavidsonMBB', 'Dayton':'@DaytonMBB', 'Duquesne':'@DuqMBB', 'Fordham':'@FordhamMBB', 'George Mason':'@MasonMBB', 'George Washington':'@GW_MBB', 'La Salle':'@LaSalle_MBB', 'Rhode Island': '@RhodyMBB', 'Richmond':'@SpiderMBB', 'Saint Joseph\'s':'@SJUHawks_MBB', 'Saint Louis':'@SaintLouisMBB', 'St. 
Bonaventure':'@BonniesMBB', 'UMass':'@UMassBasketball', 'VCU':'@VCU_Hoops' } acc = { 'Boston College':'@BCMBB', 'Clemson':'@ClemsonMBB', 'Duke':'@DukeMBB', 'Florida St.': '@FSUHoops', 'Georgia Tech':'@GTMBB', 'Louisville':'@LouisvilleMBB', 'Miami (FL)':'@CanesHoops', 'NC State':'@PackMensBball', 'North Carolina':'@UNC_Basketball', 'Notre Dame':'@NDMBB', 'Pittsburgh':'@Pitt_MBB', 'Syracuse':'@Cuse_MBB', 'Virginia':'@UVAMensHoops', 'Virginia Tech':'@HokiesMBB', 'Wake Forest':'@WakeMBB' } asun = { 'Florida Gulf Coast':'@FGCU_MBB', 'Kennesaw State':'@KSUOwlsMBB', 'Jacksonville':'@JAX_MBB', 'Liberty': '@LibertyMBB_', 'Lipscomb':'@LipscombMBB', 'NJIT':'@NJITHoops', 'North Alabama':'@UNA_Basketball', 'North Florida':'@OspreysMBB', 'Stetson':'@StetsonMBB' } ameast = { 'Albany':'@UAlbanyMBB', 'Binghamton':'@BinghamtonMBB', 'Hartford':'@HartfordMBB', 'Maine':'@BlackBearsMBB', 'New Hampshire':'@UNHMBB', 'Stony Brook':'@StonyBrookMBB', 'UMass Lowell':'@RiverHawkMBB', 'UMBC':'@UMBC_MBB', 'Vermont':'@UVMmbb' } american = { 'Cincinnati':'@GoBearcatsMBB', 'East Carolina':'@ecubasketball', 'Houston':'@UHCougarMBK', 'Memphis':'@Memphis_MBB', 'SMU':'@SMUBasketball', 'South Fla.':'@USFMBB', 'Temple':'@TUMBBHoops', 'Tulane':'@GreenWaveMBB', 'Tulsa':'@TUMBasketball', 'UCF':'@UCF_MBB', 'UConn':'@UConnMBB', 'Wichita St.':'@GoShockersMBB' } big12 = { 'Baylor':'@BaylorMBB', 'Iowa St.':'@CycloneMBB', 'Kansas':'@KUHoops', 'Kansas St.':'@KStateMBB', 'TCU':'@TCUBasketball', 'Texas':'@TexasMBB', 'Texas Tech':'@TexasTechMBB', 'Oklahoma':'@OU_MBBall', 'Oklahoma St.':'@OSUMBB', 'West Virginia':'@WVUhoops' } bigeast = { 'Butler':'@ButlerMBB', 'Creighton':'@BluejayMBB', 'DePaul':'@DePaulHoops', 'Georgetown':'@GeorgetownHoops', 'Marquette':'@MarquetteMBB', 'Providence':'@PCFriarsmbb', 'Seton Hall':'@SetonHallMBB', 'St. 
John\'s (NY)':'@StJohnsBBall', 'Villanova':'@NovaMBB', 'Xavier':'@XavierMBB', } bigsky = { 'Eastern Wash.':'@EWUMBB', 'Idaho':'@VandalHoops', 'Idaho State':'@IdahoStateBball', 'Montana':'@MontanaGrizBB', 'Montana St.':'@MSUBobcatsMBB', 'Northern Arizona':'@NAUBasketball', 'Northern Colo.':'@UNC_BearsMBB', 'Portland St.':'@psuviksMBB', 'Sacramento St.': '@SacHornetsMBB', 'Southern Utah':'@SUUBasketball', 'Weber State':'@WeberStateMBB' } bigsouth = { 'Campbell':'@GoCamelsMBB', 'Charleston Southern':'@CSU_Mbball', 'Gardner-Webb':'@GWU_MBK', 'Hampton':'@Hampton_MBB', 'High Point':'@HPUMBB', 'Longwood':'@LongwoodMBB', 'Presbyterian':'@BlueHoseHoops', 'Radford':'@RadfordMBB', 'USC Upstate':'@UpstateMB', 'UNC Asheville':'@UNCAvlMBB', 'Winthrop':'@Winthrop_MBB' } bigten = { 'Illinois':'@IlliniMBB', 'Indiana':'@IndianaMBB', 'Iowa':'@iowaHoops', 'Maryland':'@TerrapinHoops', 'Michigan':'@umichbball', 'Michigan St.':'@MSU_Basketball', 'Minnesota':'@GopherMBB', 'Nebraska':'@HuskerHoops', 'Northwestern':'@NUMensBball', 'Ohio St.':'@OhioStateHoops', 'Penn St.':'@PennStateMBB', 'Purdue':'@BoilerBall', 'Rutgers':'@RutgersMBB', 'Wisconsin':'@BadgerMBB' } bigwest = { 'Cal Poly': '@calpolymbb', 'UC Irvine':'@UCImbb', 'UC Davis':'@ucdavismbb', 'CSU Northridge':'@CSUNMBB', 'Hawai\'i': '@HawaiiMBB', 'CSU Fullerton': '@FullertonMBB', 'Long Beach State': '@LBSUhoops', 'UC Riverside': '@UCRMBB', 'UC Santa Barbara':'@UCSBbasketball' } cusa = { 'North Texas':'@MeanGreenMBB', 'Western Ky.': '@WKUBasketball', 'Louisiana Tech': '@LATechHoops', 'Florida International': '@FIUHoops', 'Charlotte': '@CharlotteMBB', 'Old Dominion': '@ODUMensHoops', 'UAB': '@UAB_MBB', 'Marshall': '@HerdMBB', 'Florida Atlantic': '@FAU_Hoops', 'UTSA': '@UTSAMBB', 'Rice': '@RiceBasketball', 'Southern Miss': '@SouthernMissMBB', 'UTEP': '@UTEP_MBB', 'Middle Tennessee': '@MT_MBB' } caa = { 'Hofstra':'@HofstraMBB', 'William & Mary':'@WMTribeMBB', 'Delaware': '@DelawareMBB', 'Towson': '@Towson_MBB', 'Charleston': 
'@CofCBasketball', 'Northeastern': '@GoNUmbasketball', 'Drexel': '@DrexelMBB', 'Elon': '@ElonMBasketball', 'UNC Wilmington': '@uncwmenshoops', 'James Madison': '@JMUMBasketball' } horizon = { 'Wright St.':'@WSU_MBB', 'Northern Kentucky': '@NKUNorseMBB', 'Youngstown St.': '@YSUMensHoops', 'Green Bay': '@gbphoenixmbb', 'UIC': '@UICFlamesMBB', 'Milwaukee': '@MKE_MBB', 'Cleveland State': '@CSU_Basketball', 'Oakland': '@OaklandMBB' } ivy = { 'Yale':'@YaleMBasketball', 'Princeton': '@PrincetonMBB', 'Brown': '@BrownBasketball', 'Pennsylvania': '@PennBasketball', 'Harvard': '@HarvardMBB', 'Dartmouth': '@DartmouthMBK', 'Cornell': '@CUBigRedHoops', 'Columbia': '@CULionsMBB' } maac = { 'Saint Peter\'s':'@PeacocksMBB', 'Siena': '@SienaMBB', 'Rider': '@RiderMBB', 'Monmouth': '@MonmouthBBall', 'Manhattan': '@JaspersMBB', 'Niagara': '@NiagaraMBB', 'Iona': '@IonaGaelsMBB', 'Quinnipiac': '@QU_MBB', 'Fairfield': '@StagsMensBball', 'Marist': '@MaristMBB', 'Canisius': '@Griffs_MBB' } mac = { 'Bowling Green': '@BGSUMHoops', 'Akron': '@ZipsMBB', 'Kent St.': '@KentStMBB', 'Buffalo': '@UBmenshoops', 'Ohio': '@OhioMBasketball', 'Miami (OH)': '@MiamiOH_BBall', 'Northern Ill.': '@GoHuskiesMBB', 'Ball St.': '@BallStateMBB', 'Central Mich.': '@CMUMensBBall', 'Eastern Mich.': '@EMUHoops', 'Western Mich.': '@WMUMBB', 'Toledo': '@ToledoMBB' } meac = { 'N.C. A&T': '@ATAggieHoops', 'N.C. 
Central': '@NCCU_MBB', 'Norfolk St.': '@NSU_BBALL', 'Florida A&M': '@famu_mbb', 'Bethune-Cookman': '@BCUhoops', 'Morgan St.': '@MorganStBears', 'South Carolina St.': '@SCStateAthletic', 'Coppin St.': '@CoppinMBB', 'UMES': '@ESHawksHoops', 'Delaware St.': '@DelawareMBB', 'Howard': '@HUMensBB' } mvc = { 'UNI':'@UNImbb', 'Loyola Chicago':'@RamblersMBB', 'Bradley': '@bradleyumbb', 'Indiana St.': '@IndStMBB', 'Southern Ill.': '@SIU_Basketball', 'Valparaiso': '@ValpoBasketball', 'Missouri St.': '@MSUBearsHoops', 'Drake': '@DrakeBulldogsMB', 'Illinois St.': '@Redbird_MBB', 'Evansville': '@UEAthletics_MBB' } mountainwest = { 'San Diego St.':'@Aztec_MBB', 'Utah St.': '@USUBasketball', 'Nevada': '@NevadaHoops', 'UNLV': '@TheRunninRebels', 'Boise St.': '@BroncoSportsMBB', 'Colorado St.': '@CSUMBasketball', 'New Mexico': 'UNMLoboMBB', 'Fresno St.': '@FresnoStateMBB', 'Air Force': '@AF_MBB', 'San Jose St.': '@SanJoseStateMBB', 'Wyoming': '@wyo_mbb' } nec = { 'Merrimack':'@MerrimackMBB', 'Saint Francis (PA)' : '@RedFlashMBB', 'Robert Morris': '@RMUMBasketball', 'Sacred Heart': '@SHUBigRed', 'LIU': '@LIUBasketball', 'Fairleigh Dickinson': '@FDUKnightsMBB', 'Mount St. Mary\'s': '@MountHoops', 'Bryant': '@BryantHoops', 'St. Francis Brooklyn': '@sfbkmbb', 'Wagner': '@Wagner_MBB', 'Central Conn. St': '@CCSU_MBB' } ovc = { 'Belmont': '@BelmontMBB', 'Murray St.': '@RacersHoops', 'Austin Peay':'@AustinPeayMBB', 'Eastern Ky.': '@EKUHoops', 'Tennessee St.': '@TSUTigersMBB', 'Eastern Ill.': '@eiubasketball', 'Jacksonville St.': '@JSUGamecocksMBB', 'Morehead St.': '@MSUEaglesMBB', 'Tennessee Tech': '@TTU_Basketball', 'UT Martin': '@SkyhawkHoops', 'SIUE': '@SIUEMBB', 'Southeast Mo. St.': '@SEMOMBB' } bigteams= {**a10, **acc, **asun, **ameast, **american, **big12, **bigeast, **bigsky, **bigsouth, **bigten, **bigwest, **cusa, **caa, **horizon, **ivy, **maac, **mac, **meac, **mvc, **mountainwest, **nec, **ovc, **teams}
[ "noreply@github.com" ]
noreply@github.com
3ef46392ca1c5bbd793009df1348cd98028e338a
26ec21923453fd402dff7c209723ec5a429c9639
/variation/sql.py
b0113b64978f8f616d59962cf9f879a5af5c7d99
[]
no_license
xiar/vpduserv
df5346f59fe582457197e34cc4140e8eb44ecaa7
9d985aaf48c3b140b0bd1b1589dac1c21a6c1982
refs/heads/master
2021-01-15T09:09:18.731127
2016-07-04T02:17:25
2016-07-04T02:17:25
57,357,403
1
0
null
2016-04-29T05:40:22
2016-04-29T05:40:22
null
UTF-8
Python
false
false
7,898
py
# # SNMP Simulator, http://snmpsim.sourceforge.net # # Managed value variation module: simulate a writable Agent using # SQL backend for storing Managed Objects # # Module initialization parameters are dbtype:<dbms>,dboptions:<options> # # Expects to work a table of the following layout: # CREATE TABLE <tablename> (oid text, tag text, value text, maxaccess text) # from snmpsim.grammar.snmprec import SnmprecGrammar from snmpsim.mltsplit import split from snmpsim import error, log from pysnmp.smi import error as Error import os isolationLevels = { '0': 'READ UNCOMMITTED', '1': 'READ COMMITTED', '2': 'REPEATABLE READ', '3': 'SERIALIZABLE' } moduleContext = {} def init(**context): options = {} if context['options']: options.update( dict([split(x, ':') for x in split(context['options'], ',')]) ) if 'dbtype' not in options: raise error.SnmpsimError('database type not specified') db = __import__( options['dbtype'], globals(), locals(), options['dbtype'].split('.')[:-1] ) if 'dboptions' in options: connectParams = {'database': options['dboptions']} else: connectParams = dict( [(k, options[k]) for k in options if k in ('host', 'port', 'user', 'passwd', 'password', 'db', 'database', 'unix_socket', 'named_pipe')] ) for k in 'port', 'connect_timeout': if k in connectParams: connectParams[k] = int(connectParams[k]) if not connectParams: raise error.SnmpsimError('database connect parameters not specified') moduleContext['dbConn'] = dbConn = db.connect(**connectParams) moduleContext['dbTable'] = dbTable = options.get('dbtable', 'snmprec') moduleContext['isolationLevel'] = options.get('isolationlevel', '1') if moduleContext['isolationLevel'] not in isolationLevels: raise error.SnmpsimError('unknown SQL transaction isolation level %s' % moduleContext['isolationLevel']) if not os.path.exists("/tmp/inform"): os.mkfifo('/tmp/inform') try: moduleContext['inform'] = inform = os.open("/tmp/inform", os.O_WRONLY | os.O_NONBLOCK) except Exception, ex: raise error.SnmpsimError('---> 
Infrasim: {0}: {1}'.format(Exception, ex)) def variate(oid, tag, value, **context): if 'dbConn' in moduleContext: dbConn = moduleContext['dbConn'] else: raise error.SnmpsimError('variation module not initialized') cursor = dbConn.cursor() try: cursor.execute( 'set session transaction isolation level %s' % moduleContext['isolationLevel'] ) cursor.fetchall() except: # non-MySQL/Postgres pass if value: dbTable = value.split(',').pop(0) elif 'dbTable' in moduleContext: dbTable = moduleContext['dbTable'] else: log.msg('SQL table not specified for OID %s' % (context['origOid'],)) return context['origOid'], tag, context['errorStatus'] origOid = context['origOid'] sqlOid = '.'.join(['%10s' % x for x in str(origOid).split('.')]) if context['setFlag']: if 'hexvalue' in context: textTag = context['hextag'] textValue = context['hexvalue'] else: textTag = SnmprecGrammar().getTagByType(context['origValue']) textValue = str(context['origValue']) cursor.execute( 'select maxaccess,tag,value from %s where oid=\'%s\' limit 1' % (dbTable, sqlOid) ) resultset = cursor.fetchone() if resultset: maxaccess = resultset[0] if maxaccess != 'read-write': return origOid, tag, context['errorStatus'] value_written = textValue try: value_settings = {} value_settings = dict([split(x, '=') for x in split(resultset[2], ',')]) print value_settings # if detected error mode, raise an error if 'mode' in value_settings and \ value_settings['mode'] == 'error': raise Error.WrongValueError(name=origOid, idx=max(0, context['varsTotal'] - context['varsRemaining'] - 1)) elif 'mode' in value_settings and \ value_settings['mode'] == 'normal': value_written = "mode=" + value_settings['mode'] + \ ",value=" + textValue else: return origOid, tag, context['errorStatus'] except Error.WrongValueError: cursor.close() raise Error.WrongValueError(name=origOid, idx=max(0, context['varsTotal'] - context['varsRemaining'] - 1)) except: pass cursor.execute( 'update %s set tag=\'%s\',value=\'%s\' where oid=\'%s\'' % (dbTable, 
textTag, value_written, sqlOid) ) inform = moduleContext.get('inform') try: value = str(origOid) + " " + textValue written_len = os.write(inform, value) if written_len != len(value): log.msg("--->Infrasim: Expected length %d, actual length %d\n" % (len(str(origOid)), written_len)) cursor.close() return origOid, tag, context['errorStatus'] except Exception, ex: log.msg("--->Infrasim: {0}".format(ex)) cursor.close() return origOid, tag, context['errorStatus'] else: cursor.close() raise Error.NoSuchInstanceError(name=origOid, idx=max(0, context['varsTotal'] - context['varsRemaining'] - 1)) if context['varsRemaining'] == 0: # last OID in PDU dbConn.commit() cursor.close() return origOid, textTag, context['origValue'] else: if context['nextFlag']: cursor.execute('select oid from %s where oid>\'%s\' order by oid limit 1' % (dbTable, sqlOid)) resultset = cursor.fetchone() if resultset: origOid = origOid.clone( '.'.join([x.strip() for x in str(resultset[0]).split('.')]) ) sqlOid = '.'.join(['%10s' % x for x in str(origOid).split('.')]) else: cursor.close() return origOid, tag, context['errorStatus'] cursor.execute('select tag, value from %s where oid=\'%s\' limit 1' % (dbTable, sqlOid)) resultset = cursor.fetchone() cursor.close() if resultset: try: value_settings = {} value_settings = \ dict([split(x, '=') for x in split(resultset[1], ',')]) print value_settings if 'mode' in value_settings: return origOid, str(resultset[0]), str(value_settings['value']) except: pass return origOid, str(resultset[0]), str(resultset[1]) else: return origOid, tag, context['errorStatus'] def shutdown(**context): dbConn = moduleContext.get('dbConn') if dbConn: if 'mode' in context and context['mode'] == 'recording': dbConn.commit() dbConn.close() inform = moduleContext.get('inform') if inform: os.close(inform)
[ "helloarys@gmail.com" ]
helloarys@gmail.com
97ef40139203515bd0014bc3608ad7be519a4032
39ca327035771b6b80e0eb18e5712f9a56e5d9e6
/ipUtils/ipUtil.py
26522f42cdd8ccbf685a1063286371747176e786
[]
no_license
D5quar3/Code
fe7bf9396b4f53243837ca9b523ce0ca012a6b85
5f2328f91f7c3c2f96fae894cfb2f1cff3c78c74
refs/heads/master
2016-09-10T15:37:00.948363
2015-04-03T18:01:59
2015-04-03T18:01:59
33,375,684
0
0
null
null
null
null
UTF-8
Python
false
false
583
py
#! /usr/bin/python2.7 import os import re class ipUtil: def __init__(self, ip): self.ip = ip def scan(self): open('pingResults.txt', 'w').close os.system('nmap -sn -oG pingResults.txt '+ self.ip) def connected(self): pat = re.compile('Status: Up') resfile = open('pingResults.txt', 'r') found = False for line in resfile: con = pat.search(line) if con: found = True if found: return "ONLINE" else: return "OFFLINE"
[ "builderofrobots@yahoo.com" ]
builderofrobots@yahoo.com
e89461a51e52313d597915885da1df109637baae
ae288b9604ee86b471d698023fce03738b578544
/lib/system/__init__.py
d3474854c5d8888f77545f1a7a11a08f805ffc55
[]
no_license
snaress/studio
a8421a0772600494859ba86daace4bf499f8e055
90f4fc50ca9541c0d70cb381c8002ef8a3ce8087
refs/heads/master
2021-01-17T05:49:57.193795
2016-02-07T13:57:24
2016-02-07T13:57:24
25,691,833
0
0
null
null
null
null
UTF-8
Python
false
false
147
py
import os #-- Package Var --# toolPath = os.path.normpath(os.path.dirname(__file__)) toolName = toolPath.split(os.sep)[-1] toolPack = __package__
[ "jln.buisseret@gmail.com" ]
jln.buisseret@gmail.com
bd1ab995c82446f03ced54cf6ec76668c26fe90c
25270c94477bb0e00cbd5d070ed1e7bbea04f9c2
/retina/efnet.py
01ccc169d402197a7e9ad7260135802ecf790e6f
[]
no_license
nvvaulin/icevision2019
610ff095bb247663b07dd00dfc46c690e3aa9f19
5eeb5122b1faab96ee7f3e7ff2ec871d9f3923b4
refs/heads/master
2022-10-30T21:41:05.681326
2019-07-15T13:52:01
2019-07-15T13:55:42
207,381,303
0
1
null
2022-10-21T10:50:28
2019-09-09T18:50:14
Jupyter Notebook
UTF-8
Python
false
false
11,129
py
# from collections import namedtuple # from maskrcnn_benchmark.utils.registry import Registry # from maskrcnn_benchmark.modeling.make_layers import group_norm # from maskrcnn_benchmark.layers import FrozenBatchNorm2d from torch import nn import torch from torch import nn from torch.nn import functional as F from efficientnet_pytorch import EfficientNet from efficientnet_pytorch.utils import relu_fn import sys sys.path.append('dropblock') from efnet_utils import ( relu_fn, round_filters, round_repeats, drop_connect, get_same_padding_conv2d, get_model_params, efficientnet_params, load_pretrained_weights, ) class MBConvBlock(nn.Module): """ Mobile Inverted Residual Bottleneck Block Args: block_args (namedtuple): BlockArgs, see above global_params (namedtuple): GlobalParam, see above Attributes: has_se (bool): Whether the block contains a Squeeze and Excitation layer. """ def __init__(self, block_args, global_params, norm_func): super().__init__() #self.dropblock = LinearScheduler(DropBlock2D(block_size=3, drop_prob=0), 0, 0.1, 1000) self._block_args = block_args self._bn_mom = 1 - global_params.batch_norm_momentum self._bn_eps = global_params.batch_norm_epsilon self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1) self.id_skip = block_args.id_skip # skip connection and drop connect # Get static or dynamic convolution depending on image size Conv2d = get_same_padding_conv2d(image_size=global_params.image_size) # Expansion phase inp = self._block_args.input_filters # number of input channels oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels if self._block_args.expand_ratio != 1: self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False) # self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) self._bn0 = norm_func(oup) # Depthwise convolution phase k = self._block_args.kernel_size s = self._block_args.stride 
self._depthwise_conv = Conv2d( in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise kernel_size=k, stride=s, bias=False) # self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) self._bn1 = norm_func(oup) # Squeeze and Excitation layer, if desired if self.has_se: num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio)) self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1) self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1) # Output phase final_oup = self._block_args.output_filters self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False) # self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) self._bn2 = norm_func(final_oup) def forward(self, inputs, drop_connect_rate=None): """ :param inputs: input tensor :param drop_connect_rate: drop connect rate (float, between 0 and 1) :return: output of block """ # Expansion and Depthwise Convolution x = inputs if self._block_args.expand_ratio != 1: x = relu_fn(self._bn0(self._expand_conv(inputs))) x = relu_fn(self._bn1(self._depthwise_conv(x))) # Squeeze and Excitation if self.has_se: x_squeezed = F.adaptive_avg_pool2d(x, 1) x_squeezed = self._se_expand(relu_fn(self._se_reduce(x_squeezed))) x = torch.sigmoid(x_squeezed) * x x = self._bn2((self._project_conv(x))) # x = self._bn2(self.dropblock(self._project_conv(x))) # Skip connection and drop connect input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters: if drop_connect_rate: x = drop_connect(x, p=drop_connect_rate, training=self.training) x = x + (inputs) # skip connection return x # class EfficientNetFeaturesExtractor(nn.Module): # def __init__(self): # super().__init__() # # self.backbone = 
EfficientNet.from_pretrained('efficientnet-b0') # # self.feature_layers = (4, 10, 15) # self.backbone = EfficientNet.from_pretrained('efficientnet-b4') # self.feature_layers = (9, 21, 31) # # def forward(self, inputs): # outputs = [] # # # Stem # x = relu_fn(self.backbone._bn0(self.backbone._conv_stem(inputs))) # # # Blocks # for idx, block in enumerate(self.backbone._blocks): # drop_connect_rate = self.backbone._global_params.drop_connect_rate # if drop_connect_rate: # drop_connect_rate *= float(idx) / len(self.backbone._blocks) # x = block(x, drop_connect_rate=drop_connect_rate) # if idx in self.feature_layers: # outputs.append(x) # return outputs class EfficientNet(nn.Module): """ An EfficientNet model. Most easily loaded with the .from_name or .from_pretrained methods Args: blocks_args (list): A list of BlockArgs to construct blocks global_params (namedtuple): A set of GlobalParams shared between blocks Example: model = EfficientNet.from_pretrained('efficientnet-b0') """ def __init__(self, blocks_args, global_params): super().__init__() Conv2d = get_same_padding_conv2d(image_size=None) # block_args, global_params = get_model_params(cfg.BACKBONE.CONV_BODY, override_params=False) self._blocks_args, self._global_params = blocks_args, global_params # self._blocks_args, self._global_params = get_model_params('efficientnet-b4', override_params=False) self._features_idx = set((9, 21, 31)) # self._features_idx = set((7, 15, 22)) self._fpn_in_channels = [40, 112, 320] self._fpn_out_channels = 320 # Batch norm parameters bn_mom = 1 - self._global_params.batch_norm_momentum bn_eps = self._global_params.batch_norm_epsilon # Stem in_channels = 3 # rgb out_channels = round_filters(32, self._global_params) # number of output channels self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False) # self._norm_func = lambda out: nn.GroupNorm(8, out) self._norm_func = lambda out: nn.BatchNorm2d(out) # if cfg.MODEL.EFNET.BN == 'GN': # self._norm_func = 
group_norm # else: # self._norm_func = FrozenBatchNorm2d # self._bn0 = FrozenBatchNorm2d(out_channels) # self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) self._bn0 = self._norm_func(out_channels) # Build blocks self._blocks = nn.ModuleList([]) for block_args in self._blocks_args: # Update block input and output filters based on depth multiplier. block_args = block_args._replace( input_filters=round_filters(block_args.input_filters, self._global_params), output_filters=round_filters(block_args.output_filters, self._global_params), num_repeat=round_repeats(block_args.num_repeat, self._global_params) ) # The first block needs to take care of stride and filter size increase. self._blocks.append(MBConvBlock(block_args, self._global_params, self._norm_func)) if block_args.num_repeat > 1: block_args = block_args._replace(input_filters=block_args.output_filters, stride=1) for _ in range(block_args.num_repeat - 1): self._blocks.append(MBConvBlock(block_args, self._global_params, self._norm_func)) # Head in_channels = block_args.output_filters # output of final block out_channels = round_filters(1280, self._global_params) self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False) # self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) self._bn1 = self._norm_func(out_channels) # Final linear layer self._dropout = self._global_params.dropout_rate self._fc = nn.Linear(out_channels, self._global_params.num_classes) def forward(self, inputs): outputs = [] # Stem x = relu_fn(self._bn0(self._conv_stem(inputs))) # Blocks for idx, block in enumerate(self._blocks): drop_connect_rate = self._global_params.drop_connect_rate if drop_connect_rate: drop_connect_rate *= float(idx) / len(self._blocks) x = block(x, drop_connect_rate=drop_connect_rate) if idx in self._features_idx: outputs.append(x) return outputs # def forward(self, inputs): # """ Calls extract_features to extract features, applies final linear 
layer, and returns logits. """ # # # Convolution layers # x = self.extract_features(inputs) # # # Pooling and final linear layer # x = F.adaptive_avg_pool2d(x, 1).squeeze(-1).squeeze(-1) # if self._dropout: # x = F.dropout(x, p=self._dropout, training=self.training) # x = self._fc(x) # return x @classmethod def from_name(cls, model_name, override_params=None): cls._check_model_name_is_valid(model_name) blocks_args, global_params = get_model_params(model_name, override_params) return EfficientNet(blocks_args, global_params) @classmethod def from_pretrained(cls, model_name, num_classes=1000): model = EfficientNet.from_name(model_name, override_params={'num_classes': num_classes}) load_pretrained_weights(model, model_name, load_fc=(num_classes == 1000)) return model @classmethod def get_image_size(cls, model_name): cls._check_model_name_is_valid(model_name) _, _, res, _ = efficientnet_params(model_name) return res @classmethod def _check_model_name_is_valid(cls, model_name, also_need_pretrained_weights=False): """ Validates model name. None that pretrained weights are only available for the first four models (efficientnet-b{i} for i in 0,1,2,3) at the moment. """ num_models = 4 if also_need_pretrained_weights else 8 valid_models = ['efficientnet_b'+str(i) for i in range(num_models)] if model_name.replace('-','_') not in valid_models: raise ValueError('model_name should be one of: ' + ', '.join(valid_models))
[ "itisgrisha@gmail.com" ]
itisgrisha@gmail.com
8d330fc84abbde5b8b43600d100dc19c4363f696
f18beb468016da1d4360773c55b7313a97c9a057
/document_similarity/views.py
abd6dc5ca178def4e80d1499f9a1f6f370128fce
[]
no_license
BilgiAILAB/bilgi-ai
e40e6126c562812457478960308e40e07a3e510e
fdb1c4b2568e657c38c2a288b1ade217e1bc628b
refs/heads/main
2022-12-30T06:12:27.942876
2020-09-17T09:09:34
2020-09-17T09:09:34
305,128,543
2
1
null
null
null
null
UTF-8
Python
false
false
4,858
py
import json from django.http import HttpResponseRedirect from django.shortcuts import render, get_object_or_404, redirect from django.urls import reverse from document_similarity.algorithms.similarity import TFIDFCosineSimilarity, TFIDFEuclideanDistance, \ TFIDFManhattanDistance, word2VecCosineSimilarity, word2VecEuclideanDistance, \ word2VecManhattanDistance from document_similarity.models import Report from project.models import Project def similarity_algorithms(request, pk): project = get_object_or_404(Project, pk=pk) reports = Report.objects.filter(project_id=pk) content = {'project': project, 'reports': reports, 'title': f'Document Similarity - {project.title}'} breadcrumb = { "Projects": reverse('all_projects'), project.title: reverse('show_project', args=[project.id]), "Document Similarity": "" } content['breadcrumb'] = breadcrumb return render(request, 'document_similarity/index.html', content) def apply_similarity_algorithm(request, pk, algorithm): project = get_object_or_404(Project, pk=pk) reports = Report.objects.filter(project_id=pk, algorithm=algorithm.lower()) content = {'project': project, 'algorithm': algorithm, 'reports': reports, 'files': project.get_files(), 'title': f'{algorithm.upper()} - {project.title}'} breadcrumb = { "Projects": reverse('all_projects'), project.title: reverse('show_project', args=[project.id]), "Document Similarity": reverse('similarity_algorithms', args=[pk]), algorithm.upper(): "" } content['breadcrumb'] = breadcrumb if request.method == 'POST': selected_file_id = int(request.POST['file']) files = project.get_files() corpus = [] index = 0 selected_document_index = 0 selected_document_name = 0 for file in files: if file.id == selected_file_id: selected_document_index = index selected_document_name = file.filename() index += 1 file_read = open(file.file.path, "r", encoding='utf8') lines = file_read.read() file_read.close() corpus.append(lines) if algorithm.lower() == 'tfidf-cos': outputs = 
TFIDFCosineSimilarity(selected_document_index, corpus) elif algorithm.lower() == 'tfidf-euc': outputs = TFIDFEuclideanDistance(selected_document_index, corpus) elif algorithm.lower() == 'tfidf-man': outputs = TFIDFManhattanDistance(selected_document_index, corpus) elif algorithm.lower() == 'word2vec-cos': outputs = word2VecCosineSimilarity(selected_document_index, corpus) elif algorithm.lower() == 'word2vec-euc': outputs = word2VecEuclideanDistance(selected_document_index, corpus) elif algorithm.lower() == 'word2vec-man': outputs = word2VecManhattanDistance(selected_document_index, corpus) content['outputs'] = outputs content['selected_document_index'] = selected_document_index report = Report() report.project = project report.algorithm = algorithm.lower() report.all_data = json.dumps(outputs, separators=(',', ':')) report.selected_document_index = selected_document_index report.selected_document_name = selected_document_name report.save() return redirect('view_similarity_report', project.id, algorithm, report.id) return render(request, 'document_similarity/params.html', content) def view_similarity_report(request, project_pk, algorithm, report_pk): project = get_object_or_404(Project, pk=project_pk) report = get_object_or_404(Report, pk=report_pk, algorithm=algorithm.lower()) files = project.get_files() content = { 'project': project, 'algorithm': algorithm, 'files': files, 'report': report, 'selected_document_index': report.selected_document_index, 'outputs': report.get_output(), 'title': f'{algorithm.upper()} Report - {project.title}' } breadcrumb = { "Projects": reverse('all_projects'), project.title: reverse('show_project', args=[project.id]), "Document Similarity": reverse('similarity_algorithms', args=[project_pk]), algorithm.upper(): reverse('apply_similarity_algorithm', args=[project_pk, algorithm]), f"Report (id:{report.id})": "" } content['breadcrumb'] = breadcrumb return render(request, 'document_similarity/report.html', content) def 
remove_similarity_report(request, project_pk, algorithm, report_pk): report = get_object_or_404(Report, pk=report_pk, project_id=project_pk) report.delete() return HttpResponseRedirect(request.META.get('HTTP_REFERER', '/'))
[ "ibrahimmdogann@gmail.com" ]
ibrahimmdogann@gmail.com
f9fff565d665aa7af67ad9802ff07e6822d6c84a
2647bf05a6ab9a84fe75e6697c45603f41066bce
/venv/bin/iptest3
a84d26d195cdf4a5f00190636d75eb0c2004b479
[]
no_license
angelaaaateng/Covertype_Analysis
425be5c762313ba808859f3901f65c49fbfef89f
12c689af7cf7035126f46f60c63547c4ad75c562
refs/heads/master
2022-10-11T09:08:13.267225
2019-09-06T14:53:03
2019-09-06T14:53:03
203,034,657
3
4
null
2022-09-30T18:35:19
2019-08-18T17:16:00
Python
UTF-8
Python
false
false
287
#!/Users/angelateng/Documents/GitHub/Covertype_Analysis/venv/bin/python # -*- coding: utf-8 -*- import re import sys from IPython.testing.iptestcontroller import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "angela_teng@alumni.brown.edu" ]
angela_teng@alumni.brown.edu
d897796ac05564cb9b593c41d1298cdd32285979
746a24367e2cc1804314e841003636268f2b8899
/learning_user/Contact_Info_Adminstration/migrations/0001_initial.py
d0b3232d8acbb2d2ca6c1ed979b91e462c139f16
[]
no_license
boudouara/MAKANI-cf
e2f8904e8c78d6e653a1f8b185659d0b82c8ebc6
32d33e8934d50c76c4df595b575716c03e6adee5
refs/heads/master
2021-05-17T00:52:23.997959
2020-06-01T22:48:06
2020-06-01T22:48:06
250,543,761
2
0
null
null
null
null
UTF-8
Python
false
false
1,251
py
# Generated by Django 2.2.5 on 2020-05-13 12:55 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Contact_MAKANI_CF', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('title_de_problem', models.CharField(max_length=200, unique=True)), ('slug', models.SlugField(max_length=200, unique=True)), ('updated_on', models.DateTimeField(auto_now=True)), ('content', models.TextField()), ('created_on', models.DateTimeField(auto_now_add=True)), ('status', models.IntegerField(choices=[(0, 'Draft'), (1, 'Publish')], default=0)), ('Perssone', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='blog_posts', to=settings.AUTH_USER_MODEL)), ], options={ 'ordering': ['-created_on'], }, ), ]
[ "n.boudouara@esi-sba.dz" ]
n.boudouara@esi-sba.dz
c7049fd951803d6bc6f19109023f9ea5c5d783c2
a3e4cc590667c444460d3a1f659f53f907da1783
/azure/mgmt/blueprint/models/assignment_deployment_job_result_py3.py
52b07be3a07c2f65071a62d8c0a9f5ad292585ef
[]
no_license
eduardomourar/azure-mgmt-blueprint
729d9c08915caab9e8029278da6dc87c4eaa44d6
153c3c63cb519350cb68752e07251e1e8ff26510
refs/heads/master
2020-05-27T02:26:42.436079
2019-11-11T11:52:14
2019-11-11T11:52:14
188,451,854
0
0
null
null
null
null
UTF-8
Python
false
false
1,334
py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.serialization import Model class AssignmentDeploymentJobResult(Model): """Result of each individual deployment in a blueprint assignment. :param error: Contains error details if deployment job failed. :type error: ~azure.mgmt.blueprint.models.AzureResourceManagerError :param resources: Resources created as result of the deployment job. :type resources: list[~azure.mgmt.blueprint.models.AssignmentJobCreatedResource] """ _attribute_map = { 'error': {'key': 'error', 'type': 'AzureResourceManagerError'}, 'resources': {'key': 'resources', 'type': '[AssignmentJobCreatedResource]'}, } def __init__(self, *, error=None, resources=None, **kwargs) -> None: super(AssignmentDeploymentJobResult, self).__init__(**kwargs) self.error = error self.resources = resources
[ "eduardo.rodrigues@sentia.com" ]
eduardo.rodrigues@sentia.com
e10b64e55faa704d71c38e5a32ab6cc065fcaf0b
fb7439e3a0663444660cd56e27e2d30ce94374ef
/WfaUWTSCore/src/setup.py
e8b89d0139f4de2885cf2d23257916f7c5cd66ec
[ "ISC" ]
permissive
wit732-fan/Wi-Fi-Wi-FiTestSuite10.0.0beta
54906cc65f95d7a35bc3964f2be67cd5ffa2e3f7
06fe5ec068cf24e3b202f7eb76ddc2b0249eb112
refs/heads/master
2023-03-16T06:02:31.651981
2016-07-08T17:22:48
2016-07-08T17:22:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
145
py
# Script to convert python source to EXE from distutils.core import setup import py2exe import sys import os pkg='wts.py' setup(console=[pkg])
[ "kdhara@wi-fi.org" ]
kdhara@wi-fi.org
ce9066f317bacb0738c785c8dd9f205f191c6c76
875c080f965dc8ecb41a522f9a021f31e38a791d
/ingestion/urls.py
8d5ecddec20f6ef93d185d1c7a185c89cee02aca
[]
no_license
cazino/Ingestion
bfee27fdd26363c590b540bc9321020f8190e876
d71688aa649093fa357bc8f88a3358f06fce2b43
refs/heads/master
2016-08-07T11:22:31.424104
2010-05-10T16:03:15
2010-05-10T16:03:15
null
0
0
null
null
null
null
UTF-8
Python
false
false
372
py
from django.conf.urls.defaults import * from mp3.main.models import Artist urlpatterns = patterns('', (r'^batch-view/$', 'mp3.ingestion.views.batch_view'), # Home page (r'^artist-lookup/$', 'mp3.ingestion.views.artist_lookup'), (r'^label-lookup/$', 'mp3.ingestion.views.label_lookup'), (r'^url-lookup/$', 'mp3.ingestion.views.url_lookup'), )
[ "emmanuel.cazenave@mondomix.net" ]
emmanuel.cazenave@mondomix.net
0542fdbf2773a0eff4356a8d9134630034300b1e
92a93729e82deceb6c8e96fe940a9910e05d604f
/week_6/8_6/likelion_flaskr_init/likelion_flaskr_init/migrations/versions/54185e9c8c99_.py
396146d26c9e60e3a251aadd97a073eda9941ee1
[]
no_license
mylovenr/homework
e2dbbdab7880030e39352daff92e508603059227
d0887361eb65c9cf954e68108cb4f929dd5e8ba3
refs/heads/master
2020-05-26T09:20:58.538100
2014-08-22T08:55:41
2014-08-22T08:55:41
null
0
0
null
null
null
null
UTF-8
Python
false
false
880
py
"""empty message Revision ID: 54185e9c8c99 Revises: 3262a3aa37b2 Create Date: 2014-08-06 19:55:26.214000 """ # revision identifiers, used by Alembic. revision = '54185e9c8c99' down_revision = '3262a3aa37b2' from alembic import op import sqlalchemy as sa def upgrade(): ### commands auto generated by Alembic - please adjust! ### op.add_column('article', sa.Column('author', sa.String(length=255), nullable=True)) op.add_column('article', sa.Column('category', sa.String(length=255), nullable=True)) op.add_column('article', sa.Column('date_created', sa.DateTime(), nullable=True)) ### end Alembic commands ### def downgrade(): ### commands auto generated by Alembic - please adjust! ### op.drop_column('article', 'date_created') op.drop_column('article', 'category') op.drop_column('article', 'author') ### end Alembic commands ###
[ "yangdayoung11@hanmail.net" ]
yangdayoung11@hanmail.net
1118dd6314a13fd14c81d69b52640f18ac825cfc
f995e2afc7ec7ba80aa44384036f087b192e8036
/process_data.py
2d6173e0077b0341abf30a141011bb244498d20a
[]
no_license
kevin2018pg/SentimentClassification
62fd80578e3789e78174a8a7bdeccba16fb9e794
b0802e41dcf40e4c317c6b749be5816055e7e145
refs/heads/master
2023-04-19T06:24:35.471842
2021-04-30T06:46:00
2021-04-30T06:46:00
357,917,475
1
0
null
null
null
null
UTF-8
Python
false
false
2,238
py
# -*- coding: utf-8 -*- # @Time : 2020/4/1 22:24 # @Author : Kevin # @File : process_data.py # @Version : python 3.6 # @Desc : 脏数据清洗 from tqdm import trange from harvesttext import HarvestText import pyhanlp import json import re import os def read_json(file): with open(file, 'r', encoding='utf-8') as f: data = json.load(f) print('%s -> data over' % file) return data def save_json(data, file, indent=1): with open(file, 'w', encoding='utf-8') as f: # dumps序列化时输出中文需设置 ensure_ascii=False f.write(json.dumps(data, indent=1, ensure_ascii=False)) print('data -> %s over' % file) def remove_url(src): # flags=re.MULTILINE多行模式匹配 vTEXT = re.sub(r'(https|http)?:\/\/(\w|\.|\/|\?|\=|\&|\%)*\b', '', src, flags=re.MULTILINE) return vTEXT # file: 数据文件 def clean_text(file, save_dir): ht = HarvestText() # CharTable中文文本规范化 CharTable = pyhanlp.JClass('com.hankcs.hanlp.dictionary.other.CharTable') data = read_json(file) num_null = 0 cleaned_data = [] for i in trange(len(data)): # 中文文本规范化 content = CharTable.convert(data[i]['content']) cleaned_content = remove_url(ht.clean_text(content, emoji=False)) # 过滤微博文本@和表情符号 num_null += 1 if cleaned_content == '' else 0 if 'train' in file and (not content or not cleaned_content): # 过滤train数据中自带的空数据或清洗后出现的空数据 continue if 'eval' in file or 'test' in file: cleaned_data.append({'id': data[i]['id'], 'content': cleaned_content}) else: cleaned_data.append({'id': data[i]['id'], 'content': cleaned_content, 'label': data[i]['label']}) filename = file.split('/')[-1] save_json(cleaned_data, os.path.join(save_dir, filename)) print('num data: ', num_null) clean_text('./data/raw/virus_train.txt', './data/clean') #clean_text('./data/raw/usual_train.txt', '../data/clean') # clean_text('./data/raw/virus_test.txt', './data/clean') # clean_text('./data/raw/usual_test.txt', '../data/clean')
[ "noreply@github.com" ]
noreply@github.com
d0e245f285f7028136bf38a0f29d170d8c9f4d5a
8bb4a472344fda15985ac322d14e8f4ad79c7553
/Python3-Core/src/test/prompto/translate/eme/TestCss.py
801cb78f8fe015a3e6257711209c57258ee542a1
[]
no_license
prompto/prompto-python3
c6b356f5af30c6826730ba7f2ad869f341983a2d
64bd3d97d4702cc912097d41d961f7ab3fd82bee
refs/heads/master
2022-12-24T12:33:16.251468
2022-11-27T17:37:56
2022-11-27T17:37:56
32,623,633
4
0
null
2019-05-04T11:06:05
2015-03-21T07:17:25
Python
UTF-8
Python
false
false
767
py
from prompto.parser.e.BaseEParserTest import BaseEParserTest class TestCss(BaseEParserTest): def setUp(self): super(type(self), self).setUp() def testCodeValue(self): self.compareResourceEME("css/codeValue.pec") def testCompositeValue(self): self.compareResourceEME("css/compositeValue.pec") def testHyphenName(self): self.compareResourceEME("css/hyphenName.pec") def testMultiValue(self): self.compareResourceEME("css/multiValue.pec") def testNumberValue(self): self.compareResourceEME("css/numberValue.pec") def testPixelValue(self): self.compareResourceEME("css/pixelValue.pec") def testTextValue(self): self.compareResourceEME("css/textValue.pec")
[ "eric.vergnaud@wanadoo.fr" ]
eric.vergnaud@wanadoo.fr
07260035fae3775eccc23a0180c11509e81f5968
6b9084d234c87d7597f97ec95808e13f599bf9a1
/algorithms/tracker/transt/builder.py
f300dc026d1df2f2ed64f5f4be27d71f5490de44
[]
no_license
LitingLin/ubiquitous-happiness
4b46234ce0cb29c4d27b00ec5a60d3eeb52c26fc
aae2d764e136ca4a36c054212b361dd7e8b22cba
refs/heads/main
2023-07-13T19:51:32.227633
2021-08-03T16:02:03
2021-08-03T16:02:03
316,664,903
1
0
null
null
null
null
UTF-8
Python
false
false
1,328
py
import torch from models.TransT.builder import build_transt from algorithms.tracker.transt.tracker import TransTTracker from data.tracking.methods.TransT.evaluation.builder import build_evaluation_data_processors def build_transt_tracker(network_config, evaluation_config, weight_path, device): device = torch.device(device) model = build_transt(network_config, False) state_dict = torch.load(weight_path, map_location='cpu')['model'] if network_config['version'] <= 2: for key in list(state_dict.keys()): key: str = key if key.startswith('head.class_embed'): state_dict[key.replace('head.class_embed', 'head.classification')] = state_dict.pop(key) elif key.startswith('head.bbox_embed'): state_dict[key.replace('head.bbox_embed', 'head.regression')] = state_dict.pop(key) if network_config['backbone']['type'] == 'swin_transformer': from models.backbone.swint.swin_transformer import _update_state_dict_ _update_state_dict_(state_dict, 'backbone.backbone.') model.load_state_dict(state_dict) data_processor, network_post_processor = build_evaluation_data_processors(network_config, evaluation_config, device) return TransTTracker(model, device, data_processor, network_post_processor)
[ "linliting06@live.com" ]
linliting06@live.com
70d103be4cf7033045a7bfe4abce7325e7410269
e0980f704a573894350e285f66f4cf390837238e
/.history/rocketman/settings/dev_20210104181322.py
6b33f05fcfb179db48a0b11ba3e3a32f5bde8bef
[]
no_license
rucpata/WagtailWebsite
28008474ec779d12ef43bceb61827168274a8b61
5aa44f51592f49c9a708fc5515ad877c6a29dfd9
refs/heads/main
2023-02-09T15:30:02.133415
2021-01-05T14:55:45
2021-01-05T14:55:45
303,961,094
0
0
null
null
null
null
UTF-8
Python
false
false
638
py
from .base import * # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = '0qjdxh8nibnbihjuj9*-%$#kx!i8y^wk6wt(h)@27m1g-9g$)v' # SECURITY WARNING: define the correct hosts in production! ALLOWED_HOSTS = ['localhost', 'rocketman.naukawagtail.com'] EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend' INSTALLED_APPS += [ 'debug_toolbar', ] MIDDLEWARE += [ 'debug_toolbar.middleware.DebugToolbarMiddleware', ] INTERNAL_IPS = [ '127.0.0.1', ] try: from .local import * except ImportError: pass
[ "rucinska.patrycja@gmail.com" ]
rucinska.patrycja@gmail.com
0d9dbb80048235a39c05ba7bae2ad9409f44e688
1a4c0183180c4a03528814ec2cbafcf32aaf7981
/wine-reviews/wine3.py
fdc4a19b3628f2d5b9ac7b0ae434b9bc0d332782
[]
no_license
Zaljun/CSC291-Text-Mining
3234d7bf5275fa09f6eaaa43018810c61a182adb
4dcc8feb5ccb3e9bae889e892bfc8ee4e0e2c255
refs/heads/master
2022-10-12T19:56:54.407165
2020-06-08T03:54:50
2020-06-08T03:54:50
270,258,177
0
0
null
null
null
null
UTF-8
Python
false
false
6,140
py
from nltk import word_tokenize from nltk import pos_tag from nltk.stem import PorterStemmer from nltk.corpus import stopwords import re import numpy as np import pandas as pd import gensim import gensim.corpora as corpora from gensim.utils import simple_preprocess from gensim.models import CoherenceModel from gensim import models,matutils import spacy from pprint import pprint from sklearn.feature_extraction.text import TfidfTransformer from sklearn.feature_extraction.text import CountVectorizer import os from sklearn.cluster import MiniBatchKMeans from sklearn import metrics from sklearn.decomposition import LatentDirichletAllocation from collections import Counter ''' def my_tokenizer(doc): all_tokens = word_tokenize(doc) all_pos_tags = pos_tag(all_tokens) tokens_no_punct = [(t, pos) for (t,pos) in all_pos_tags if len(pos) > 1] no_punkt_pos = [] for (t,pos) in tokens_no_punct: valid = False for ch in t: if ch.isalnum(): valid = True if valid: no_punkt_pos.append((t,pos)) lower_tokens = [(t.lower(),pos) for (t,pos) in no_punkt_pos] porter = PorterStemmer() stemmed_tokens = [(porter.stem(t),pos) for (t, pos) in lower_tokens] stoplist = stopwords.words('english') stoplist.extend(["wine"]) no_stopwords = [(t, pos) for (t, pos) in stemmed_tokens if t not in stoplist] good_tokens = [ t for (t,pos) in no_stopwords] return good_tokens ''' dir_file = os.getcwd() # returns path to current directory files_dir = os.listdir(dir_file) # list of files in current directory csv_files = [f for f in files_dir if f.endswith('csv')] wine_file = csv_files[0] fid = open(wine_file) wine_df = pd.read_csv(wine_file) wine_df.info # the columns # find unique variety variety_dict = Counter(wine_df['variety']) most_common = [t[0] for t in variety_dict.most_common(20)] print(variety_dict.most_common(20)) # main corpus = top 20 variety wine description variety_top_20 = wine_df['variety'].isin(most_common) # returns a bool index print(variety_top_20.shape) selected_wine = wine_df[variety_top_20] 
print(selected_wine.shape) stop_words = stopwords.words('english') #stop_words.extend(['from','subject','re','edu','use']) df = selected_wine #print(df.target_names.unique()) print(df.head()) data = df.description.values.tolist() data = [re.sub('\S*@\S*\s?', '', sent) for sent in data] data = [re.sub('\s+', ' ' , sent) for sent in data] data = [re.sub("\'", "", sent) for sent in data] print(data[:1]) def sent_to_words(sent): for sentence in sent: yield(gensim.utils.simple_preprocess(str(sentence), deacc = True)) data_words = list(sent_to_words(data)) print(data_words[:1]) # build bigram and trigram model bigram = gensim.models.Phrases(data_words, min_count = 5, threshold = 100) trigram = gensim.models.Phrases(bigram[data_words], threshold = 100) bigram_mod = gensim.models.phrases.Phraser(bigram) trigram_mod = gensim.models.phrases.Phraser(trigram) print(trigram_mod[bigram_mod[data_words[0]]]) # funtion for stopwords, bigram, trigram and lemmatization def remove_stopwords(texts): return [[word for word in simple_preprocess(str(doc)) if word not in stop_words] for doc in texts] def make_bigrams(texts): return [bigram_mod[doc] for doc in texts] def make_trigram(texts): return [trigram_mod[bigram_mod[doc]] for doc in texts] def lemmatization(texts, allowed_postags = ['NOUN', 'ADJ', 'VERB', 'ADV']): texts_out = [] for sent in texts: doc = nlp(" ".join(sent)) texts_out.append([token.lemma_ for token in doc if token.pos_ in allowed_postags]) return texts_out data_words_nostop = remove_stopwords(data_words) data_words_bigrams = make_bigrams(data_words_nostop) nlp = spacy.load('en', disable = ['parser', 'ner']) data_lemmatized = lemmatization(data_words_bigrams, allowed_postags = ['NOUN', 'ADJ', 'ADV', 'VERB']) print(data_lemmatized[:1]) def preprocessor(doc): data_words_nostop = remove_stopwords(doc) data_words_bigrams = make_bigrams(data_words_nostop) data_lemmatized = lemmatization(data_words_bigrams, allowed_postags = ['NOUN', 'ADJ', 'ADV', 'VERB']) return 
data_lemmatized # do clustering using minibatch K-means #vect = CountVectorizer(stop_words = 'english',lowercase = True, min_df = 10) vect = CountVectorizer(tokenizer = preprocessor, min_df = 10) counter= vect.fit_transform(selected_wine['description']) transf = TfidfTransformer(norm = 'l2', use_idf = True, smooth_idf = True, sublinear_tf = False) # TfidfTransformer takes the CountVectorizer output and computes the tf-idf tf_idf = transf.fit_transform(counter) k_clusters = 20 model = MiniBatchKMeans(n_clusters=k_clusters, init='k-means++', max_iter=200, batch_size=5000, n_init = 10) model.fit(tf_idf) # clustering results print("Top terms per cluster:") order_centroids = model.cluster_centers_.argsort()[:, ::-1] # sort and reverse terms = vect.get_feature_names() for i in range(k_clusters): print("Cluster %d:" % i), for ind in order_centroids[i, :10]: # print first 10 terms from the cluster print(' %s' % terms[ind]) print() # clustering score variety = selected_wine.variety.copy() variety = pd.Categorical(variety) print("Homogeneity: %0.3f" % metrics.homogeneity_score(variety, model.labels_)) print("Completeness: %0.3f" % metrics.completeness_score(variety, model.labels_)) print("V-measure: %0.3f" % metrics.v_measure_score(variety, model.labels_)) print("Adjusted Rand-Index: %.3f" % metrics.adjusted_rand_score(variety, model.labels_)) #print("Silhouette Coefficient: %0.3f" # % metrics.silhouette_score(X, km.labels_, sample_size=1000)) # check index_Chardonnay = selected_wine['variety'].isin(['Chardonnay']) print(sorted(Counter(model.labels_[index_Chardonnay]).items(),key = lambda x:(x[1], x[0]), reverse =True)) m = model.labels_[index_Chardonnay]
[ "noreply@github.com" ]
noreply@github.com
aa9180de291add55eec12d959476791d748a2f23
c49261d13dda2c4aa8219a9cc6c09a7bd2fde395
/day08/day08.py
adfbdf7fd86c08daddbd3f71db892282ed35412b
[]
no_license
endy-imam/advent-of-code-2020
39a3646642ae72283db1ed6f0ff14a148dc8e7a6
26180c2835c9332c4a3cd294186aae8ca9449cf4
refs/heads/main
2023-02-07T20:04:30.334144
2020-12-25T09:50:48
2020-12-25T09:50:48
null
0
0
null
null
null
null
UTF-8
Python
false
false
1,677
py
import os from utils import get_data, run, map_list # INPUT HELPERS def get_op(line): op, arg = line.split() return [op, int(arg)] # INPUT SECTION DIR_ROOT = os.path.dirname(__file__) instructions = map_list(get_op, get_data(DIR_ROOT).split('\n')) # HELPER FUNCTIONS def run_op(line, val): op, arg = instructions[line] line += arg if op == 'jmp' else 1 val += arg if op == 'acc' else 0 return line, val def switch_op(instructions, line): old_op = instructions[line][0] new_op = 'nop' if old_op == 'jmp' else 'jmp' instructions[line][0] = new_op # MAIN FUNCTIONS def part_one(): line_ran = set() line = acc = 0 while line not in line_ran and line < len(instructions): line_ran.add(line) line, acc = run_op(line, acc) return acc def part_two(): curr_instructions = instructions[::] prev_line = None traceback_stack = [] line_ran = set() line = acc = 0 while line < len(curr_instructions): # RUN FALLBACK if line in line_ran: if prev_line is not None: switch_op(curr_instructions, prev_line) line_ran, line, acc = traceback_stack.pop() switch_op(curr_instructions, line) prev_line = line # ADD TRACEBACK else: op, arg = curr_instructions[line] if op in 'jmp nop' and arg != 1: traceback = (line_ran.copy(), line, acc) traceback_stack.append(traceback) # RUN OP line_ran.add(line) line, acc = run_op(line, acc) return acc # RUNNING FUNCTION if __name__ == "__main__": run(part_one, part_two)
[ "imam.endy@gmail.com" ]
imam.endy@gmail.com
264216662e2222d193824bf147504092fba75191
2839ac802d1094976fdeaf603c15808de6f539e7
/venv/bin/pip
97a7a2f1f271a652a18167ef01ec8a9dec7140fb
[]
no_license
alishalabi/songs-i-know
9f38b063355d061720d53d65c96a41bf373606b8
a9cd6e94662154560e2f84a75a7f32c6fa18e639
refs/heads/master
2020-06-05T11:24:04.274007
2019-07-03T04:06:35
2019-07-03T04:06:35
192,421,245
0
0
null
null
null
null
UTF-8
Python
false
false
267
#!/Users/alishalabi/Desktop/dev/BEW1.3/songs-i-know/venv/bin/python3.7 # -*- coding: utf-8 -*- import re import sys from pip._internal import main if __name__ == '__main__': sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) sys.exit(main())
[ "alishalabi@Alis-MacBook-Pro-2.local" ]
alishalabi@Alis-MacBook-Pro-2.local