import tempfile

import pytest

from pre_commit_hooks.pretty_format_json import pretty_format_json
from testing.util import get_resource_path


@pytest.mark.parametrize(('filename', 'expected_retval'), (
    ('not_pretty_formatted_json.json', 1),
    ('pretty_formatted_json.json', 0),
))
def test_pretty_format_json(filename, expected_retval):
    ret = pretty_format_json([get_resource_path(filename)])
    assert ret == expected_retval


def test_autofix_pretty_format_json():
    toformat_file = tempfile.NamedTemporaryFile(delete=False, mode='w+')

    # copy our file to format there
    model_file = open(get_resource_path('not_pretty_formatted_json.json'), 'r')
    model_contents = model_file.read()
    model_file.close()
    toformat_file.write(model_contents)
    toformat_file.close()

    # now launch the autofix on that file
    ret = pretty_format_json(['--autofix', toformat_file.name])
    # it should have formatted it
    assert ret == 1

    # file already good
    ret = pretty_format_json([toformat_file.name])
    assert ret == 0


def test_badfile_pretty_format_json():
    ret = pretty_format_json([get_resource_path('ok_yaml.yaml')])
    assert ret == 1
Write to temp directories in such a way that files get cleaned up
import io

import pytest

from pre_commit_hooks.pretty_format_json import pretty_format_json
from testing.util import get_resource_path


@pytest.mark.parametrize(('filename', 'expected_retval'), (
    ('not_pretty_formatted_json.json', 1),
    ('pretty_formatted_json.json', 0),
))
def test_pretty_format_json(filename, expected_retval):
    ret = pretty_format_json([get_resource_path(filename)])
    assert ret == expected_retval


def test_autofix_pretty_format_json(tmpdir):
    srcfile = tmpdir.join('to_be_json_formatted.json')
    with io.open(get_resource_path('not_pretty_formatted_json.json')) as f:
        srcfile.write_text(f.read(), 'UTF-8')

    # now launch the autofix on that file
    ret = pretty_format_json(['--autofix', srcfile.strpath])
    # it should have formatted it
    assert ret == 1

    # file was formatted (shouldn't trigger linter again)
    ret = pretty_format_json([srcfile.strpath])
    assert ret == 0


def test_badfile_pretty_format_json():
    ret = pretty_format_json([get_resource_path('ok_yaml.yaml')])
    assert ret == 1
__author__ = 'jacob'

import ROOT
import numpy as np
import os
from root_numpy import root2array, root2rec, tree2rec

# Look at r284484 data
filename = os.path.join("data", "r284484.root")

# Convert a TTree in a ROOT file into a NumPy structured array
arr = root2array(filename)
print(arr.dtype)

# The TTree name is always optional if there is only one TTree in the file

# Convert a TTree in a ROOT file into a NumPy record array
rec = root2rec(filename)

# Get the TTree from the ROOT file
rfile = ROOT.TFile(filename)
Print out dtypes in .root file individually
__author__ = 'jacob'

import ROOT
import numpy as np
import os
from root_numpy import root2array, root2rec, tree2rec

# Look at r284484 data
filename = os.path.join("data", "r284484.root")

# Convert a TTree in a ROOT file into a NumPy structured array
arr = root2array(filename)

for element in arr.dtype.names:
    print(element)
    print("\n")

# The TTree name is always optional if there is only one TTree in the file

# Convert a TTree in a ROOT file into a NumPy record array
rec = root2rec(filename)

# Get the TTree from the ROOT file
rfile = ROOT.TFile(filename)
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.

class ErrorCodes:
    UserCanceledError = 1
    DeviceBusyError = 2


class WriteRequestFailedError(Exception):
    def __init__(self, code, message):
        super().__init__(message)
        self.code = code
        self.message = message
Replace error codes with error subclasses. This provides the same information but is a cleaner solution for Python.
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the AGPLv3 or higher.

class WriteRequestFailedError(Exception):
    pass


class UserCancelledError(WriteRequestFailedError):
    pass


class PermissionDeniedError(WriteRequestFailedError):
    pass


class DeviceBusyError(WriteRequestFailedError):
    pass
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('icekit', '0002_layout'),
        ('eventkit_fluentevent', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='fluentevent',
            name='layout',
            field=models.ForeignKey(blank=True, to='icekit.Layout', null=True),
            preserve_default=True,
        ),
    ]
Update related name for `layout` field.
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):

    dependencies = [
        ('icekit', '0002_layout'),
        ('eventkit_fluentevent', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='fluentevent',
            name='layout',
            field=models.ForeignKey(related_name='eventkit_fluentevent_fluentevent_related', blank=True, to='icekit.Layout', null=True),
            preserve_default=True,
        ),
    ]
import ephem
from datetime import datetime


def const(planet_name):  # function name and parameters
    planet_class = getattr(ephem, planet_name)  # sets ephem object class
    date_class = datetime.now()
    planet = planet_class()  # sets planet variable
    south_bend = ephem.Observer()  # Creates the Observer object
    south_bend.lat = '41.40'  # latitude
    south_bend.lon = '-86.15'
    south_bend.date = date_class  # sets date parameter
    planet.compute(south_bend)  # calculates the location data
    print date_class
    print planet.ra, planet.dec
    print planet.alt, planet.az
    return ephem.constellation((planet.ra, planet.dec))

print const(raw_input('Planet: '))
Add menu to choose star or planet, print results.
import ephem
from datetime import datetime


def star(star_name):
    star = ephem.star(star_name)
    south_bend = ephem.Observer()
    date_class = datetime.now()
    south_bend.lat = '41.15'
    south_bend.lon = '-86.26'
    south_bend.date = date_class
    star.compute(south_bend)
    print date_class
    print "Mag ", star.mag
    print "RA ", star.ra
    print "Dec ", star.dec


def const(planet_name):  # function name and parameters
    planet_class = getattr(ephem, planet_name)  # sets ephem object class
    date_class = datetime.now()
    planet = planet_class()  # sets planet variable
    south_bend = ephem.Observer()  # Creates the Observer object
    south_bend.lat = '41.40'  # latitude
    south_bend.lon = '-86.15'
    south_bend.date = date_class  # sets date parameter
    planet.compute(south_bend)  # calculates the location data
    print 'Date ', date_class
    print 'RA ', planet.ra
    print 'Dec ', planet.dec
    print 'Alt ', planet.alt
    print 'Az ', planet.az
    return ephem.constellation((planet.ra, planet.dec))

print "Press 1 to find a star, 2 to find a planet"
choice = raw_input('> ')
if choice == '1':
    star(raw_input('Star: '))
else:
    const(raw_input('Planet: '))
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import render
from django.template import RequestContext, loader

from .models import Theater


# Default first page. Should be the search page.
def index(request):
    return HttpResponse("Hello, world. You're at the ITDB_Main index. This is where you will be able to search.")


# page for Theaters & theater details. Will show the details about a theater, and a list of Productions.
def theaters(request):
    all_theaters_by_alpha = Theater.objects.order_by('name')
    context = RequestContext(request, {'all_theaters_by_alpha': all_theaters_by_alpha})
    return render(request, 'ITDB_Main/theaters.html', context)


def theater_detail(request, theater_id):
    try:
        theater = Theater.objects.get(pk=theater_id)
    except Theater.DoesNotExist:
        raise Http404("Theater does not exist")
    return render(request, 'ITDB_Main/theater_detail.html', {'theater': theater})


# page for People
def person(request):
    return HttpResponse("Page showing a single person - e.g. actor, director, writer, followed by a list of Productions")


# page for Plays
def play(request):
    return HttpResponse("Page showing a single play, followed by a list of Productions")


# page for Productions
def production(request):
    return HttpResponse("Page showing a single production, with details about theater and play, followed by a list of People")
Update theater view to use get_object_or_404 shortcut
from django.http import Http404
from django.http import HttpResponse
from django.shortcuts import get_object_or_404, render
from django.template import RequestContext, loader

from .models import Theater


# Default first page. Should be the search page.
def index(request):
    return HttpResponse("Hello, world. You're at the ITDB_Main index. This is where you will be able to search.")


# page for Theaters & theater details. Will show the details about a theater, and a list of Productions.
def theaters(request):
    all_theaters_by_alpha = Theater.objects.order_by('name')
    context = RequestContext(request, {'all_theaters_by_alpha': all_theaters_by_alpha})
    return render(request, 'ITDB_Main/theaters.html', context)


def theater_detail(request, theater_id):
    theater = get_object_or_404(Theater, pk=theater_id)
    return render(request, 'ITDB_Main/theater_detail.html', {'theater': theater})


# page for People
def person(request):
    return HttpResponse("Page showing a single person - e.g. actor, director, writer, followed by a list of Productions")


# page for Plays
def play(request):
    return HttpResponse("Page showing a single play, followed by a list of Productions")


# page for Productions
def production(request):
    return HttpResponse("Page showing a single production, with details about theater and play, followed by a list of People")
import sqlite3
from datetime import datetime

from SenseCells.tts import tts


def show_all_notes():
    conn = sqlite3.connect('memory.db')
    tts('Your notes are as follows:')

    cursor = conn.execute("SELECT notes FROM notes")
    for row in cursor:
        tts(row[0])

    conn.commit()
    conn.close()


def note_something(speech_text):
    conn = sqlite3.connect('memory.db')
    words_of_message = speech_text.split()
    words_of_message.remove('note')
    cleaned_message = ' '.join(words_of_message)

    conn.execute("INSERT INTO notes (notes, notes_date) VALUES (?, ?)",
                 (cleaned_message, datetime.strftime(datetime.now(), '%d-%m-%Y')))
    conn.commit()
    conn.close()

    tts('Your note has been saved.')
Remove unused line of code
import sqlite3
from datetime import datetime

from SenseCells.tts import tts


def show_all_notes():
    conn = sqlite3.connect('memory.db')
    tts('Your notes are as follows:')

    cursor = conn.execute("SELECT notes FROM notes")
    for row in cursor:
        tts(row[0])

    conn.close()


def note_something(speech_text):
    conn = sqlite3.connect('memory.db')
    words_of_message = speech_text.split()
    words_of_message.remove('note')
    cleaned_message = ' '.join(words_of_message)

    conn.execute("INSERT INTO notes (notes, notes_date) VALUES (?, ?)",
                 (cleaned_message, datetime.strftime(datetime.now(), '%d-%m-%Y')))
    conn.commit()
    conn.close()

    tts('Your note has been saved.')
import bottle
from bottle import response, request
import json
import jedi
import logging

app = bottle.Bottle( __name__ )
logger = logging.getLogger( __name__ )


@app.get( '/healthy' )
def healthy():
    return _Json({})


@app.get( '/ready' )
def ready():
    return _Json({})


@app.post( '/completions' )
def completion():
    logger.info( 'received /completions request' )
    script = _GetJediScript( request.json )
    return _Json(
        {
            'completions': [
                {
                    'name': completion.name,
                    'description': completion.description,
                    'docstring': completion.docstring()
                } for completion in script.completions()
            ]
        } )


def _GetJediScript( request_data ):
    source = request_data[ 'source' ]
    line = request_data[ 'line' ]
    col = request_data[ 'col' ]
    path = request_data[ 'path' ]

    return jedi.Script( source, line, col, path )


def _Json( data ):
    response.content_type = 'application/json'
    return json.dumps( data )
Add more info for completions
import bottle
from bottle import response, request
import json
import jedi
import logging

app = bottle.Bottle( __name__ )
logger = logging.getLogger( __name__ )


@app.get( '/healthy' )
def healthy():
    return _Json({})


@app.get( '/ready' )
def ready():
    return _Json({})


@app.post( '/completions' )
def completion():
    logger.info( 'received /completions request' )
    script = _GetJediScript( request.json )
    return _Json(
        {
            'completions': [
                {
                    'name': completion.name,
                    'description': completion.description,
                    'docstring': completion.docstring(),
                    'module_path': completion.module_path,
                    'line': completion.line,
                    'column': completion.column
                } for completion in script.completions()
            ]
        } )


def _GetJediScript( request_data ):
    source = request_data[ 'source' ]
    line = request_data[ 'line' ]
    col = request_data[ 'col' ]
    path = request_data[ 'path' ]

    return jedi.Script( source, line, col, path )


def _Json( data ):
    response.content_type = 'application/json'
    return json.dumps( data )
# -*- coding: utf-8 -*-
from django.http import HttpResponse
from django.utils import simplejson


def JSONResponse(data):
    return HttpResponse(simplejson.dumps(data), mimetype='application/json')
Fix JSONResponse to work without complaints on Django 1.6
# -*- coding: utf-8 -*-
from django.http import HttpResponse
import json


def JSONResponse(data):
    return HttpResponse(json.dumps(data), content_type='application/json')
#!/usr/bin/env python
import sys
from statusdb.db import connections as statusdb

if len(sys.argv) == 1:
    sys.exit('Please provide a project name')

prj = sys.argv[1]
pcon = statusdb.ProjectSummaryConnection()
prj_obj = pcon.get_entry(prj)
prj_samples = prj_obj.get('samples', {})

print("NGI_id\tUser_id")
for sample in sorted(prj_samples.keys()):
    user_name = prj_samples[sample].get('customer_name', '')
    print("{}\t{}".format(sample, user_name))
Use TACA's statusdb module instead
#!/usr/bin/env python
import sys
import os
from taca.utils.statusdb import ProjectSummaryConnection
from taca.utils.config import load_config

if len(sys.argv) == 1:
    sys.exit('Please provide a project name')

prj = sys.argv[1]

statusdb_config = os.getenv('STATUS_DB_CONFIG')
conf = load_config(statusdb_config)
conf = conf.get('statusdb')

pcon = ProjectSummaryConnection(config=conf)
prj_obj = pcon.get_entry(prj)
prj_samples = prj_obj.get('samples', {})

print("NGI_id\tUser_id")
for sample in sorted(prj_samples.keys()):
    user_name = prj_samples[sample].get('customer_name', '')
    print("{}\t{}".format(sample, user_name))
""" github3 ======= :copyright: (c) 2012 by Ian Cordasco :license: Modified BSD, see LICENSE for more details """ __title__ = 'github3' __author__ = 'Ian Cordasco' __license__ = 'Modified BSD' __copyright__ = 'Copyright 2012 Ian Cordasco' __version__ = '0.1a' from .api import * from .github import GitHub
Add link to the online docs in the module description. No reason not to have it there. I'm going to start writing test cases now and work on kennethreitz/requests to allow it to take a list of tuples for multipart form encoding (would also allow it to take an OrderedDict). Just waiting for the go-ahead from someone.
""" github3 ======= See http://github3py.rtfd.org/ for documentation. :copyright: (c) 2012 by Ian Cordasco :license: Modified BSD, see LICENSE for more details """ __title__ = 'github3' __author__ = 'Ian Cordasco' __license__ = 'Modified BSD' __copyright__ = 'Copyright 2012 Ian Cordasco' __version__ = '0.1a' from .api import * from .github import GitHub
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile


class EnttConan(ConanFile):
    name = "entt"
    description = "Gaming meets modern C++ - a fast and reliable entity-component system (ECS) and much more "
    topics = ("conan," "entt", "gaming", "entity", "ecs")
    url = "https://github.com/skypjack/entt"
    homepage = url
    author = "Michele Caini <[email protected]>"
    license = "MIT"
    exports = ["LICENSE"]
    exports_sources = ["src/*"]
    no_copy_source = True

    def package(self):
        self.copy(pattern="LICENSE", dst="licenses")
        self.copy(pattern="*", dst="include", src="src", keep_path=True)

    def package_id(self):
        self.info.header_only()
Conan: Support package in editable mode. Add a method to the recipe that maps the include path to "src" when the package is put into "editable mode". See: https://docs.conan.io/en/latest/developing_packages/editable_packages.html
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from conans import ConanFile


class EnttConan(ConanFile):
    name = "entt"
    description = "Gaming meets modern C++ - a fast and reliable entity-component system (ECS) and much more "
    topics = ("conan," "entt", "gaming", "entity", "ecs")
    url = "https://github.com/skypjack/entt"
    homepage = url
    author = "Michele Caini <[email protected]>"
    license = "MIT"
    exports = ["LICENSE"]
    exports_sources = ["src/*"]
    no_copy_source = True

    def package(self):
        self.copy(pattern="LICENSE", dst="licenses")
        self.copy(pattern="*", dst="include", src="src", keep_path=True)

    def package_info(self):
        if not self.in_local_cache:
            self.cpp_info.includedirs = ["src"]

    def package_id(self):
        self.info.header_only()
from django.conf.urls import url, include
from django.contrib import admin

urlpatterns = [
    url('^admin/', include(admin.site.urls)),
]
Fix tests for Django 2.0
from django.conf.urls import url
from django.contrib import admin

urlpatterns = [
    url('^admin/', admin.site.urls),
]
import sys

from .config import RawConfig
from .exc import RunCommandsError
from .run import run, partition_argv, read_run_args_from_file


def main(argv=None):
    try:
        all_argv, run_argv, command_argv = partition_argv(argv)
        cli_args = run.parse_args(RawConfig(debug=False), run_argv)
        run_args = read_run_args_from_file(run)
        run_args.update(cli_args)
        run.implementation(
            None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
            cli_args=cli_args, **run_args)
    except RunCommandsError:
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
Print error message before exiting in main(). Amends 42af9a9985ec5409f7773d9daf9f8a68df291228.
import sys

from .config import RawConfig
from .exc import RunCommandsError
from .run import run, partition_argv, read_run_args_from_file
from .util import printer


def main(argv=None):
    try:
        all_argv, run_argv, command_argv = partition_argv(argv)
        cli_args = run.parse_args(RawConfig(debug=False), run_argv)
        run_args = read_run_args_from_file(run)
        run_args.update(cli_args)
        run.implementation(
            None, all_argv=all_argv, run_argv=run_argv, command_argv=command_argv,
            cli_args=cli_args, **run_args)
    except RunCommandsError as exc:
        printer.error(exc, file=sys.stderr)
        return 1
    return 0


if __name__ == '__main__':
    sys.exit(main())
import data
import model
import numpy as np
from keras import optimizers

# Localize data through file system relative indexing method
path = 'hcp_olivier/102816/MNINonLinear/Results/rfMRI_REST1_LR/rfMRI_REST1_LR.npy'

# Use data loading library to load data
a, b, y = data.generate_learning_set(np.load(path))

# Generate the model
embedding_model, siamese_model = model.make_mlp_models(a.shape[1], embedding_dropout=0.2)

optimizer = optimizers.SGD(lr=0.00001, momentum=0.9, nesterov=True)
# optimizer = optimizers.Adam(lr=0.0001)

siamese_model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

print(a.shape)
print(a[:10])

trace = siamese_model.fit([a, b], y, validation_split=0.2, epochs=30, batch_size=16)

print(trace.history['acc'][-1])
print(trace.history['val_acc'][-1])
Use linear models by default
import data
import model
import numpy as np
from keras import optimizers

# Localize data through file system relative indexing method
path = 'hcp_olivier/102816/MNINonLinear/Results/rfMRI_REST1_LR/rfMRI_REST1_LR.npy'

# Use data loading library to load data
a, b, y = data.generate_learning_set(np.load(path))

# Generate the model
embedding_model, siamese_model = model.make_linear_models(a.shape[1])

optimizer = optimizers.SGD(lr=0.00001, momentum=0.9, nesterov=True)
# optimizer = optimizers.Adam(lr=0.0001)

siamese_model.compile(optimizer=optimizer, loss='binary_crossentropy', metrics=['accuracy'])

print("data shapes:")
print(a.shape)
print(b.shape)
print(y.shape)

trace = siamese_model.fit([a, b], y, validation_split=0.2, epochs=30, batch_size=16, shuffle=True)

print(trace.history['acc'][-1])
print(trace.history['val_acc'][-1])
"""Example of expanding and unexpanding string variables in entry fields.""" from __future__ import print_function import bibpy import os def get_path_for(path): return os.path.join(os.path.dirname(os.path.abspath(__file__)), path) def print_entries(entries): print(os.linesep.join(map(str, entries))) print() if __name__ == '__main__': filename = get_path_for('../tests/data/string_variables.bib') entries, strings = bibpy.read_file(filename, format='relaxed')[:2] print("* String entries:") print_entries(strings) print("* Without string expansion:") print_entries(entries) # Expand string variables in-place bibpy.expand_strings(entries, strings, ignore_duplicates=False) print("* With string expansion:") print_entries(entries) # Unexpand string variables in-place bibpy.unexpand_strings(entries, strings, ignore_duplicates=False) print("* And without string expansion again:") print_entries(entries)
Fix ordering in string expansion example
"""Example of expanding and unexpanding string variables in entry fields.""" from __future__ import print_function import bibpy import os def get_path_for(path): return os.path.join(os.path.dirname(os.path.abspath(__file__)), path) def print_entries(entries): print(os.linesep.join(map(str, entries))) print() if __name__ == '__main__': filename = get_path_for('../tests/data/string_variables.bib') result = bibpy.read_file(filename, format='relaxed') entries, strings = result.entries, result.strings print("* String entries:") print_entries(strings) print("* Without string expansion:") print_entries(entries) # Expand string variables in-place bibpy.expand_strings(entries, strings, ignore_duplicates=False) print("* With string expansion:") print_entries(entries) # Unexpand string variables in-place bibpy.unexpand_strings(entries, strings, ignore_duplicates=False) print("* And without string expansion again:") print_entries(entries)
from ofxparse import OfxParser, AccountType, Account, Statement, Transaction

__version__ = '0.10'
Set the version number to 0.11.wave
from ofxparse import OfxParser, AccountType, Account, Statement, Transaction

__version__ = '0.11.wave'
from google.appengine.ext import db


class CHPIncident(db.Model):
    CenterID = db.StringProperty(required=True)
    DispatchID = db.StringProperty(required=True)
    LogID = db.StringProperty(required=True)
    LogTime = db.DateTimeProperty()
    LogType = db.StringProperty()
    LogTypeID = db.StringProperty()
    Location = db.StringProperty()
    Area = db.StringProperty()
    ThomasBrothers = db.StringProperty()
    TBXY = db.StringProperty()
    LogDetails = db.BlobProperty()
    geolocation = db.GeoPtProperty()
    added = db.DateTimeProperty(auto_now_add=True)
    last_update = db.DateTimeProperty(auto_now=True)
Put a getStatus() method into the CHPIncident model. That's the right way to do it.
from google.appengine.ext import db
from datetime import datetime, timedelta


class CHPIncident(db.Model):
    CenterID = db.StringProperty(required=True)
    DispatchID = db.StringProperty(required=True)
    LogID = db.StringProperty(required=True)
    LogTime = db.DateTimeProperty()
    LogType = db.StringProperty()
    LogTypeID = db.StringProperty()
    Location = db.StringProperty()
    Area = db.StringProperty()
    ThomasBrothers = db.StringProperty()
    TBXY = db.StringProperty()
    LogDetails = db.BlobProperty()
    geolocation = db.GeoPtProperty()
    added = db.DateTimeProperty(auto_now_add=True)
    last_update = db.DateTimeProperty(auto_now=True)

    def getStatus(self):
        if self.added > datetime.utcnow() - timedelta(minutes=5):
            # less than 5 min old == new
            return 'new'
        elif self.last_update < datetime.utcnow() - timedelta(minutes=5):
            # not updated in 5 min == inactive
            return 'inactive'
        else:
            return 'active'
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

name = 'morepath_cerebral_todomvc'
description = (
    'Morepath example of using React & Cerebral'
)
version = '0.1.0'

setup(
    name=name,
    version=version,
    description=description,
    author='Henri Hulski',
    author_email='[email protected]',
    license='MIT',
    url="https://github.com/morepath/morepath_cerebral_todomvc",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'morepath>=0.14',
    ],
    extras_require=dict(
        test=[
            'pytest',
            'webtest',
        ],
    ),
    entry_points=dict(
        console_scripts=[
            'run-app = morepath_cerebral_todomvc.run:run',
        ]
    ),
    classifiers=[
        'Intended Audience :: Developers',
        'Environment :: Web Environment',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ]
)
Adjust entry_points to fix autoscan
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages

name = 'morepath_cerebral_todomvc'
description = (
    'Morepath example of using React & Cerebral'
)
version = '0.1.0'

setup(
    name=name,
    version=version,
    description=description,
    author='Henri Hulski',
    author_email='[email protected]',
    license='MIT',
    url="https://github.com/morepath/morepath_cerebral_todomvc",
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    platforms='any',
    install_requires=[
        'morepath>=0.14',
    ],
    extras_require=dict(
        test=[
            'pytest',
            'webtest',
        ],
    ),
    entry_points=dict(
        morepath=[
            'scan = server',
        ],
        console_scripts=[
            'run-app = server.run:run',
        ],
    ),
    classifiers=[
        'Intended Audience :: Developers',
        'Environment :: Web Environment',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.4',
    ]
)
from setuptools import find_packages, setup

setup(
    name='ActionCableZwei',
    version='0.1.3',
    license='MIT',
    description='Action Cable Client for Python 3',
    author='Tobias Feistmantl',
    author_email='[email protected]',
    url='https://github.com/tobiasfeistmantl/python-actioncable-zwei',
    packages=find_packages(),
    install_requires=['websocket-client']
)
Update version number to 0.1.4
from setuptools import find_packages, setup

setup(
    name='ActionCableZwei',
    version='0.1.4',
    license='MIT',
    description='Action Cable Client for Python 3',
    author='Tobias Feistmantl',
    author_email='[email protected]',
    url='https://github.com/tobiasfeistmantl/python-actioncable-zwei',
    packages=find_packages(),
    install_requires=['websocket-client']
)
from setuptools import setup, find_packages

import os

setup(
    name='yamlicious',
    packages=find_packages(),
    scripts=[os.path.join('bin', p) for p in os.listdir('bin')],
)
Add YAML as a dep.
from setuptools import setup, find_packages

import os

setup(
    name='yamlicious',
    packages=find_packages(),
    scripts=[os.path.join('bin', p) for p in os.listdir('bin')],
    install_requires=[
        'pyyaml',
    ]
)
from setuptools import setup, find_packages

from suponoff import __version__ as version

if __name__ == '__main__':
    with open("README.rst") as f:
        long_description = f.read()

    setup(
        name="suponoff",
        version=version,
        author="Gambit Research",
        author_email="[email protected]",
        description="An alternative Supervisor web interface.",
        long_description=long_description,
        license="BSD",
        url="https://github.com/GambitResearch/suponoff",
        zip_safe=False,
        include_package_data=True,
        packages=find_packages(),
        scripts=[
            'suponoff-monhelper.py'
        ],
        install_requires=[
            "Django >= 1.7",  # just because I only tested with Django 1.7...
        ],
        classifiers=[
            "Development Status :: 4 - Beta",
            "Environment :: Web Environment",
            "Framework :: Django",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: BSD License",
            "Operating System :: Linux",
            "Programming Language :: Python",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.3",
            "Topic :: Internet :: WWW/HTTP",
            "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
            "Topic :: Internet :: WWW/HTTP :: WSGI",
            ("Topic :: Software Development :: Libraries :: "
             "Application Frameworks"),
            "Topic :: Software Development :: Libraries :: Python Modules",
            "Topic :: System :: Systems Administration",
        ])
Fix the Operating System classifier; it was invalid
from setuptools import setup, find_packages

from suponoff import __version__ as version

if __name__ == '__main__':
    with open("README.rst") as f:
        long_description = f.read()

    setup(
        name="suponoff",
        version=version,
        author="Gambit Research",
        author_email="[email protected]",
        description="An alternative Supervisor web interface.",
        long_description=long_description,
        license="BSD",
        url="https://github.com/GambitResearch/suponoff",
        zip_safe=False,
        include_package_data=True,
        packages=find_packages(),
        scripts=[
            'suponoff-monhelper.py'
        ],
        install_requires=[
            "Django >= 1.7",  # just because I only tested with Django 1.7...
        ],
        classifiers=[
            "Development Status :: 4 - Beta",
            "Environment :: Web Environment",
            "Framework :: Django",
            "Intended Audience :: Developers",
            "License :: OSI Approved :: BSD License",
            "Operating System :: OS Independent",
            "Programming Language :: Python",
            "Programming Language :: Python :: 3",
            "Programming Language :: Python :: 3.3",
            "Topic :: Internet :: WWW/HTTP",
            "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
            "Topic :: Internet :: WWW/HTTP :: WSGI",
            ("Topic :: Software Development :: Libraries :: "
             "Application Frameworks"),
            "Topic :: Software Development :: Libraries :: Python Modules",
            "Topic :: System :: Systems Administration",
        ])
#!/usr/bin/env python3
import os
from setuptools import setup, find_packages


def get_readme():
    return open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()


setup(
    author="Julio Gonzalez Altamirano",
    author_email='[email protected]',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
    ],
    description="ETL for CapMetro raw data.",
    entry_points={
        'console_scripts': [
            'capmetrics=capmetrics_etl.cli:run',
            'capmetrics-tables=capmetrics_etl.cli.tables'
        ],
    },
    install_requires=['click', 'pytz', 'sqlalchemy', 'xlrd'],
    keywords="python etl transit",
    license="MIT",
    long_description=get_readme(),
    name='capmetrics-etl',
    package_data={
        'capmetrics_etl': ['templates/*.html'],
    },
    packages=find_packages(include=['capmetrics_etl', 'capmetrics_etl.*'],
                           exclude=['tests', 'tests.*']),
    platforms=['any'],
    url='https://github.com/jga/capmetrics-etl',
    version='0.1.0'
)
Switch capmetrics script to etl function.
#!/usr/bin/env python3
import os
from setuptools import setup, find_packages


def get_readme():
    return open(os.path.join(os.path.dirname(__file__), 'README.rst')).read()


setup(
    author="Julio Gonzalez Altamirano",
    author_email='[email protected]',
    classifiers=[
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
    ],
    description="ETL for CapMetro raw data.",
    entry_points={
        'console_scripts': [
            'capmetrics=capmetrics_etl.cli:etl',
            'capmetrics-tables=capmetrics_etl.cli.tables'
        ],
    },
    install_requires=['click', 'pytz', 'sqlalchemy', 'xlrd'],
    keywords="python etl transit",
    license="MIT",
    long_description=get_readme(),
    name='capmetrics-etl',
    package_data={
        'capmetrics_etl': ['templates/*.html'],
    },
    packages=find_packages(include=['capmetrics_etl', 'capmetrics_etl.*'],
                           exclude=['tests', 'tests.*']),
    platforms=['any'],
    url='https://github.com/jga/capmetrics-etl',
    version='0.1.0'
)
from distutils.core import setup

setup(
    name='JekPost',
    version='0.1.0',
    author='Arjun Krishna Babu',
    author_email='[email protected]',
    packages=['jekpost'],
    url='https://github.com/arjunkrishnababu96/jekpost',
    license='LICENSE.txt',
    description='Package to ease the process of creating a new Jekyll post',
    long_description=open('README.txt').read(),
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3.5",
        "Topic :: Utilities",
    ],
)
Change classifier to include Python 3 (instead of Python 3.5)
from distutils.core import setup

setup(
    name='JekPost',
    version='0.1.0',
    author='Arjun Krishna Babu',
    author_email='[email protected]',
    packages=['jekpost'],
    url='https://github.com/arjunkrishnababu96/jekpost',
    license='LICENSE.txt',
    description='Package to ease the process of creating a new Jekyll post',
    long_description=open('README.txt').read(),
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 3",
        "Topic :: Utilities",
    ],
)
#!/usr/bin/env python
# coding=utf-8
__author__ = '[email protected]'

from setuptools import setup

setup(name="Power",
      version="1.0",
      description="Cross-platform system power status information.",
      author="Ilya Kulakov",
      author_email="[email protected]",
      url="https://github.com/Kentzo/Power",
      platforms=["Mac OS X 10.6+", "Windows XP+", "Linux 2.6+"],
      packages=['power'],
      classifiers=[
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Topic :: System :: Power (UPS)',
          'Topic :: System :: Hardware',
          'Topic :: System :: Monitoring'
      ],
      install_requires=['pyobjc-core == 2.3.2a0']
      )
Fix wrong install requirement name.
#!/usr/bin/env python
# coding=utf-8
__author__ = '[email protected]'

from setuptools import setup

setup(name="Power",
      version="1.0",
      description="Cross-platform system power status information.",
      author="Ilya Kulakov",
      author_email="[email protected]",
      url="https://github.com/Kentzo/Power",
      platforms=["Mac OS X 10.6+", "Windows XP+", "Linux 2.6+"],
      packages=['power'],
      classifiers=[
          'Operating System :: MacOS :: MacOS X',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: POSIX :: Linux',
          'License :: OSI Approved :: MIT License',
          'Natural Language :: English',
          'Topic :: System :: Power (UPS)',
          'Topic :: System :: Hardware',
          'Topic :: System :: Monitoring'
      ],
      install_requires=['pyobjc == 2.3']
      )
from setuptools import setup, find_packages

setup(
    name="sanitize",
    version="0.33",
    description="bringing sanitiy to world of messed-up data",
    author="Aaron Swartz",
    author_email="[email protected]",
    url='http://www.aaronsw.com/2002/sanitize/',
    license=open('LICENCE').read(),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.3',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
    ],
    packages=find_packages(),
    py_modules=['sanitize'],
    include_package_data=True,
    zip_safe=False,
)
Capitalize the first word of the package description
from setuptools import setup, find_packages

setup(
    name="sanitize",
    version="0.33",
    description="Bringing sanitiy to world of messed-up data",
    author="Aaron Swartz",
    author_email="[email protected]",
    url='http://www.aaronsw.com/2002/sanitize/',
    license=open('LICENCE').read(),
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: GNU General Public License (GPL)',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.3',
        'Programming Language :: Python :: 2.4',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
    ],
    packages=find_packages(),
    py_modules=['sanitize'],
    include_package_data=True,
    zip_safe=False,
)
import os
from setuptools import setup

version_file = open(os.path.join(os.path.dirname(__file__), 'VERSION'))
version = version_file.read().strip()

README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()

setup(
    name='canvas_python_sdk',
    version=version,
    description='A python sdk for the canvas LMS api',
    author='Harvard University',
    author_email='[email protected]',
    url='https://github.com/Harvard-University-iCommons/canvas_python_sdk',
    packages=['canvas_sdk'],
    long_description=README,
    classifiers=[
        "License :: OSI Approved :: MIT License",
        'Operating System :: OS Independent',
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Software Development",
    ],
    keywords='canvas api sdk LMS',
    license='MIT',
    install_requires=[
        'requests>=2.3.0'
    ],
    test_suite='tests',
    tests_require=[
        'mock>=1.0.1',
    ],
)
Use find_packages helper to get everything except for tests. Add sphinx as an installation requirement.
import os
from setuptools import setup, find_packages

version_file = open(os.path.join(os.path.dirname(__file__), 'VERSION'))
version = version_file.read().strip()

README = open(os.path.join(os.path.dirname(__file__), 'README.md')).read()

setup(
    name='canvas_python_sdk',
    version=version,
    description='A python sdk for the canvas LMS api',
    author='Harvard University',
    author_email='[email protected]',
    url='https://github.com/Harvard-University-iCommons/canvas_python_sdk',
    packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
    long_description=README,
    classifiers=[
        "License :: OSI Approved :: MIT License",
        'Operating System :: OS Independent',
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Development Status :: 4 - Beta",
        "Intended Audience :: Developers",
        "Topic :: Software Development",
    ],
    keywords='canvas api sdk LMS',
    license='MIT',
    install_requires=[
        'requests>=2.3.0',
        'sphinx>=1.2.0',
    ],
    test_suite='tests',
    tests_require=[
        'mock>=1.0.1',
    ],
)
#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "[email protected]"

####---- Imports ----####
from pygcode import Line, GCodeLinearMove


def bounding_box(gcode_file):
    """Take in file of gcode, return dict of max and min bounding values"""
    raise NotImplementedError


def box_gcode(min_xy, max_xy):
    """Take in min/max coordinate tuples, return G0 commands to bound it"""
    raise NotImplementedError


def mid_gcode(min_xy, max_xy):
    """Take in min/max coord tuples, return G0 to go to midpoint"""
    raise NotImplementedError
Add function to return box gcode
#!/usr/bin/env python
# coding=UTF-8
"""Module containing Gcode parsing functions"""
__author__ = "Dylan Armitage"
__email__ = "[email protected]"

####---- Imports ----####
from pygcode import Line, GCodeLinearMove


def bounding_box(gcode_file):
    """Take in file of gcode, return dict of max and min bounding values"""
    raise NotImplementedError


def box_gcode(min_xy, max_xy):
    """Take in min/max coordinate tuples, return G0 commands to bound it"""
    gcode = []
    gcode.append(GCodeLinearMove(X=min_xy[0], Y=min_xy[1]))
    gcode.append(GCodeLinearMove(X=max_xy[0], Y=min_xy[1]))
    gcode.append(GCodeLinearMove(X=max_xy[0], Y=max_xy[1]))
    gcode.append(GCodeLinearMove(X=min_xy[0], Y=max_xy[1]))
    gcode.append(GCodeLinearMove(X=min_xy[0], Y=min_xy[1]))
    # Convert from GCodeLinearMove class to string
    gcode = [str(line) for line in gcode]
    return gcode


def mid_gcode(min_xy, max_xy):
    """Take in min/max coord tuples, return G0 to go to midpoint"""
    raise NotImplementedError
from django.contrib import admin

from . import models

admin.site.register(models.Weekday)
admin.site.register(models.Meal)
admin.site.register(models.MealOption)
admin.site.register(models.Course)
admin.site.register(models.Timetable)
admin.site.register(models.Dish)
admin.site.register(models.Admin)
Add an extra blank line after the imports
from django.contrib import admin

from . import models


admin.site.register(models.Weekday)
admin.site.register(models.Meal)
admin.site.register(models.MealOption)
admin.site.register(models.Course)
admin.site.register(models.Timetable)
admin.site.register(models.Dish)
admin.site.register(models.Admin)
# -*- coding: utf-8 -*-
import warnings

from django.conf import settings  # pylint: disable=W0611
from appconf import AppConf


class EasyMapsSettings(AppConf):
    CENTER = (-41.3, 32)
    GEOCODE = 'easy_maps.geocode.google_v3'
    ZOOM = 16  # See https://developers.google.com/maps/documentation/javascript/tutorial#MapOptions for more information.
    LANGUAGE = 'en'  # See https://developers.google.com/maps/faq#languagesupport for supported languages.
    GOOGLE_MAPS_API_KEY = None
    GOOGLE_KEY = None
    CACHE_LIFETIME = 600  # 10 minutes in seconds

    class Meta:
        prefix = 'easy_maps'
        holder = 'easy_maps.conf.settings'


if hasattr(settings, 'EASY_MAPS_GOOGLE_MAPS_API_KEY'):
    warnings.warn("EASY_MAPS_GOOGLE_MAPS_API_KEY is deprecated, use EASY_MAPS_GOOGLE_KEY",
                  DeprecationWarning)
Check that EASY_MAPS_GOOGLE_MAPS_API_KEY is not None before raising the deprecation warning.
# -*- coding: utf-8 -*-
import warnings

from django.conf import settings  # pylint: disable=W0611
from appconf import AppConf


class EasyMapsSettings(AppConf):
    CENTER = (-41.3, 32)
    GEOCODE = 'easy_maps.geocode.google_v3'
    ZOOM = 16  # See https://developers.google.com/maps/documentation/javascript/tutorial#MapOptions for more information.
    LANGUAGE = 'en'  # See https://developers.google.com/maps/faq#languagesupport for supported languages.
    GOOGLE_MAPS_API_KEY = None
    GOOGLE_KEY = None
    CACHE_LIFETIME = 600  # 10 minutes in seconds

    class Meta:
        prefix = 'easy_maps'
        holder = 'easy_maps.conf.settings'


if settings.EASY_MAPS_GOOGLE_MAPS_API_KEY is not None:
    warnings.warn("EASY_MAPS_GOOGLE_MAPS_API_KEY is deprecated, use EASY_MAPS_GOOGLE_KEY",
                  DeprecationWarning)
import unittest

from stacklogger import srcfile


class TestUtils(unittest.TestCase):

    def test_srcfile(self):
        self.assertTrue(srcfile("foo.py").endswith("foo.py"))
        self.assertTrue(srcfile("foo.pyc").endswith("foo.py"))
        self.assertTrue(srcfile("foo.pyo").endswith("foo.py"))
        self.assertTrue(srcfile("foo").endswith("foo"))
Build fake frames for later testing.
import inspect
import unittest

from stacklogger import srcfile

currentframe = inspect.currentframe


class FakeFrames(object):

    def fake_method(self):
        return currentframe()

    @property
    def fake_property(self):
        return currentframe()

    @classmethod
    def fake_classmethod(cls):
        return currentframe()

    @staticmethod
    def fake_staticmethod():
        return currentframe()


def fake_function():
    return currentframe()


class TestUtils(unittest.TestCase):

    def test_srcfile(self):
        self.assertTrue(srcfile("foo.py").endswith("foo.py"))
        self.assertTrue(srcfile("foo.pyc").endswith("foo.py"))
        self.assertTrue(srcfile("foo.pyo").endswith("foo.py"))
        self.assertTrue(srcfile("foo").endswith("foo"))
#!/usr/bin/env python
"""Script for updating a directory of repositories."""
import logging
import os

import click

from .actions import update_repo


@click.command()
@click.option('-d', '--debug', help='Set DEBUG level logging.', is_flag=True)
@click.argument('dir', default='.')
def main(**kwargs):
    """Update repositories in a directory.

    By default, the current working directory list is used for finding valid
    repositories.
    """
    logging.basicConfig(
        level=logging.INFO,
        format='[%(levelname)s] %(name)s:%(funcName)s - %(message)s')
    log = logging.getLogger(__name__)

    if kwargs['debug']:
        logging.root.setLevel(logging.DEBUG)

    main_dir = kwargs['dir']
    log.info('Finding directories in %s', main_dir)
    dir_list = os.listdir(main_dir)
    log.debug('List of directories: %s', dir_list)

    # Git directory was passed in, not a directory of Git directories
    if '.git' in dir_list:
        dir_list = [kwargs['dir']]

    for directory in dir_list:
        update_repo(os.path.join(main_dir, directory))


if __name__ == '__main__':
    main()
fix: Change to a repeatable verbose option
#!/usr/bin/env python
"""Script for updating a directory of repositories."""
import logging
import os

import click

from .actions import update_repo


def set_logging(ctx, param, value):
    """Set logging level based on how many verbose flags."""
    logging_level = (logging.root.getEffectiveLevel() - value * 10) or 1
    logging.basicConfig(level=logging_level,
                        format='[%(levelname)s] %(name)s:%(funcName)s - %(message)s')


@click.command()
@click.option('-v', '--verbose', count=True, callback=set_logging,
              help='More verbose logging, use multiple times.')
@click.argument('dir', default='.')
def main(**kwargs):
    """Update repositories in a directory.

    By default, the current working directory list is used for finding valid
    repositories.
    """
    log = logging.getLogger(__name__)

    main_dir = kwargs['dir']
    log.info('Finding directories in %s', main_dir)
    dir_list = os.listdir(main_dir)
    log.debug('List of directories: %s', dir_list)

    # Git directory was passed in, not a directory of Git directories
    if '.git' in dir_list:
        dir_list = [kwargs['dir']]

    for directory in dir_list:
        update_repo(os.path.join(main_dir, directory))


if __name__ == '__main__':
    main()
print """ Version Info ============ To obtain version info:: from scan.version import __version__, version_history print __version__ print version_history """ from scan.version import __version__, version_history print "Version history::" for line in version_history.splitlines(): print (" " + line)
Include version detail in doc
print """ Version Info ============ To obtain version info:: from scan.version import __version__, version_history print __version__ print version_history """ import sys sys.path.append("..") from scan.version import __version__, version_history print "Version history::" for line in version_history.splitlines(): print (" " + line)
from .models import Campaign, InformationObject


def connect_info_object(sender, **kwargs):
    reference = kwargs.get('reference')
    if not reference:
        return
    if not reference.startswith('campaign:'):
        return
    namespace, campaign_value = reference.split(':', 1)
    try:
        campaign, slug = campaign_value.split('@', 1)
    except (ValueError, IndexError):
        return

    try:
        campaign_pk = int(campaign)
    except ValueError:
        return

    try:
        campaign = Campaign.objects.get(pk=campaign_pk)
    except Campaign.DoesNotExist:
        return

    try:
        kwargs = {
            'pk': int(slug)
        }
    except ValueError:
        kwargs = {'slug': slug}

    try:
        iobj = InformationObject.objects.get(campaign=campaign, **kwargs)
    except InformationObject.DoesNotExist:
        return

    if iobj.foirequest is not None:
        return
    if iobj.publicbody != sender.public_body:
        return
    if not sender.public:
        return
    iobj.foirequest = sender
    iobj.save()
Save request in new m2m field
from .models import Campaign, InformationObject


def connect_info_object(sender, **kwargs):
    reference = kwargs.get('reference')
    if not reference:
        return
    if not reference.startswith('campaign:'):
        return
    namespace, campaign_value = reference.split(':', 1)
    try:
        campaign, slug = campaign_value.split('@', 1)
    except (ValueError, IndexError):
        return

    try:
        campaign_pk = int(campaign)
    except ValueError:
        return

    try:
        campaign = Campaign.objects.get(pk=campaign_pk)
    except Campaign.DoesNotExist:
        return

    try:
        kwargs = {
            'pk': int(slug)
        }
    except ValueError:
        kwargs = {'slug': slug}

    try:
        iobj = InformationObject.objects.get(campaign=campaign, **kwargs)
    except InformationObject.DoesNotExist:
        return

    if iobj.publicbody != sender.public_body:
        return
    if not sender.public:
        return
    if iobj.foirequest is None:
        iobj.foirequest = sender
    iobj.foirequests.add(sender)
    iobj.save()
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Shared fixtures for :mod:`pytest`."""

from __future__ import print_function, absolute_import

import os

import pytest  # noqa

import gryaml
from py2neo_compat import py2neo_ver


@pytest.fixture
def graphdb():
    """Fixture connecting to graphdb."""
    if 'NEO4J_URI' not in os.environ:
        pytest.skip('Need NEO4J_URI environment variable set')
    graphdb = gryaml.connect(uri=os.environ['NEO4J_URI'])
    graphdb.cypher.execute('MATCH (n) DETACH DELETE n')
    return graphdb


@pytest.yield_fixture
def graphdb_offline():
    """Ensure the database is not connected."""
    if py2neo_ver < 2:
        pytest.skip('Offline not supported in py2neo < 2')
    neo4j_uri_env = os.environ.get('NEO4J_URI', None)
    if neo4j_uri_env:
        del os.environ['NEO4J_URI']
    old_graphdb = gryaml._py2neo.graphdb
    gryaml._py2neo.graphdb = None
    yield
    gryaml._py2neo.graphdb = old_graphdb
    if neo4j_uri_env:
        os.environ['NEO4J_URI'] = neo4j_uri_env
Use `delete_all` instead of running a Cypher query
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Shared fixtures for :mod:`pytest`."""

from __future__ import print_function, absolute_import

import os

import pytest  # noqa

import gryaml
from py2neo_compat import py2neo_ver


@pytest.fixture
def graphdb():
    """Fixture connecting to graphdb."""
    if 'NEO4J_URI' not in os.environ:
        pytest.skip('Need NEO4J_URI environment variable set')
    graphdb = gryaml.connect(uri=os.environ['NEO4J_URI'])
    graphdb.delete_all()
    return graphdb


@pytest.yield_fixture
def graphdb_offline():
    """Ensure the database is not connected."""
    if py2neo_ver < 2:
        pytest.skip('Offline not supported in py2neo < 2')
    neo4j_uri_env = os.environ.get('NEO4J_URI', None)
    if neo4j_uri_env:
        del os.environ['NEO4J_URI']
    old_graphdb = gryaml._py2neo.graphdb
    gryaml._py2neo.graphdb = None
    yield
    gryaml._py2neo.graphdb = old_graphdb
    if neo4j_uri_env:
        os.environ['NEO4J_URI'] = neo4j_uri_env
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'tests',
)
Add required SECRET_KEY setting for Django 1.5+
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': 'test.db',
    }
}

INSTALLED_APPS = (
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'tests',
)

SECRET_KEY = 'abc123'
# Copyright 2014 Anonymous7 from Reddit, Julian Andrews
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.

from __future__ import absolute_import, division

import collections
import unittest

import eval7.xorshift_rand


class XorshiftRandTestCase(unittest.TestCase):
    SAMPLE_COUNT = 10000000
    BINS = 1000
    DELTA = 450

    def check_uniform(self, counter):
        expected_count = self.SAMPLE_COUNT / self.BINS
        self.assertEqual(set(range(self.BINS)), set(counter.keys()))
        for count in counter.values():
            self.assertAlmostEqual(
                count, expected_count, delta=self.DELTA
            )

    def test_random_is_uniform(self):
        sample = (
            eval7.xorshift_rand.random() for i in range(self.SAMPLE_COUNT)
        )
        counter = collections.Counter(int(num * self.BINS) for num in sample)
        self.check_uniform(counter)

    def test_randint_is_uniform(self):
        sample = (
            eval7.xorshift_rand.randint(self.BINS)
            for i in range(self.SAMPLE_COUNT)
        )
        self.check_uniform(collections.Counter(sample))
Reduce sample count for xorshift_rand tests
# Copyright 2014 Anonymous7 from Reddit, Julian Andrews
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.

from __future__ import absolute_import, division

import collections
import unittest

import eval7.xorshift_rand


class XorshiftRandTestCase(unittest.TestCase):
    SAMPLE_COUNT = 1000000
    BINS = 1000
    DELTA = 125

    def check_uniform(self, counter):
        expected_count = self.SAMPLE_COUNT / self.BINS
        self.assertEqual(set(range(self.BINS)), set(counter.keys()))
        for count in counter.values():
            self.assertAlmostEqual(
                count, expected_count, delta=self.DELTA
            )

    def test_random_is_uniform(self):
        sample = (
            eval7.xorshift_rand.random() for i in range(self.SAMPLE_COUNT)
        )
        counter = collections.Counter(int(num * self.BINS) for num in sample)
        self.check_uniform(counter)

    def test_randint_is_uniform(self):
        sample = (
            eval7.xorshift_rand.randint(self.BINS)
            for i in range(self.SAMPLE_COUNT)
        )
        self.check_uniform(collections.Counter(sample))
# A special triplet is defined as: a <= b <= c for
# a in list_a, b in list_b, and c in list_c
def get_num_special_triplets(list_a, list_b, list_c):
    num_special_triplets = 0
    for b in list_b:
        len_a_candidates = len([a for a in list_a if a <= b])
        len_c_candidates = len([c for c in list_c if c <= b])
        num_special_triplets += 1 * len_a_candidates * len_c_candidates
    return num_special_triplets


if __name__ == '__main__':
    _ = input().split()
    list_a = list(set(map(int, input().rstrip().split())))
    list_b = list(set(map(int, input().rstrip().split())))
    list_c = list(set(map(int, input().rstrip().split())))
    num_special_triplets = get_num_special_triplets(list_a, list_b, list_c)
    print(num_special_triplets)
Sort lists prior to computing len of candidates
# A special triplet is defined as: a <= b <= c for
# a in list_a, b in list_b, and c in list_c
def get_num_special_triplets(list_a, list_b, list_c):
    # remove duplicates and sort lists
    list_a = sorted(set(list_a))
    list_b = sorted(set(list_b))
    list_c = sorted(set(list_c))

    num_special_triplets = 0
    for b in list_b:
        len_a_candidates = num_elements_less_than(b, list_a)
        len_c_candidates = num_elements_less_than(b, list_c)
        num_special_triplets += 1 * len_a_candidates * len_c_candidates
    return num_special_triplets


def num_elements_less_than(target, sorted_list):
    for index, candidate in enumerate(sorted_list):
        if candidate > target:
            return index
    return len(sorted_list)


if __name__ == '__main__':
    _ = input().split()
    list_a = list(map(int, input().rstrip().split()))
    list_b = list(map(int, input().rstrip().split()))
    list_c = list(map(int, input().rstrip().split()))
    num_special_triplets = get_num_special_triplets(list_a, list_b, list_c)
    print(num_special_triplets)
import unittest

from hunting.level.map import LevelTile, LevelMap


class TestPathfinding(unittest.TestCase):

    def test_basic_diagonal(self):
        level_map = LevelMap()
        level_map.set_map([[LevelTile() for _ in range(0, 5)] for _ in range(0, 5)])

        self.assertEqual([(1, 1), (2, 2), (3, 3), (4, 4)],
                         level_map.a_star_path(0, 0, 4, 4))

    def test_paths_around_wall(self):
        level_map = LevelMap()
        level_map.set_map([[LevelTile() for _ in range(0, 3)] for _ in range(0, 5)])

        for x in range(1, 5):
            level_map[x][1].blocks = True

        self.assertEqual([(3, 0), (2, 0), (1, 0), (0, 1), (1, 2), (2, 2), (3, 2), (4, 2)],
                         level_map.a_star_path(4, 0, 4, 2))
Add test for force_pathable_endpoint pathfind param This parameter is intended to allow pathing to adjacent squares of an unpassable square. This is necessary because if you want to pathfind to a monster which blocks a square, you don't want to actually go *onto* the square, you just want to go next to it, presumably so you can hit it.
import unittest

from hunting.level.map import LevelTile, LevelMap


class TestPathfinding(unittest.TestCase):

    def test_basic_diagonal(self):
        level_map = LevelMap([[LevelTile() for _ in range(0, 5)] for _ in range(0, 5)])

        self.assertEqual([(1, 1), (2, 2), (3, 3), (4, 4)],
                         level_map.a_star_path(0, 0, 4, 4))

    def test_paths_around_wall(self):
        level_map = LevelMap([[LevelTile() for _ in range(0, 3)] for _ in range(0, 5)])

        for x in range(1, 5):
            level_map[x][1].blocks = True

        self.assertEqual([(3, 0), (2, 0), (1, 0), (0, 1), (1, 2), (2, 2), (3, 2), (4, 2)],
                         level_map.a_star_path(4, 0, 4, 2))

    def tests_force_pathable_endpoint_parameter(self):
        level_map = LevelMap([[LevelTile(False, False)], [LevelTile(True, True)]])

        self.assertEqual([(1, 0)], level_map.a_star_path(0, 0, 1, 0, True))
        self.assertEqual([], level_map.a_star_path(0, 0, 1, 0, False))
from nose.tools import istest, assert_equal

from mammoth.raw_text import extract_raw_text_from_element
from mammoth import documents


@istest
def raw_text_of_text_element_is_value():
    assert_equal("Hello", extract_raw_text_from_element(documents.Text("Hello")))


@istest
def raw_text_of_paragraph_is_terminated_with_newlines():
    paragraph = documents.paragraph(children=[documents.Text("Hello")])
    assert_equal("Hello\n\n", extract_raw_text_from_element(paragraph))


@istest
def non_text_element_without_children_has_no_raw_text():
    tab = documents.Tab()
    assert not hasattr(tab, "children")
    assert_equal("", extract_raw_text_from_element(documents.Tab()))
Make raw text tests consistent with mammoth.js
from nose.tools import istest, assert_equal

from mammoth.raw_text import extract_raw_text_from_element
from mammoth import documents


@istest
def text_element_is_converted_to_text_content():
    element = documents.Text("Hello.")

    result = extract_raw_text_from_element(element)

    assert_equal("Hello.", result)


@istest
def paragraphs_are_terminated_with_newlines():
    element = documents.paragraph(
        children=[
            documents.Text("Hello "),
            documents.Text("world."),
        ],
    )

    result = extract_raw_text_from_element(element)

    assert_equal("Hello world.\n\n", result)


@istest
def children_are_recursively_converted_to_text():
    element = documents.document([
        documents.paragraph(
            [
                documents.text("Hello "),
                documents.text("world.")
            ],
            {}
        )
    ])

    result = extract_raw_text_from_element(element)

    assert_equal("Hello world.\n\n", result)


@istest
def non_text_element_without_children_is_converted_to_empty_string():
    element = documents.line_break
    assert not hasattr(element, "children")

    result = extract_raw_text_from_element(element)

    assert_equal("", result)
from redis import Redis

from remotecv.result_store import BaseStore
from remotecv.utils import logger


class ResultStore(BaseStore):
    WEEK = 604800
    redis_instance = None

    def __init__(self, config):
        super(ResultStore, self).__init__(config)
        if not ResultStore.redis_instance:
            ResultStore.redis_instance = Redis(
                host=config.redis_host,
                port=config.redis_port,
                db=config.redis_database,
                password=config.redis_password,
            )
        self.storage = ResultStore.redis_instance

    def store(self, key, points):
        result = self.serialize(points)
        logger.debug("Points found: %s", result)
        redis_key = "thumbor-detector-%s" % key
        self.storage.setex(redis_key, result, 2 * self.WEEK)
Make compatible with py-redis 3.x Fixes https://github.com/thumbor/remotecv/issues/25
from redis import Redis from remotecv.result_store import BaseStore from remotecv.utils import logger class ResultStore(BaseStore): WEEK = 604800 redis_instance = None def __init__(self, config): super(ResultStore, self).__init__(config) if not ResultStore.redis_instance: ResultStore.redis_instance = Redis( host=config.redis_host, port=config.redis_port, db=config.redis_database, password=config.redis_password, ) self.storage = ResultStore.redis_instance def store(self, key, points): result = self.serialize(points) logger.debug("Points found: %s", result) redis_key = "thumbor-detector-%s" % key self.storage.setex( name=redis_key, value=result, time=2 * self.WEEK, )
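The breakage this record fixes comes from redis-py 3.0 swapping the positional order of setex from (name, value, time) to (name, time, value); spelling out keyword arguments, as the output above does, is accepted by both major versions. A minimal sketch, assuming a Redis server on localhost:

from redis import Redis

r = Redis(host='localhost', port=6379, db=0)

# redis-py 2.x signature: setex(name, value, time)
# redis-py 3.x signature: setex(name, time, value)
# Keyword arguments sidestep the positional incompatibility entirely:
r.setex(name='thumbor-detector-example', value='[]', time=604800)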
from mock import patch from django.test import TestCase from breach.sniffer import Sniffer class SnifferTest(TestCase): def setUp(self): self.endpoint = 'http://localhost' self.sniffer = Sniffer(self.endpoint, '147.102.239.229', 'dionyziz.com', 'wlan0', '8080') @patch('breach.sniffer.requests') def test_sniffer_start(self, requests): self.sniffer.start() self.assertTrue(requests.post.called) @patch('breach.sniffer.requests') def test_sniffer_read(self, requests): self.sniffer.read() self.assertTrue(requests.get.called) @patch('breach.sniffer.requests') def test_sniffer_delete(self, requests): self.sniffer.delete() self.assertTrue(requests.post.called)
Update sniffer tests with new argument passing
from mock import patch from django.test import TestCase from breach.sniffer import Sniffer class SnifferTest(TestCase): def setUp(self): self.endpoint = 'http://localhost' sniffer_params = { 'snifferendpoint': self.endpoint, 'sourceip': '147.102.239.229', 'host': 'dionyziz.com', 'interface': 'wlan0', 'port': '8080', 'calibration_wait': 0.0 } self.sniffer = Sniffer(sniffer_params) @patch('breach.sniffer.requests') def test_sniffer_start(self, requests): self.sniffer.start() self.assertTrue(requests.post.called) @patch('breach.sniffer.requests') def test_sniffer_read(self, requests): self.sniffer.read() self.assertTrue(requests.get.called) @patch('breach.sniffer.requests') def test_sniffer_delete(self, requests): self.sniffer.delete() self.assertTrue(requests.post.called)
#!/usr/bin/env python3 """ Convert a keras model, saved with model.save(...) to a weights and architecture component. """ import argparse def get_args(): d = '(default: %(default)s)' parser = argparse.ArgumentParser(description=__doc__) parser.add_argument('model') parser.add_argument('-w','--weight-file-name', default='weights.h5', help=d) parser.add_argument('-a', '--architecture-file-name', default='architecture.json', help=d) return parser.parse_args() def run(): args = get_args() import keras m = keras.models.load_model(args.model) m.save_weights(args.weight_file_name) with open(args.architecture_file_name,'w') as arch: arch.write(m.to_json(indent=2)) if __name__ == '__main__': run()
Remove Keras from network splitter Keras isn't as stable as h5py and json. This commit removes the keras dependency from the network splitting function.
#!/usr/bin/env python3

"""
Convert a keras model, saved with
model.save(...) to a weights and
architecture component.
"""

import argparse

def get_args():
    d = '(default: %(default)s)'
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('model')
    parser.add_argument('-w','--weight-file-name', default='weights.h5', help=d)
    parser.add_argument('-a', '--architecture-file-name', default='architecture.json', help=d)
    return parser.parse_args()

def run():
    args = get_args()
    from h5py import File
    import json
    m = File(args.model,'r')
    with File(args.weight_file_name,'w') as w:
        # copy each weight group from the source model file into the new
        # weights file (iterating over the empty destination would copy nothing)
        for name, wt in m.items():
            w.copy(wt, name)
    arch = json.loads(m.attrs['model_config'])
    with open(args.architecture_file_name,'w') as arch_file:
        arch_file.write(json.dumps(arch,indent=2))

if __name__ == '__main__':
    run()
from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal from UM.Application import Application from UM.Logger import Logger class MeshFileHandlerProxy(QObject): def __init__(self, parent = None): super().__init__(parent) self._mesh_handler = Application.getInstance().getMeshFileHandler() @pyqtProperty("QStringList", constant=True) def supportedReadFileTypes(self): fileTypes = [] fileTypes.append("All Supported Files (*{0})(*{0})".format(' *'.join(self._mesh_handler.getSupportedFileTypesRead()))) for ext in self._mesh_handler.getSupportedFileTypesRead(): fileTypes.append("{0} file (*.{0})(*.{0})".format(ext[1:])) fileTypes.append("All Files (*.*)(*)") return fileTypes @pyqtProperty("QStringList", constant=True) def supportedWriteFileTypes(self): return self._mesh_handler.getSupportedFileTypesWrite() def createMeshFileHandlerProxy(engine, scriptEngine): return MeshFileHandlerProxy()
Update the supported file types list exposed to QML to use the new dict correctly
from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal from UM.Application import Application from UM.Logger import Logger class MeshFileHandlerProxy(QObject): def __init__(self, parent = None): super().__init__(parent) self._mesh_handler = Application.getInstance().getMeshFileHandler() @pyqtProperty("QStringList", constant=True) def supportedReadFileTypes(self): file_types = [] all_types = [] for ext, desc in self._mesh_handler.getSupportedFileTypesRead().items(): file_types.append("{0} (*.{1})(*.{1})".format(desc, ext)) all_types.append("*.{0}".format(ext)) file_types.sort() file_types.insert(0, "All Supported Types ({0})({0})".format(" ".join(all_types))) file_types.append("All Files (*.*)(*)") return file_types @pyqtProperty("QStringList", constant=True) def supportedWriteFileTypes(self): #TODO: Implement return [] def createMeshFileHandlerProxy(engine, script_engine): return MeshFileHandlerProxy()
import sys import os def dcopid(): '''Get dcop id of karm. Fail if more than one instance running.''' id = stdin = stdout = None try: ( stdin, stdout ) = os.popen2( "dcop" ) l = stdout.readline() while l: if l.startswith( "karm" ): if not id: id = l else: raise "Only one instance of karm may be running." l = stdout.readline() if not id: raise "No karm instance found. Try running dcop at command-line to verify it works." except: if stdin: stdin.close() if stdout: stdout.close() print sys.exc_info()[0] sys.exit(1) stdin.close() stdout.close() # strip trailing newline return id.strip() def test( goal, actual ): '''Raise exception if goal != actual.''' if goal != actual: path, scriptname = os.path.split( sys.argv[0] ) raise "%s: expected '%s', got '%s'" % ( scriptname, goal, actual )
Add KarmTestError so we can distinguish and print full tracebacks for unexpected errors. Delete exception trapping--let the test scripts do that. svn path=/trunk/kdepim/; revision=367066
import sys import os class KarmTestError( Exception ): pass def dcopid(): '''Get dcop id of karm. Fail if more than one instance running.''' id = stdin = stdout = None ( stdin, stdout ) = os.popen2( "dcop" ) l = stdout.readline() while l: if l.startswith( "karm" ): if not id: id = l else: raise KarmTestError( "Only one instance of karm may be running." ) l = stdout.readline() if not id: raise KarmTestError( "No karm instance found. Try running dcop at command-line to verify it works." ) stdin.close() stdout.close() # strip trailing newline return id.strip() def test( goal, actual ): '''Raise exception if goal != actual.''' if goal != actual: path, scriptname = os.path.split( sys.argv[0] ) raise KarmTestError( "%s: expected '%s', got '%s'" % ( scriptname, goal, actual ) )
""" Color mode data structure. """ from __future__ import absolute_import, unicode_literals import attr import logging from psd_tools2.psd.base import ValueElement from psd_tools2.utils import ( read_length_block, write_length_block, write_bytes ) logger = logging.getLogger(__name__) class ColorModeData(ValueElement): """ Color mode data section of the PSD file. For indexed color images the data is the color table for the image in a non-interleaved order. Duotone images also have this data, but the data format is undocumented. """ @classmethod def read(cls, fp): """Read the element from a file-like object. :param fp: file-like object :rtype: ColorModeData """ value = read_length_block(fp) logger.debug('reading color mode data, len=%d' % (len(value))) # TODO: Parse color table. return cls(value) def write(self, fp): """Write the element to a file-like object. :param fp: file-like object """ def writer(f): return write_bytes(f, self.value) logger.debug('writing color mode data, len=%d' % (len(self.value))) return write_length_block(fp, writer)
Fix empty color mode write
""" Color mode data structure. """ from __future__ import absolute_import, unicode_literals import attr import logging from psd_tools2.psd.base import ValueElement from psd_tools2.utils import ( read_length_block, write_length_block, write_bytes ) logger = logging.getLogger(__name__) @attr.s(repr=False, slots=True) class ColorModeData(ValueElement): """ Color mode data section of the PSD file. For indexed color images the data is the color table for the image in a non-interleaved order. Duotone images also have this data, but the data format is undocumented. """ value = attr.ib(default=b'', type=bytes) @classmethod def read(cls, fp): """Read the element from a file-like object. :param fp: file-like object :rtype: ColorModeData """ value = read_length_block(fp) logger.debug('reading color mode data, len=%d' % (len(value))) # TODO: Parse color table. return cls(value) def write(self, fp): """Write the element to a file-like object. :param fp: file-like object """ def writer(f): return write_bytes(f, self.value) logger.debug('writing color mode data, len=%d' % (len(self.value))) return write_length_block(fp, writer)
import importlib import os import sys import unittest import code import struct code_path = os.path.dirname(__file__) code_path = os.path.join(code_path, os.pardir) sys.path.append(code_path) import MOS6502 class TestCartHeaderParsing(unittest.TestCase): def testMagic(self): cpu = MOS6502.CPU() cpu.loadRom("../smb1.nes") self.assertEqual(cpu.rom != None, True) def testRomBanks(self): cpu = MOS6502.CPU() cpu.loadRom("../smb1.nes") self.assertEqual(cpu.rom.numRomBanks, 2) self.assertEqual(cpu.rom.numVromBanks, 1) if __name__ == '__main__': unittest.main()
Use the reset address from the banks properly
import importlib import os import sys import unittest import code import struct code_path = os.path.dirname(__file__) code_path = os.path.join(code_path, os.pardir) sys.path.append(code_path) import MOS6502 class TestCartHeaderParsing(unittest.TestCase): def testMagic(self): cpu = MOS6502.CPU() cpu.loadRom("../smb1.nes") self.assertEqual(cpu.rom != None, True) def testRomBanks(self): cpu = MOS6502.CPU() cpu.loadRom("../smb1.nes") self.assertEqual(cpu.rom.numRomBanks, 2) self.assertEqual(cpu.rom.numVromBanks, 1) startAddr = cpu.ReadMemWord(cpu.reset) firstByte = cpu.ReadMemory(startAddr) self.assertEqual(firstByte, 0x78) if __name__ == '__main__': unittest.main()
# Copyright (C) 2010 rPath, Inc. import testsuite testsuite.setup() from testrunner import testcase from rpath_repeater import models class TestBase(testcase.TestCaseWithWorkDir): pass class ModelsTest(TestBase): def testModelToXml(self): files = models.ImageFiles([ models.ImageFile(title="i1", sha1="s1", size=1), models.ImageFile(title="i2", sha1="s2"), ]) metadata = models.ImageMetadata(owner="me") files.append(metadata) self.failUnlessEqual(files.toXml(), '<files><file><title>i1</title><size>1</size><sha1>s1</sha1></file><file><title>i2</title><sha1>s2</sha1></file><metadata><owner>me</owner></metadata></files>') testsuite.main()
Fix test after metadata changes
# Copyright (C) 2010 rPath, Inc. import testsuite testsuite.setup() from testrunner import testcase from rpath_repeater import models class TestBase(testcase.TestCaseWithWorkDir): pass class ModelsTest(TestBase): def testModelToXml(self): files = models.ImageFiles([ models.ImageFile(title="i1", sha1="s1", size=1), models.ImageFile(title="i2", sha1="s2"), ]) self.failUnlessEqual(files.toXml(), '<files><file><title>i1</title><size>1</size><sha1>s1</sha1></file><file><title>i2</title><sha1>s2</sha1></file></files>') testsuite.main()
class SMSRouter(object): app_label = 'sms' db_name = 'sms' def db_for_read(self, model, **hints): if model._meta.app_label == self.app_label: return self.db_name return None def db_for_write(self, model, **hints): if model._meta.app_label == self.app_label: return self.db_name return None def allow_relation(self, obj1, obj2, **hints): if obj1._meta.app_label == self.app_label or \ obj2._meta.app_label == self.app_label: return False return None def allow_migrate(self, db, app_label, model_name=None, **hints): if app_label == self.app_label: return False return None
Fix bug in sms router.
class TurboSMSRouter(object): app_label = 'turbosms' db_name = 'turbosms' def db_for_read(self, model, **hints): if model._meta.app_label == self.app_label: return self.db_name return None def db_for_write(self, model, **hints): if model._meta.app_label == self.app_label: return self.db_name return None def allow_relation(self, obj1, obj2, **hints): if obj1._meta.app_label == self.app_label or \ obj2._meta.app_label == self.app_label: return False return None def allow_migrate(self, db, app_label, model_name=None, **hints): if app_label == self.app_label: return False return None
try: from south.modelsinspector import add_ignored_fields add_ignored_fields(["^taggit_autosuggest\.managers"]) except ImportError: pass # without south this can fail silently
Correct ignored module name for South.
try: from south.modelsinspector import add_ignored_fields add_ignored_fields(["^taggit_autosuggest_select2\.managers"]) except ImportError: pass # without south this can fail silently
# Copyright 2012 Intel Inc, OpenStack LLC. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For filter and weight tests. """ from openstack.common.scheduler import weights class FakeWeigher1(weights.BaseHostWeigher): def __init__(self): pass class FakeWeigher2(weights.BaseHostWeigher): def __init__(self): pass class FakeClass(object): def __init__(self): pass
Fix Copyright Headers - Rename LLC to Foundation One code change, rest are in headers Change-Id: I73f59681358629e1ad74e49d3d3ca13fcb5c2eb1
# Copyright 2012 Intel Inc, OpenStack Foundation. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Fakes For filter and weight tests. """ from openstack.common.scheduler import weights class FakeWeigher1(weights.BaseHostWeigher): def __init__(self): pass class FakeWeigher2(weights.BaseHostWeigher): def __init__(self): pass class FakeClass(object): def __init__(self): pass
#!/usr/bin/env python # -*- coding: utf8 -*- import sys, os import argparse from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo def main(argv=None): parser = argparse.ArgumentParser(description='Print count of objects for a given collection.') parser.add_argument('path', help="Nuxeo path to collection") parser.add_argument('--pynuxrc', default='~/.pynuxrc-prod', help="rcfile for use with pynux utils") if argv is None: argv = parser.parse_args() dh = DeepHarvestNuxeo(argv.path, '', pynuxrc=argv.pynuxrc) print "about to fetch objects for path {}".format(dh.path) objects = dh.fetch_objects() object_count = len(objects) print "finished fetching objects. {} found".format(object_count) print "about to iterate through objects and get components" component_count = 0 for obj in objects: components = dh.fetch_components(obj) component_count = component_count + len(components) print "finished fetching components. {} found".format(component_count) print "Grand Total: {}".format(object_count + component_count) if __name__ == "__main__": sys.exit(main())
Add option to count components
#!/usr/bin/env python # -*- coding: utf8 -*- import sys, os import argparse from deepharvest.deepharvest_nuxeo import DeepHarvestNuxeo def main(argv=None): parser = argparse.ArgumentParser(description='Print count of objects for a given collection.') parser.add_argument('path', help="Nuxeo path to collection") parser.add_argument('--pynuxrc', default='~/.pynuxrc-prod', help="rcfile for use with pynux utils") parser.add_argument('--components', action='store_true', help="show counts for object components") if argv is None: argv = parser.parse_args() dh = DeepHarvestNuxeo(argv.path, '', pynuxrc=argv.pynuxrc) print "about to fetch objects for path {}".format(dh.path) objects = dh.fetch_objects() object_count = len(objects) print "finished fetching objects. {} found".format(object_count) if not argv.components: return print "about to iterate through objects and get components" component_count = 0 for obj in objects: components = dh.fetch_components(obj) component_count = component_count + len(components) print "finished fetching components. {} found".format(component_count) print "Grand Total: {}".format(object_count + component_count) if __name__ == "__main__": sys.exit(main())
"""EVELink - Python bindings for the EVE API.""" import logging from evelink import account from evelink import api from evelink import char from evelink import constants from evelink import corp from evelink import eve from evelink import map from evelink import server __version__ = "0.6.2" # Implement NullHandler because it was only added in Python 2.7+. class NullHandler(logging.Handler): def emit(self, record): pass # Create a logger, but by default, have it do nothing _log = logging.getLogger('evelink') _log.addHandler(NullHandler()) # Update the version number used in the user-agent api._user_agent = 'evelink v%s' % __version__ __all__ = [ "account", "api", "char", "constants", "corp", "eve", "map", "parsing", "server", ]
Update version to 0.7.0 for release
"""EVELink - Python bindings for the EVE API.""" import logging from evelink import account from evelink import api from evelink import char from evelink import constants from evelink import corp from evelink import eve from evelink import map from evelink import server __version__ = "0.7.0" # Implement NullHandler because it was only added in Python 2.7+. class NullHandler(logging.Handler): def emit(self, record): pass # Create a logger, but by default, have it do nothing _log = logging.getLogger('evelink') _log.addHandler(NullHandler()) # Update the version number used in the user-agent api._user_agent = 'evelink v%s' % __version__ __all__ = [ "account", "api", "char", "constants", "corp", "eve", "map", "parsing", "server", ]
from contextlib import contextmanager from sqlalchemy import MetaData, create_engine from sqlalchemy.engine import Engine from sqlalchemy.orm import sessionmaker class Schema(): def __init__(self, engine_or_url, *args, **kwargs): if isinstance(engine_or_url, Engine): self.engine = engine_or_url else: self.engine = create_engine(engine_or_url, *args, **kwargs) self.meta = MetaData(bind=self.engine) self.meta.reflect(views=True) self.Session = sessionmaker(bind=self.engine) def __getattr__(self, table_name): return self.meta.tables[table_name] def execute(self, *args, **kwargs): with self.session() as session: session.execute(*args, **kwargs) @contextmanager def session(self): """Provides a transactional scope around a series of operations.""" session = self.Session() try: yield session session.commit() except: session.rollback() raise finally: session.close()
Return the result of an Execute!
from contextlib import contextmanager from sqlalchemy import MetaData, create_engine from sqlalchemy.engine import Engine from sqlalchemy.orm import sessionmaker class Schema(): def __init__(self, engine_or_url, *args, **kwargs): if isinstance(engine_or_url, Engine): self.engine = engine_or_url else: self.engine = create_engine(engine_or_url, *args, **kwargs) self.meta = MetaData(bind=self.engine) self.meta.reflect(views=True) self.Session = sessionmaker(bind=self.engine) def __getattr__(self, table_name): return self.meta.tables[table_name] def execute(self, *args, **kwargs): with self.session() as session: return session.execute(*args, **kwargs) @contextmanager def session(self): """Provides a transactional scope around a series of operations.""" session = self.Session() try: yield session session.commit() except: session.rollback() raise finally: session.close()
"""Run autopep8 on the selected buffer in Vim. map <C-I> :pyfile <path_to>/vim_autopep8.py<CR> """ import vim if vim.eval('&syntax') == 'python': encoding = vim.eval('&fileencoding') source = '\n'.join(line.decode(encoding) for line in vim.current.buffer) + '\n' import autopep8 options = autopep8.parse_args(['--range', str(1 + vim.current.range.start), str(1 + vim.current.range.end), '']) formatted = autopep8.fix_code(source, options=options) if source != formatted: if formatted.endswith('\n'): formatted = formatted[:-1] vim.current.buffer[:] = [line.encode(encoding) for line in formatted.splitlines()]
Support Python 3 in Vim usage example
"""Run autopep8 on the selected buffer in Vim. map <C-I> :pyfile <path_to>/vim_autopep8.py<CR> Replace ":pyfile" with ":py3file" if Vim is built with Python 3 support. """ from __future__ import unicode_literals import sys import vim ENCODING = vim.eval('&fileencoding') def encode(text): if sys.version_info[0] >= 3: return text else: return text.encode(ENCODING) def decode(text): if sys.version_info[0] >= 3: return text else: return text.decode(ENCODING) if vim.eval('&syntax') == 'python': source = '\n'.join(decode(line) for line in vim.current.buffer) + '\n' import autopep8 options = autopep8.parse_args(['--range', str(1 + vim.current.range.start), str(1 + vim.current.range.end), '']) formatted = autopep8.fix_code(source, options=options) if source != formatted: if formatted.endswith('\n'): formatted = formatted[:-1] vim.current.buffer[:] = [encode(line) for line in formatted.splitlines()]
from sqlalchemy.orm import joinedload from datetime import datetime from changes.api.base import APIView from changes.api.build_index import execute_build from changes.config import db from changes.constants import Result, Status from changes.models import Build, Job, ItemStat class BuildRestartAPIView(APIView): def post(self, build_id): build = Build.query.options( joinedload('project', innerjoin=True), joinedload('author'), joinedload('source'), ).get(build_id) if build is None: return '', 404 if build.status != Status.finished: return '', 400 # remove any existing job data # TODO(dcramer): this is potentially fairly slow with cascades Job.query.filter( Job.build == build ).delete() ItemStat.query.filter( ItemStat.item_id == build.id ).delete() build.date_started = datetime.utcnow() build.date_modified = build.date_started build.date_finished = None build.duration = None build.status = Status.queued build.result = Result.unknown db.session.add(build) execute_build(build=build) return self.respond(build)
Clean up job stats when jobs are removed in build restart
from sqlalchemy.orm import joinedload from datetime import datetime from changes.api.base import APIView from changes.api.build_index import execute_build from changes.config import db from changes.constants import Result, Status from changes.models import Build, Job, ItemStat class BuildRestartAPIView(APIView): def post(self, build_id): build = Build.query.options( joinedload('project', innerjoin=True), joinedload('author'), joinedload('source'), ).get(build_id) if build is None: return '', 404 if build.status != Status.finished: return '', 400 # ItemStat doesnt cascade ItemStat.query.filter( ItemStat.item_id == build.id ).delete() ItemStat.query.filter( ItemStat.item_id.in_(Job.query.filter( Job.build_id == build.id, )), ).delete() # remove any existing job data # TODO(dcramer): this is potentially fairly slow with cascades Job.query.filter( Job.build_id == build.id ).delete() build.date_started = datetime.utcnow() build.date_modified = build.date_started build.date_finished = None build.duration = None build.status = Status.queued build.result = Result.unknown db.session.add(build) execute_build(build=build) return self.respond(build)
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. from selenium.webdriver.common.by import By from page import Page class Base(Page): _login_locator = (By.ID, 'login') _logout_locator = (By.ID, 'logout') _notification_locator = (By.CLASS_NAME, 'flash') def click_login(self): self.selenium.find_element(*self._login_locator).click() from pages.login import LoginPage return LoginPage(self.testsetup) def click_logout(self): self.selenium.find_element(*self._logout_locator).click() def login(self, username=None, password=None): login_page = self.click_login() return login_page.login(username, password) def logout(self): self.click_logout() @property def notification(self): return self.selenium.find_element(*self._notification_locator).text
Make username and password required arguments
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this file, # You can obtain one at http://mozilla.org/MPL/2.0/. from selenium.webdriver.common.by import By from page import Page class Base(Page): _login_locator = (By.ID, 'login') _logout_locator = (By.ID, 'logout') _notification_locator = (By.CLASS_NAME, 'flash') def click_login(self): self.selenium.find_element(*self._login_locator).click() from pages.login import LoginPage return LoginPage(self.testsetup) def click_logout(self): self.selenium.find_element(*self._logout_locator).click() def login(self, username, password): login_page = self.click_login() return login_page.login(username, password) def logout(self): self.click_logout() @property def notification(self): return self.selenium.find_element(*self._notification_locator).text
from ._remove import remove from ._load_and_dump import load, dump, ZerothIFD, ExifIFD, GPSIFD from ._transplant import transplant from ._insert import insert try: from ._thumbnail import thumbnail except ImportError: print("'thumbnail' function depends on PIL or Pillow.") VERSION = '0.4.7'
Revert "up version to 0.4.7." This reverts commit 9b1177d4a56070092faa89778911d11c70efdc54.
from ._remove import remove from ._load_and_dump import load, dump, ZerothIFD, ExifIFD, GPSIFD from ._transplant import transplant from ._insert import insert try: from ._thumbnail import thumbnail except ImportError: print("'thumbnail' function depends on PIL or Pillow.") VERSION = '0.4.6'
#!/usr/bin/python from subprocess import check_output as co from sys import exit # Actually run bin/mn rather than importing via python path version = 'Mininet ' + co( 'PYTHONPATH=. bin/mn --version', shell=True ) version = version.strip() # Find all Mininet path references lines = co( "egrep -or 'Mininet [0-9\.\+]+\w*' *", shell=True ) error = False for line in lines.split( '\n' ): if line and 'Binary' not in line: fname, fversion = line.split( ':' ) if version != fversion: print( "%s: incorrect version '%s' (should be '%s')" % ( fname, fversion, version ) ) error = True if error: exit( 1 )
Handle version string sent to stderr An unfortunate side effect of switching from print to output() is that all output() goes to stderr. We should probably carefully consider whether this is the right thing to do.
#!/usr/bin/python from subprocess import check_output as co from sys import exit # Actually run bin/mn rather than importing via python path version = 'Mininet ' + co( 'PYTHONPATH=. bin/mn --version 2>&1', shell=True ) version = version.strip() # Find all Mininet path references lines = co( "egrep -or 'Mininet [0-9\.\+]+\w*' *", shell=True ) error = False for line in lines.split( '\n' ): if line and 'Binary' not in line: fname, fversion = line.split( ':' ) if version != fversion: print( "%s: incorrect version '%s' (should be '%s')" % ( fname, fversion, version ) ) error = True if error: exit( 1 )
from django import template from django.conf import settings from django.utils.encoding import smart_str, force_unicode from django.utils.safestring import mark_safe register = template.Library() def saferst(value): try: from docutils.core import publish_parts except ImportError: return force_unicode(value) docutils_setttings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", dict()) try: parts = publish_parts(source=smart_str(value), writer_name="html4css1", settings_overrides=docutils_settings) except: return foce_unicode(value) else: return mark_safe(force_unicode(parts["fragment"])) saferst.is_safe = True register.filter(saferst)
Fix typo foce_unicode -> force_unicode
from django import template from django.conf import settings from django.utils.encoding import smart_str, force_unicode from django.utils.safestring import mark_safe register = template.Library() def saferst(value): try: from docutils.core import publish_parts except ImportError: return force_unicode(value) docutils_setttings = getattr(settings, "RESTRUCTUREDTEXT_FILTER_SETTINGS", dict()) try: parts = publish_parts(source=smart_str(value), writer_name="html4css1", settings_overrides=docutils_settings) except: return force_unicode(value) else: return mark_safe(force_unicode(parts["fragment"])) saferst.is_safe = True register.filter(saferst)
avrChipDB = { 'ATMega2560': { 'signature': [0x1E, 0x98, 0x01], 'pageSize': 128, 'pageCount': 1024, }, } def getChipFromDB(sig): for chip in avrChipDB.values(): if chip['signature'] == sig: return chip return False
Add ATMega1280 chip to programmer chips.
avrChipDB = { 'ATMega1280': { 'signature': [0x1E, 0x97, 0x03], 'pageSize': 128, 'pageCount': 512, }, 'ATMega2560': { 'signature': [0x1E, 0x98, 0x01], 'pageSize': 128, 'pageCount': 1024, }, } def getChipFromDB(sig): for chip in avrChipDB.values(): if chip['signature'] == sig: return chip return False
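getChipFromDB does a linear scan of the table and returns the matching chip record, or False when the signature is unknown. A short usage sketch against the dictionary above (the signature bytes are those an ISP programmer would report for an ATMega1280):

chip = getChipFromDB([0x1E, 0x97, 0x03])
if not chip:
    raise IOError('Chip signature not found in database')
print(chip['pageCount'])  # 512 pages of 128 words for the ATMega1280 entry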
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import os import site import sys from . import build_root, requires_oscrypto from ._import import _preload deps_dir = os.path.join(build_root, 'modularcrypto-deps') if os.path.exists(deps_dir): site.addsitedir(deps_dir) if sys.version_info[0:2] not in [(2, 6), (3, 2)]: from .lint import run as run_lint else: run_lint = None if sys.version_info[0:2] != (3, 2): from .coverage import run as run_coverage from .coverage import coverage run_tests = None else: from .tests import run as run_tests run_coverage = None def run(): """ Runs the linter and tests :return: A bool - if the linter and tests ran successfully """ _preload(requires_oscrypto, True) if run_lint: print('') lint_result = run_lint() else: lint_result = True if run_coverage: print('\nRunning tests (via coverage.py %s)' % coverage.__version__) sys.stdout.flush() tests_result = run_coverage(ci=True) else: print('\nRunning tests') sys.stdout.flush() tests_result = run_tests(ci=True) sys.stdout.flush() return lint_result and tests_result
Fix CI to ignore system install of asn1crypto
# coding: utf-8 from __future__ import unicode_literals, division, absolute_import, print_function import os import site import sys from . import build_root, requires_oscrypto from ._import import _preload deps_dir = os.path.join(build_root, 'modularcrypto-deps') if os.path.exists(deps_dir): site.addsitedir(deps_dir) # In case any of the deps are installed system-wide sys.path.insert(0, deps_dir) if sys.version_info[0:2] not in [(2, 6), (3, 2)]: from .lint import run as run_lint else: run_lint = None if sys.version_info[0:2] != (3, 2): from .coverage import run as run_coverage from .coverage import coverage run_tests = None else: from .tests import run as run_tests run_coverage = None def run(): """ Runs the linter and tests :return: A bool - if the linter and tests ran successfully """ _preload(requires_oscrypto, True) if run_lint: print('') lint_result = run_lint() else: lint_result = True if run_coverage: print('\nRunning tests (via coverage.py %s)' % coverage.__version__) sys.stdout.flush() tests_result = run_coverage(ci=True) else: print('\nRunning tests') sys.stdout.flush() tests_result = run_tests(ci=True) sys.stdout.flush() return lint_result and tests_result
import directive import builder #=========================================================================== # Node visitor functions def visit_passthrough(self, node): pass def depart_passthrough(self, node): pass passthrough = (visit_passthrough, depart_passthrough) #=========================================================================== # Setup and register extension def setup(app): app.add_node(directive.latex_document, html=passthrough) app.add_directive("latex-document", directive.LatexDocumentDirective) app.add_builder(builder.MultiLatexBuilder) return {"version": "0.0"}
Set LaTeX builder to skip latex_document nodes This stops Sphinx's built-in LaTeX builder from complaining about an unknown latex_document node type.
import directive import builder #=========================================================================== # Node visitor functions def visit_passthrough(self, node): pass def depart_passthrough(self, node): pass passthrough = (visit_passthrough, depart_passthrough) #=========================================================================== # Setup and register extension def setup(app): app.add_node(directive.latex_document, latex=passthrough, html=passthrough) app.add_directive("latex-document", directive.LatexDocumentDirective) app.add_builder(builder.MultiLatexBuilder) return {"version": "0.0"}
import csv import re import os from urlparse import urlparse from elasticsearch import Elasticsearch if os.environ.get('BONSAI_URL'): url = urlparse(os.environ['BONSAI_URL']) bonsai_tuple = url.netloc.partition('@') ELASTICSEARCH_HOST = bonsai_tuple[2] ELASTICSEARCH_AUTH = bonsai_tuple[0] es = Elasticsearch([{'host': ELASTICSEARCH_HOST}], http_auth=ELASTICSEARCH_AUTH) else: es = Elasticsearch() with open('data/ParcelCentroids.csv', 'r') as csvfile: print "open file" csv_reader = csv.DictReader(csvfile, fieldnames=[], restkey='undefined-fieldnames', delimiter=',') current_row = 0 for row in csv_reader: current_row += 1 if current_row == 1: csv_reader.fieldnames = row['undefined-fieldnames'] continue address = row if re.match('\d+', address['PVANUM']): es.index(index='addresses', doc_type='address', id=address['PVANUM'], body={'PVANUM': address['PVANUM'], 'NUM1': address['NUM1'], 'NAME': address['NAME'], 'TYPE': address['TYPE'], 'ADDRESS': address['ADDRESS'], 'UNIT': address['UNIT'], 'X': address['X'], 'Y': address['Y']}) csvfile.close()
Change index to OpenAddresses schema
import sys
import csv
import re
import os
from urlparse import urlparse
from elasticsearch import Elasticsearch

if os.environ.get('BONSAI_URL'):
    url = urlparse(os.environ['BONSAI_URL'])
    bonsai_tuple = url.netloc.partition('@')
    ELASTICSEARCH_HOST = bonsai_tuple[2]
    ELASTICSEARCH_AUTH = bonsai_tuple[0]
    es = Elasticsearch([{'host': ELASTICSEARCH_HOST}], http_auth=ELASTICSEARCH_AUTH)
else:
    es = Elasticsearch()

files_given = sys.argv
for file_name in files_given:
    # skip the script's own name in argv; comparison, not assignment
    if file_name == 'index_addresses.py':
        continue
    else:
        file_path = file_name
    print 'adding ' + file_path

    with open(file_path, 'r') as csvfile:
        print "open file"
        csv_reader = csv.DictReader(csvfile, fieldnames=[], restkey='undefined-fieldnames', delimiter=',')
        current_row = 0
        for row in csv_reader:
            current_row += 1
            if current_row == 1:
                csv_reader.fieldnames = row['undefined-fieldnames']
                continue
            address = row
            es.index(index='addresses', doc_type='address', id=current_row-1, body={'NUMBER': address[' NUMBER'], 'STREET': address[' STREET'], 'ADDRESS': address[' NUMBER'] + ' ' + address[' STREET'], 'X': address['LON'], 'Y': address[' LAT']})
    csvfile.close()
#!/usr/bin/env python3 """Main executable for ShivyC compiler For usage, run "./shivyc.py --help". """ import argparse def get_arguments(): """Set up the argument parser and return an object storing the argument values. return - An object storing argument values, as returned by argparse.parse_args() """ parser = argparse.ArgumentParser(description="Compile C files.") # The C file to compile parser.add_argument("file_name") return parser.parse_args() def main(): """Run the compiler """ arguments = get_arguments() print(arguments) if __name__ == "__main__": main()
Rename file_name argument on command line
#!/usr/bin/env python3 """Main executable for ShivyC compiler For usage, run "./shivyc.py --help". """ import argparse def get_arguments(): """Set up the argument parser and return an object storing the argument values. return - An object storing argument values, as returned by argparse.parse_args() """ parser = argparse.ArgumentParser(description="Compile C files.") # The file name of the C file to compile. The file name gets saved to the # file_name attribute of the returned object, but this parameter appears as # "filename" (no underscore) on the command line. parser.add_argument("file_name", metavar="filename") return parser.parse_args() def main(): """Run the compiler """ arguments = get_arguments() print(arguments) if __name__ == "__main__": main()
import sys if not (2, 6) <= sys.version_info < (3,): sys.exit(u'Mopidy requires Python >= 2.6, < 3') from subprocess import PIPE, Popen VERSION = (0, 4, 0) def get_git_version(): process = Popen(['git', 'describe'], stdout=PIPE, stderr=PIPE) if process.wait() != 0: raise Exception('Execution of "git describe" failed') version = process.stdout.read().strip() if version.startswith('v'): version = version[1:] return version def get_plain_version(): return '.'.join(map(str, VERSION)) def get_version(): try: return get_git_version() except Exception: return get_plain_version() class MopidyException(Exception): def __init__(self, message, *args, **kwargs): super(MopidyException, self).__init__(message, *args, **kwargs) self._message = message @property def message(self): """Reimplement message field that was deprecated in Python 2.6""" return self._message @message.setter def message(self, message): self._message = message class SettingsError(MopidyException): pass class OptionalDependencyError(MopidyException): pass from mopidy import settings as default_settings_module from mopidy.utils.settings import SettingsProxy settings = SettingsProxy(default_settings_module)
Raise EnvironmentError instead of Exception to make pylint happy
import sys if not (2, 6) <= sys.version_info < (3,): sys.exit(u'Mopidy requires Python >= 2.6, < 3') from subprocess import PIPE, Popen VERSION = (0, 4, 0) def get_git_version(): process = Popen(['git', 'describe'], stdout=PIPE, stderr=PIPE) if process.wait() != 0: raise EnvironmentError('Execution of "git describe" failed') version = process.stdout.read().strip() if version.startswith('v'): version = version[1:] return version def get_plain_version(): return '.'.join(map(str, VERSION)) def get_version(): try: return get_git_version() except EnvironmentError: return get_plain_version() class MopidyException(Exception): def __init__(self, message, *args, **kwargs): super(MopidyException, self).__init__(message, *args, **kwargs) self._message = message @property def message(self): """Reimplement message field that was deprecated in Python 2.6""" return self._message @message.setter def message(self, message): self._message = message class SettingsError(MopidyException): pass class OptionalDependencyError(MopidyException): pass from mopidy import settings as default_settings_module from mopidy.utils.settings import SettingsProxy settings = SettingsProxy(default_settings_module)
#!/usr/bin/env python # Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Smoke-tests 'settings' blocks. """ import TestGyp test = TestGyp.TestGyp() test.run_gyp('settings.gyp') test.build('test.gyp', test.ALL) test.pass_test()
Make new settings test not run for xcode generator. TBR=evan Review URL: http://codereview.chromium.org/7472006
#!/usr/bin/env python # Copyright (c) 2011 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Smoke-tests 'settings' blocks. """ import TestGyp # 'settings' is only supported for make and scons (and will be removed there as # well eventually). test = TestGyp.TestGyp(formats=['make', 'scons']) test.run_gyp('settings.gyp') test.build('test.gyp', test.ALL) test.pass_test()
import sublime import sublime_plugin try: from GitGutter.view_collection import ViewCollection except ImportError: from view_collection import ViewCollection class GitGutterBaseChangeCommand(sublime_plugin.WindowCommand): def run(self): view = self.window.active_view() inserted, modified, deleted = ViewCollection.diff(view) all_changes = sorted(inserted + modified + deleted) row, col = view.rowcol(view.sel()[0].begin()) current_row = row + 1 line = self.jump(all_changes, current_row) self.window.active_view().run_command("goto_line", {"line": line}) class GitGutterNextChangeCommand(GitGutterBaseChangeCommand): def jump(self, all_changes, current_row): return next((change for change in all_changes if change > current_row), current_row) class GitGutterPrevChangeCommand(GitGutterBaseChangeCommand): def jump(self, all_changes, current_row): return next((change for change in reversed(all_changes) if change < current_row), current_row)
Make line jumps only jump to blocks of changes Instead of every line in a block of modifications, which is tedious
import sublime import sublime_plugin try: from GitGutter.view_collection import ViewCollection except ImportError: from view_collection import ViewCollection class GitGutterBaseChangeCommand(sublime_plugin.WindowCommand): def lines_to_blocks(self, lines): blocks = [] last_line = -2 for line in lines: if line > last_line+1: blocks.append(line) last_line = line return blocks def run(self): view = self.window.active_view() inserted, modified, deleted = ViewCollection.diff(view) inserted = self.lines_to_blocks(inserted) modified = self.lines_to_blocks(modified) all_changes = sorted(inserted + modified + deleted) row, col = view.rowcol(view.sel()[0].begin()) current_row = row + 1 line = self.jump(all_changes, current_row) self.window.active_view().run_command("goto_line", {"line": line}) class GitGutterNextChangeCommand(GitGutterBaseChangeCommand): def jump(self, all_changes, current_row): return next((change for change in all_changes if change > current_row), current_row) class GitGutterPrevChangeCommand(GitGutterBaseChangeCommand): def jump(self, all_changes, current_row): return next((change for change in reversed(all_changes) if change < current_row), current_row)
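The new lines_to_blocks helper keeps only the first line number of each consecutive run, so repeated jumps land on one line per change block rather than on every changed line. The same logic in isolation:

def lines_to_blocks(lines):
    # Keep the first line of each run of consecutive line numbers.
    blocks = []
    last_line = -2
    for line in lines:
        if line > last_line + 1:
            blocks.append(line)
        last_line = line
    return blocks

# Lines 3-5 and 9-10 are modified; jumping should visit only 3 and 9.
assert lines_to_blocks([3, 4, 5, 9, 10]) == [3, 9]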
class defaultdict(dict): """Poor man's implementation of defaultdict for Python 2.4 """ def __init__(self, default_factory=None, **kwargs): self.default_factory = default_factory super(defaultdict, self).__init__(**kwargs) def __getitem__(self, key): if self.default_factory is None: return super(defaultdict, self).__getitem__(key) else: try: return super(defaultdict, self).__getitem__(key) except KeyError: return self.default_factory()
MB-6867: Save generated value for defaultdict Instead of just returning value, keep it in dict. Change-Id: I2a9862503b71f2234a4a450c48998b5f53a951bc Reviewed-on: http://review.couchbase.org/21602 Tested-by: Bin Cui <[email protected]> Reviewed-by: Pavel Paulau <[email protected]>
class defaultdict(dict): """Poor man's implementation of defaultdict for Python 2.4 """ def __init__(self, default_factory=None, **kwargs): self.default_factory = default_factory super(defaultdict, self).__init__(**kwargs) def __getitem__(self, key): if self.default_factory is None: return super(defaultdict, self).__getitem__(key) else: try: return super(defaultdict, self).__getitem__(key) except KeyError: self[key] = self.default_factory() return self[key]
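Storing the factory result under the missing key mirrors what the standard-library defaultdict (available from Python 2.5 onwards) does via __missing__; without the store, mutations on the returned value were silently lost. A quick demonstration with the stdlib version:

from collections import defaultdict

counts = defaultdict(list)
counts['a'].append(1)  # the generated list is stored, not discarded
counts['a'].append(2)
assert counts['a'] == [1, 2]
assert 'a' in counts   # the first access inserted the key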
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80: # python-quilt - A Python implementation of the quilt patch system # # Copyright (C) 2012 Björn Ricks <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA from quilt.cli.meta import Command from quilt.db import Series class SeriesCommand(Command): usage = "%prog series" name = "series" def run(self, option, args): series = Series(self.get_patches_dir()) for patch in series.patches(): print patch
Remove whitespace at end of line
# vim: fileencoding=utf-8 et sw=4 ts=4 tw=80: # python-quilt - A Python implementation of the quilt patch system # # Copyright (C) 2012 Björn Ricks <[email protected]> # # This library is free software; you can redistribute it and/or # modify it under the terms of the GNU Lesser General Public # License as published by the Free Software Foundation; either # version 2.1 of the License, or (at your option) any later version. # This library is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU # Lesser General Public License for more details. # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301 USA from quilt.cli.meta import Command from quilt.db import Series class SeriesCommand(Command): usage = "%prog series" name = "series" def run(self, option, args): series = Series(self.get_patches_dir()) for patch in series.patches(): print patch
import jsonschema CORE_SCHEMA = { 'type': 'object', 'properties': { 'paradigm': { 'type': 'string', }, 'method': { 'type': 'string', }, }, 'additionalProperties': False, } VALIDATORS = { 'core': CORE_SCHEMA } def validate(request, schema_name): jsonschema.validate(request, VALIDATORS[schema_name])
Add 'address' field to core schema
import jsonschema CORE_SCHEMA = { 'type': 'object', 'properties': { 'paradigm': { 'type': 'string', }, 'method': { 'type': 'string', }, 'address': { 'type': 'string', }, }, 'additionalProperties': False, } VALIDATORS = { 'core': CORE_SCHEMA } def validate(request, schema_name): jsonschema.validate(request, VALIDATORS[schema_name])
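With additionalProperties set to False, any key outside the declared trio (paradigm, method, address) fails validation, which is why the new field has to be added to the schema rather than merely sent by clients. A minimal check against the CORE_SCHEMA defined above:

import jsonschema

# Passes: both keys are declared properties.
jsonschema.validate({'paradigm': 'rpc', 'address': 'tcp://localhost:5555'},
                    CORE_SCHEMA)

# Raises ValidationError: 'extra' is not a declared property.
try:
    jsonschema.validate({'extra': 'nope'}, CORE_SCHEMA)
except jsonschema.ValidationError:
    pass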
""" Misc. general utility functions, not tied to Kubespawner directly """ from concurrent.futures import ThreadPoolExecutor from jupyterhub.utils import DT_MIN, DT_MAX, DT_SCALE from tornado import gen, ioloop from traitlets.config import SingletonConfigurable class SingletonExecutor(SingletonConfigurable, ThreadPoolExecutor): """ Simple wrapper to ThreadPoolExecutor that is also a singleton. We want one ThreadPool that is used by all the spawners, rather than one ThreadPool per spawner! """ pass @gen.coroutine def exponential_backoff(func, fail_message, timeout=10, *args, **kwargs): loop = ioloop.IOLoop.current() tic = loop.time() dt = DT_MIN while dt > 0: if func(*args, **kwargs): return else: yield gen.sleep(dt) dt = min(dt * DT_SCALE, DT_MAX, timeout - (loop.time() - tic)) raise TimeoutError(fail_message)
Add random jitter to the exponential backoff function
""" Misc. general utility functions, not tied to Kubespawner directly """ from concurrent.futures import ThreadPoolExecutor import random from jupyterhub.utils import DT_MIN, DT_MAX, DT_SCALE from tornado import gen, ioloop from traitlets.config import SingletonConfigurable class SingletonExecutor(SingletonConfigurable, ThreadPoolExecutor): """ Simple wrapper to ThreadPoolExecutor that is also a singleton. We want one ThreadPool that is used by all the spawners, rather than one ThreadPool per spawner! """ pass @gen.coroutine def exponential_backoff(func, fail_message, timeout=10, *args, **kwargs): loop = ioloop.IOLoop.current() start_tic = loop.time() dt = DT_MIN while True: if (loop.time() - start_tic) > timeout: # We time out! break if func(*args, **kwargs): return else: yield gen.sleep(dt) # Add some random jitter to improve performance # This makes sure that we don't overload any single iteration # of the tornado loop with too many things # See https://www.awsarchitectureblog.com/2015/03/backoff.html # for a good example of why and how this helps dt = min(DT_MAX, (1 + random.random()) * (dt * DT_SCALE)) raise TimeoutError(fail_message)
# -*- coding: utf-8 -*- from werobot.session import SessionStorage from werobot.utils import json_loads, json_dumps class MongoDBStorage(SessionStorage): """ MongoDBStorage 会把你的 Session 数据储存在一个 MongoDB Collection 中 :: import pymongo import werobot from werobot.session.mongodbstorage import MongoDBStorage collection = pymongo.MongoClient()["wechat"]["session"] session_storage = MongoDBStorage(collection) robot = werobot.WeRoBot(token="token", enable_session=True, session_storage=session_storage) 你需要安装 ``pymongo`` 才能使用 MongoDBStorage 。 :param collection: 一个 MongoDB Collection。 """ def __init__(self, collection): import pymongo assert isinstance(collection, pymongo.collection.Collection) self.collection = collection collection.create_index("wechat_id") def _get_document(self, id): return self.collection.find_one({"wechat_id": id}) def get(self, id): document = self._get_document(id) if document: session_json = document["session"] return json_loads(session_json) return {} def set(self, id, value): document = self._get_document(id) session = json_dumps(value) if document: document["session"] = session self.collection.save(document) else: self.collection.insert({ "wechat_id": id, "session": session }) def delete(self, id): document = self._get_document(id) if document: self.collection.remove(document["_id"])
Use new pymongo API in MongoDBStorage
# -*- coding: utf-8 -*- from werobot.session import SessionStorage from werobot.utils import json_loads, json_dumps class MongoDBStorage(SessionStorage): """ MongoDBStorage 会把你的 Session 数据储存在一个 MongoDB Collection 中 :: import pymongo import werobot from werobot.session.mongodbstorage import MongoDBStorage collection = pymongo.MongoClient()["wechat"]["session"] session_storage = MongoDBStorage(collection) robot = werobot.WeRoBot(token="token", enable_session=True, session_storage=session_storage) 你需要安装 ``pymongo`` 才能使用 MongoDBStorage 。 :param collection: 一个 MongoDB Collection。 """ def __init__(self, collection): self.collection = collection collection.create_index("wechat_id") def _get_document(self, id): return self.collection.find_one({"wechat_id": id}) def get(self, id): document = self._get_document(id) if document: session_json = document["session"] return json_loads(session_json) return {} def set(self, id, value): session = json_dumps(value) self.collection.replace_one({ "wechat_id": id }, { "wechat_id": id, "session": session }, upsert=True) def delete(self, id): self.collection.delete_one({ "wechat_id": id })
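pymongo 3 deprecated save/insert/remove in favour of the CRUD-specification methods, and replace_one(..., upsert=True) collapses the old find-then-save-or-insert dance into one call. A minimal sketch, assuming a local mongod:

import pymongo

collection = pymongo.MongoClient()['wechat']['session']

# upsert=True inserts when no document matches the filter,
# replacing the old save()/insert() branching.
collection.replace_one(
    {'wechat_id': 'user-1'},
    {'wechat_id': 'user-1', 'session': '{}'},
    upsert=True,
)
collection.delete_one({'wechat_id': 'user-1'})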
#!/usr/bin/env python -u from __future__ import absolute_import, division, print_function, unicode_literals import csv from gratipay import wireup from gratipay.models.exchange_route import ExchangeRoute from gratipay.models.participant import Participant from gratipay.billing.exchanges import record_exchange db = wireup.db(wireup.env()) inp = csv.reader(open('balanced/refund/refunds.completed.csv')) note = 'refund of advance payment; see https://medium.com/gratipay-blog/charging-in-arrears-18cacf779bee' for ts, id, amount, username, route_id, status_code, content in inp: if status_code != '201': continue amount = '-' + amount[:-2] + '.' + amount[-2:] print('posting {} back for {}'.format(amount, username)) route = ExchangeRoute.from_id(route_id) rp = route.participant participant = Participant.from_id(rp) if type(rp) is long else rp # Such a hack. :( route.set_attributes(participant=participant) record_exchange(db, route, amount, 0, participant, 'pending', note)
Update post-back script for Braintree
#!/usr/bin/env python -u from __future__ import absolute_import, division, print_function, unicode_literals import csv from decimal import Decimal as D from gratipay import wireup from gratipay.models.exchange_route import ExchangeRoute from gratipay.models.participant import Participant from gratipay.billing.exchanges import record_exchange db = wireup.db(wireup.env()) inp = csv.reader(open('refunds.completed.csv')) note = 'refund of advance payment; see https://medium.com/gratipay-blog/18cacf779bee' total = N = 0 for ts, id, amount, username, route_id, success, ref in inp: print('posting {} back for {}'.format(amount, username)) assert success == 'True' total += D(amount) N += 1 amount = D('-' + amount) route = ExchangeRoute.from_id(route_id) # Such a hack. :( rp = route.participant participant = Participant.from_id(rp) if type(rp) is long else rp route.set_attributes(participant=participant) exchange_id = record_exchange(db, route, amount, 0, participant, 'pending', note) db.run("update exchanges set ref=%s where id=%s", (ref, exchange_id)) print('posted {} back for {}'.format(total, N))
# -*- coding: utf-8 -*- from flask_wtf import Form from flask_wtf.recaptcha import RecaptchaField, Recaptcha from wtforms import StringField, validators from .validation import not_blacklisted_nor_spam class ShortenedURLForm(Form): url = StringField( validators=[ validators.DataRequired(), validators.URL(message="A valid URL is required"), not_blacklisted_nor_spam ] ) recaptcha = RecaptchaField( validators=[ Recaptcha( "Please click on the reCAPTCHA field to prove you are a human" ) ] )
Replace double quotes with single quotes as string delimiters This commit replaces double quotes with single quotes as string delimiters to improve consistency.
# -*- coding: utf-8 -*- from flask_wtf import Form from flask_wtf.recaptcha import RecaptchaField, Recaptcha from wtforms import StringField, validators from .validation import not_blacklisted_nor_spam class ShortenedURLForm(Form): url = StringField( validators=[ validators.DataRequired(), validators.URL(message='A valid URL is required'), not_blacklisted_nor_spam ] ) recaptcha = RecaptchaField( validators=[ Recaptcha( 'Please click on the reCAPTCHA field to prove you are a human' ) ] )
from superdesk.base_model import BaseModel def init_app(app): CoverageModel(app=app) def rel(resource, embeddable=False): return { 'type': 'objectid', 'data_relation': {'resource': resource, 'field': '_id', 'embeddable': embeddable} } class CoverageModel(BaseModel): endpoint_name = 'coverages' schema = { 'headline': {'type': 'string'}, 'type': {'type': 'string'}, 'ed_note': {'type': 'string'}, 'scheduled': {'type': 'datetime'}, 'delivery': rel('archive'), 'assigned_user': rel('users', True), 'assigned_desk': rel('desks', True), 'planning_item': rel('planning'), }
Fix data relation not working for custom Guids
from superdesk.base_model import BaseModel def init_app(app): CoverageModel(app=app) def rel(resource, embeddable=False): return { 'type': 'objectid', 'data_relation': {'resource': resource, 'field': '_id', 'embeddable': embeddable} } class CoverageModel(BaseModel): endpoint_name = 'coverages' schema = { 'headline': {'type': 'string'}, 'type': {'type': 'string'}, 'ed_note': {'type': 'string'}, 'scheduled': {'type': 'datetime'}, 'delivery': {'type': 'string'}, 'assigned_user': rel('users', True), 'assigned_desk': rel('desks', True), 'planning_item': {'type': 'string'}, }
from suelta.util import bytes from suelta.sasl import Mechanism, register_mechanism try: import urlparse except ImportError: import urllib.parse as urlparse class X_FACEBOOK_PLATFORM(Mechanism): def __init__(self, sasl, name): super(X_FACEBOOK_PLATFORM, self).__init__(sasl, name) self.check_values(['access_token', 'api_key']) def process(self, challenge=None): if challenge is not None: values = {} for kv in challenge.split('&'): key, value = kv.split('=') values[key] = value resp_data = { 'method': values['method'], 'v': '1.0', 'call_id': '1.0', 'nonce': values['nonce'], 'access_token': self.values['access_token'], 'api_key': self.values['api_key'] } resp = '&'.join(['%s=%s' % (k, v) for k, v in resp_data.items()]) return bytes(resp) return bytes('') def okay(self): return True register_mechanism('X-FACEBOOK-PLATFORM', 40, X_FACEBOOK_PLATFORM, use_hashes=False)
Work around Python3's byte semantics.
from suelta.util import bytes from suelta.sasl import Mechanism, register_mechanism try: import urlparse except ImportError: import urllib.parse as urlparse class X_FACEBOOK_PLATFORM(Mechanism): def __init__(self, sasl, name): super(X_FACEBOOK_PLATFORM, self).__init__(sasl, name) self.check_values(['access_token', 'api_key']) def process(self, challenge=None): if challenge is not None: values = {} for kv in challenge.split(b'&'): key, value = kv.split(b'=') values[key] = value resp_data = { b'method': values[b'method'], b'v': b'1.0', b'call_id': b'1.0', b'nonce': values[b'nonce'], b'access_token': self.values['access_token'], b'api_key': self.values['api_key'] } resp = '&'.join(['%s=%s' % (k, v) for k, v in resp_data.items()]) return bytes(resp) return b'' def okay(self): return True register_mechanism('X-FACEBOOK-PLATFORM', 40, X_FACEBOOK_PLATFORM, use_hashes=False)
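One caveat on the worked-around output above: the final '&'.join still %-formats bytes values into a str, which on Python 3 renders literal b'...' reprs. A minimal sketch that keeps the whole round trip in bytes (the challenge value is hypothetical):

challenge = b'method=auth.xmpp_login&nonce=abc123'

values = dict(kv.split(b'=', 1) for kv in challenge.split(b'&'))
response = b'&'.join(b'='.join(pair) for pair in values.items())

assert b'nonce=abc123' in response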
from flask import Flask import logging import logging.config import RPi.GPIO as GPIO from .config import config, config_loader from .channel import ChannelFactory app = Flask('rpi_gpio_http') logging.config.dictConfig(config['logger']) logger = logging.getLogger(__name__) logger.info("Config loaded from %s" % config_loader.filename) channels = {} GPIO.setmode(GPIO.BOARD) for ch in config['channels']: if ch['enabled'] != True: continue channel = ChannelFactory.create(ch) if channel: channels[channel.pin] = channel import controllers
Disable warnings in GPIO lib
from flask import Flask import logging import logging.config import RPi.GPIO as GPIO from .config import config, config_loader from .channel import ChannelFactory app = Flask('rpi_gpio_http') logging.config.dictConfig(config['logger']) logger = logging.getLogger(__name__) logger.info("Config loaded from %s" % config_loader.filename) channels = {} GPIO.setwarnings(False) GPIO.setmode(GPIO.BOARD) for ch in config['channels']: if ch['enabled'] != True: continue channel = ChannelFactory.create(ch) if channel: channels[channel.pin] = channel import controllers
#!/usr/bin/env python import urllib.parse import urllib.request def create_player(username, password, email): url = 'https://localhost:3000/players' values = {'username' : username, 'password' : password, 'email' : email } data = urllib.parse.urlencode(values) data = data.encode('utf-8') # data should be bytes req = urllib.request.Request(url, data) response = urllib.request.urlopen(req) the_page = response.read() print("Created user \'{}\' with password \'{}\' and email \'{}\'".format(username, password, email)) if __name__ == '__main__': create_player("chapmang", "password", "[email protected]") create_player("idlee", "deadparrot", "[email protected]") create_player("gilliamt", "lumberjack", "[email protected]") create_player("jonest", "trojanrabbit", "[email protected]") create_player("cleesej", "generaldirection", "[email protected]") create_player("palinm", "fleshwound", "[email protected]")
Switch to requests library instead of urllib
#!/usr/bin/env python import requests def create_player(username, password, email): url = 'https://localhost:3000/players' values = {'username' : username, 'password' : password, 'email' : email } r = requests.post(url, params=values, verify=False) r.raise_for_status() if (r.status_code == 201): print("Created user \'{}\' with password \'{}\' and email \'{}\'".format(username, password, email)) if __name__ == '__main__': create_player("chapmang", "password", "[email protected]") create_player("idlee", "deadparrot", "[email protected]") create_player("gilliamt", "lumberjack", "[email protected]") create_player("jonest", "trojanrabbit", "[email protected]") create_player("cleesej", "generaldirection", "[email protected]") create_player("palinm", "fleshwound", "[email protected]")
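Worth noting about the rewrite above: params= puts the username, password, and email into the URL query string; a conventional form POST would carry them in the request body via data=. A minimal sketch under the same assumptions (local server, self-signed certificate; the email is a placeholder):

import requests

resp = requests.post(
    'https://localhost:3000/players',
    data={'username': 'chapmang', 'password': 'password',
          'email': 'user@example.invalid'},  # form body, not query string
    verify=False,  # only because the local server uses a self-signed cert
)
resp.raise_for_status()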
import os import unittest import shutil from bcbio.rnaseq import cufflinks from bcbio.utils import file_exists, safe_makedir from nose.plugins.attrib import attr DATA_DIR = os.path.join(os.path.dirname(__file__), "bcbio-nextgen-test-data", "data") class TestCufflinks(unittest.TestCase): merged_gtf = os.path.join(DATA_DIR, "cufflinks", "merged.gtf") ref_gtf = os.path.join(DATA_DIR, "cufflinks", "ref-transcripts.gtf") out_dir = "cufflinks-test" def setUp(self): safe_makedir(self.out_dir) @attr("unit") def test_cufflinks_clean(self): clean_fn = os.path.join(self.out_dir, "clean.gtf") dirty_fn = os.path.join(self.out_dir, "dirty.gtf") clean, dirty = cufflinks.clean_assembly(self.merged_gtf, clean_fn, dirty_fn) # fixed_fn = os.path.join(self.out_dir, "fixed.gtf") # fixed = cufflinks.fix_cufflinks_attributes(self.ref_gtf, clean, fixed_fn) assert(file_exists(clean)) assert(os.path.exists(dirty)) # assert(file_exists(fixed)) def tearDown(self): shutil.rmtree(self.out_dir)
Remove some cruft from the cufflinks test.
import os import unittest import shutil from bcbio.rnaseq import cufflinks from bcbio.utils import file_exists, safe_makedir from nose.plugins.attrib import attr DATA_DIR = os.path.join(os.path.dirname(__file__), "bcbio-nextgen-test-data", "data") class TestCufflinks(unittest.TestCase): merged_gtf = os.path.join(DATA_DIR, "cufflinks", "merged.gtf") ref_gtf = os.path.join(DATA_DIR, "cufflinks", "ref-transcripts.gtf") out_dir = "cufflinks-test" def setUp(self): safe_makedir(self.out_dir) @attr("unit") def test_cufflinks_clean(self): clean_fn = os.path.join(self.out_dir, "clean.gtf") dirty_fn = os.path.join(self.out_dir, "dirty.gtf") clean, dirty = cufflinks.clean_assembly(self.merged_gtf, clean_fn, dirty_fn) assert(file_exists(clean)) assert(os.path.exists(dirty)) def tearDown(self): shutil.rmtree(self.out_dir)
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import logging from datetime import datetime from update_wrapper import UpdateWrapper if not os.path.isdir("log"): os.mkdir("log") logging.basicConfig( filename="log/{}.log".format(datetime.now().strftime("%Y%m%d%H%M%S%f")), level=logging.DEBUG) logging.captureWarnings(True) wrapper = UpdateWrapper() wrapper.read_config("config.json") wrapper.run()
Move log file name to a constant
#!/usr/bin/env python # -*- coding: utf-8 -*- import os import logging from datetime import datetime from update_wrapper import UpdateWrapper if not os.path.isdir("log"): os.mkdir("log") LOG_FILE = datetime.now().strftime("%Y%m%d%H%M%S%f") logging.basicConfig( filename="log/{}.log".format(LOG_FILE), level=logging.DEBUG) logging.captureWarnings(True) wrapper = UpdateWrapper() wrapper.read_config("config.json") wrapper.run()
""" Transit Status """ from flask import Flask, render_template import json import settings app = Flask(__name__) app.debug = settings.DEBUG @app.route("/") def root(): wifi = {'WIFI_NAME': settings.WIFI_NAME, 'WIFI_PASSWORD': settings.WIFI_PASSWORD} return render_template('home.html', stops = json.dumps(settings.STOPS), wifi=wifi) if __name__ == "__main__": app.run(host='0.0.0.0', port=9001)
Add endpoint for getting today's Giants game
""" Transit Status """ from flask import Flask, render_template import json import mlb_schedule import settings app = Flask(__name__) app.debug = settings.DEBUG @app.route("/") def root(): wifi = {'WIFI_NAME': settings.WIFI_NAME, 'WIFI_PASSWORD': settings.WIFI_PASSWORD} return render_template('home.html', stops = json.dumps(settings.STOPS), wifi=wifi) @app.route("/giants_schedule") def giants_schedule(): return json.dumps(mlb_schedule.get_todays_game()) if __name__ == "__main__": app.run(host='0.0.0.0', port=9001)
from expects import expect, equal from primestg.report import Report from ast import literal_eval with description('Report S06 example'): with before.all: self.data_filenames = [ 'spec/data/S06.xml', # 'spec/data/S06_empty.xml' ] self.report = [] for data_filename in self.data_filenames: with open(data_filename) as data_file: self.report.append(Report(data_file)) with it('generates the expected results for the whole report'): result_filenames = [] for data_filename in self.data_filenames: result_filenames.append('{}_result.txt'.format(data_filename)) for key, result_filename in enumerate(result_filenames): with open(result_filename) as result_file: result_string = result_file.read() expected_result = literal_eval(result_string) result = self.report[key].values expect(result).to(equal(expected_result)) # result_filename = '{}_result.txt'.format(self.data_filename) # # with open(result_filename) as result_file: # result_string = result_file.read() # self.expected_result = literal_eval(result_string) # # result = self.report.values # # expect(result).to(equal(self.expected_result))
Test both correct and with-errors S06 reports
from expects import expect, equal from primestg.report import Report from ast import literal_eval with description('Report S06 example'): with before.all: self.data_filenames = [ 'spec/data/S06.xml', 'spec/data/S06_with_error.xml', # 'spec/data/S06_empty.xml' ] self.report = [] for data_filename in self.data_filenames: with open(data_filename) as data_file: self.report.append(Report(data_file)) with it('generates the expected results for the whole report'): result_filenames = [] warnings = [] for data_filename in self.data_filenames: result_filenames.append('{}_result.txt'.format(data_filename)) for key, result_filename in enumerate(result_filenames): result = [] with open(result_filename) as result_file: result_string = result_file.read() expected_result = literal_eval(result_string) for cnc in self.report[key].concentrators: if cnc.meters: for meter in cnc.meters: for value in meter.values: result.append(value) warnings.append(meter.warnings) print('Result: {} \n Expected result: {} \n Warnings: {}'.format( result, expected_result, warnings)) expect(result).to(equal(expected_result)) expected_warnings = [[], ["ERROR: Cnc(CIR4621704174), " "Meter(ZIV42553686). Thrown exception: " "object of type 'NoneType' has no len()"], []] expect(warnings).to(equal(expected_warnings))
# -*- coding: utf-8 -*- from django.contrib.auth.mixins import LoginRequiredMixin from django.http import HttpResponseRedirect from django.urls import reverse from django.views.generic.base import TemplateView from django.views.generic.detail import DetailView from babybuddy.mixins import PermissionRequired403Mixin from core.models import Child class Dashboard(LoginRequiredMixin, TemplateView): # TODO: Use .card-deck in this template once BS4 is finalized. template_name = 'dashboard/dashboard.html' # Show the overall dashboard or a child dashboard if one Child instance. def get(self, request, *args, **kwargs): children = Child.objects.count() if children == 0: return HttpResponseRedirect(reverse('babybuddy:welcome')) elif children == 1: return HttpResponseRedirect( reverse( 'dashboard:dashboard-child', args={Child.objects.first().slug} ) ) return super(Dashboard, self).get(request, *args, **kwargs) def get_context_data(self, **kwargs): context = super(Dashboard, self).get_context_data(**kwargs) context['objects'] = Child.objects.all().order_by('last_name') return context class ChildDashboard(PermissionRequired403Mixin, DetailView): model = Child permission_required = ('core.view_child',) raise_exception = True template_name = 'dashboard/child.html'
Add dashboard sort clauses: first name and id I have seen the dashboard sorting be inconsistent. Last name then first name then id should produce a consistent and predictable sort order.
# -*- coding: utf-8 -*- from django.contrib.auth.mixins import LoginRequiredMixin from django.http import HttpResponseRedirect from django.urls import reverse from django.views.generic.base import TemplateView from django.views.generic.detail import DetailView from babybuddy.mixins import PermissionRequired403Mixin from core.models import Child class Dashboard(LoginRequiredMixin, TemplateView): # TODO: Use .card-deck in this template once BS4 is finalized. template_name = 'dashboard/dashboard.html' # Show the overall dashboard or a child dashboard if one Child instance. def get(self, request, *args, **kwargs): children = Child.objects.count() if children == 0: return HttpResponseRedirect(reverse('babybuddy:welcome')) elif children == 1: return HttpResponseRedirect( reverse( 'dashboard:dashboard-child', args={Child.objects.first().slug} ) ) return super(Dashboard, self).get(request, *args, **kwargs) def get_context_data(self, **kwargs): context = super(Dashboard, self).get_context_data(**kwargs) context['objects'] = Child.objects.all() \ .order_by('last_name', 'first_name', 'id') return context class ChildDashboard(PermissionRequired403Mixin, DetailView): model = Child permission_required = ('core.view_child',) raise_exception = True template_name = 'dashboard/child.html'
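The appended keys only matter on ties: a multi-key sort consults each later key when the earlier ones compare equal, and ending with the primary key makes the order total. A pure-Python analogue of the ordering above:

children = [
    {'last_name': 'Doe', 'first_name': 'Ann', 'id': 2},
    {'last_name': 'Doe', 'first_name': 'Ann', 'id': 1},
    {'last_name': 'Abel', 'first_name': 'Zoe', 'id': 3},
]
ordered = sorted(children, key=lambda c: (c['last_name'], c['first_name'], c['id']))
assert [c['id'] for c in ordered] == [3, 1, 2]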
class ResultPlugin(object): """ Captures the TestResult object for later inspection. nose doesn't return the full test result object from any of its runner methods. Pass an instance of this plugin to the TestProgram and use ``result`` after running the tests to get the TestResult object. """ name = "result" enabled = True def finalize(self, result): self.result = result class DjangoSetUpPlugin(object): """ Configures Django to setup and tear down the environment. This allows coverage to report on all code imported and used during the initialisation of the test runner. """ name = "django setup" enabled = True # We need this to run before the coverage plugin (which has a score # of 500), so that we still have a stdout for the user interaction # Django sometimes wants to do during test database setup. score = 700 def __init__(self, runner): super(DjangoSetUpPlugin, self).__init__() self.runner = runner def begin(self): """Setup the environment""" self.runner.setup_test_environment() self.old_names = self.runner.setup_databases() def finalize(self, result): """Destroy the environment""" self.runner.teardown_databases(self.old_names) self.runner.teardown_test_environment()
Allow coverage to work, keep stdout, and be activated before initial imports.
import sys class ResultPlugin(object): """ Captures the TestResult object for later inspection. nose doesn't return the full test result object from any of its runner methods. Pass an instance of this plugin to the TestProgram and use ``result`` after running the tests to get the TestResult object. """ name = "result" enabled = True def finalize(self, result): self.result = result class DjangoSetUpPlugin(object): """ Configures Django to setup and tear down the environment. This allows coverage to report on all code imported and used during the initialisation of the test runner. """ name = "django setup" enabled = True def __init__(self, runner): super(DjangoSetUpPlugin, self).__init__() self.runner = runner self.sys_stdout = sys.stdout def begin(self): """Setup the environment""" sys_stdout = sys.stdout sys.stdout = self.sys_stdout self.runner.setup_test_environment() self.old_names = self.runner.setup_databases() sys.stdout = sys_stdout def finalize(self, result): """Destroy the environment""" self.runner.teardown_databases(self.old_names) self.runner.teardown_test_environment()
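The save-and-restore dance above can be packaged as a context manager, which makes the intent explicit: run database setup against the real stdout, then hand the stream back to whatever capture plugin had replaced it. A minimal sketch:

import contextlib
import sys

@contextlib.contextmanager
def real_stdout(original):
    # Temporarily reinstall `original` as sys.stdout, then restore
    # whatever stream (e.g. a coverage capture) was active.
    captured = sys.stdout
    sys.stdout = original
    try:
        yield
    finally:
        sys.stdout = captured

# Usage: stash sys.stdout at import time, as the plugin's __init__ does,
# then wrap the interactive setup in `with real_stdout(stashed): ...`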
import logging import os import sys import urllib logger = logging.getLogger('mopidy.utils.path') def get_or_create_folder(folder): folder = os.path.expanduser(folder) if not os.path.isdir(folder): logger.info(u'Creating dir %s', folder) os.mkdir(folder, 0755) return folder def get_or_create_file(filename): filename = os.path.expanduser(filename) if not os.path.isfile(filename): logger.info(u'Creating file %s', filename) open(filename, 'w') return filename def path_to_uri(*paths): path = os.path.join(*paths) #path = os.path.expanduser(path) # FIXME Waiting for test case? path = path.encode('utf-8') if sys.platform == 'win32': return 'file:' + urllib.pathname2url(path) return 'file://' + urllib.pathname2url(path)
Add helper for finding files in a folder
import logging import os import sys import urllib logger = logging.getLogger('mopidy.utils.path') def get_or_create_folder(folder): folder = os.path.expanduser(folder) if not os.path.isdir(folder): logger.info(u'Creating dir %s', folder) os.mkdir(folder, 0755) return folder def get_or_create_file(filename): filename = os.path.expanduser(filename) if not os.path.isfile(filename): logger.info(u'Creating file %s', filename) open(filename, 'w') return filename def path_to_uri(*paths): path = os.path.join(*paths) #path = os.path.expanduser(path) # FIXME Waiting for test case? path = path.encode('utf-8') if sys.platform == 'win32': return 'file:' + urllib.pathname2url(path) return 'file://' + urllib.pathname2url(path) def find_files(folder): for dirpath, dirnames, filenames in os.walk(folder): for filename in filenames: dirpath = os.path.abspath(dirpath) yield os.path.join(dirpath, filename)
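A quick exercise of the new generator, restated inline so the snippet runs standalone (the surrounding module targets Python 2, per the 0755 octal literal; this sketch uses Python 3's tempfile.TemporaryDirectory):

import os
import tempfile

def find_files(folder):  # mirrors the helper above
    for dirpath, dirnames, filenames in os.walk(folder):
        for filename in filenames:
            yield os.path.join(os.path.abspath(dirpath), filename)

with tempfile.TemporaryDirectory() as folder:
    open(os.path.join(folder, 'track.mp3'), 'w').close()
    paths = list(find_files(folder))
    assert len(paths) == 1 and os.path.isabs(paths[0])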
from django.db import models from django.utils.translation import ugettext_lazy as _ from parler.models import TranslatableModel, TranslatedFields from fluent_contents.models import ContentItem, PlaceholderField class SharedContent(TranslatableModel): """ The parent hosting object for shared content """ translations = TranslatedFields( title = models.CharField(_("Title"), max_length=200) ) slug = models.SlugField(_("Template code"), unique=True, help_text=_("This unique name can be used refer to this content in in templates.")) contents = PlaceholderField("shared_content", verbose_name=_("Contents")) # NOTE: settings such as "template_name", and which plugins are allowed can be added later. class Meta: verbose_name = _("Shared content") verbose_name_plural = _("Shared content") def __unicode__(self): return self.title class SharedContentItem(ContentItem): """ The contentitem to include in a page. """ shared_content = models.ForeignKey(SharedContent, verbose_name=_('Shared content'), related_name='shared_content_items') class Meta: verbose_name = _('Shared content') verbose_name_plural = _('Shared content') def __unicode__(self): return unicode(self.shared_content)
Add ContentItemRelation to SharedContent model Displays objects in the admin delete screen.
from django.db import models from django.utils.translation import ugettext_lazy as _ from parler.models import TranslatableModel, TranslatedFields from fluent_contents.models import ContentItem, PlaceholderField, ContentItemRelation class SharedContent(TranslatableModel): """ The parent hosting object for shared content """ translations = TranslatedFields( title = models.CharField(_("Title"), max_length=200) ) slug = models.SlugField(_("Template code"), unique=True, help_text=_("This unique name can be used refer to this content in in templates.")) contents = PlaceholderField("shared_content", verbose_name=_("Contents")) # NOTE: settings such as "template_name", and which plugins are allowed can be added later. # Adding the reverse relation for ContentItem objects # causes the admin to list these objects when moving the shared content contentitem_set = ContentItemRelation() class Meta: verbose_name = _("Shared content") verbose_name_plural = _("Shared content") def __unicode__(self): return self.title class SharedContentItem(ContentItem): """ The contentitem to include in a page. """ shared_content = models.ForeignKey(SharedContent, verbose_name=_('Shared content'), related_name='shared_content_items') class Meta: verbose_name = _('Shared content') verbose_name_plural = _('Shared content') def __unicode__(self): return unicode(self.shared_content)
import json import rexviewer as r import naali import urllib2 from componenthandler import DynamiccomponentHandler class JavascriptHandler(DynamiccomponentHandler): GUINAME = "Javascript Handler" def __init__(self): DynamiccomponentHandler.__init__(self) self.jsloaded = False def onChanged(self): print "-----------------------------------" ent = r.getEntity(self.comp.GetParentEntityId()) datastr = self.comp.GetAttribute() #print "GetAttr got:", datastr data = json.loads(datastr) js_src = data.get('js_src', None) if not self.jsloaded and js_src is not None: jscode = self.loadjs(js_src) print jscode ctx = { #'entity'/'this': self.entity 'component': self.comp } try: ent.touchable except AttributeError: pass else: ctx['touchable'] = ent.touchable naali.runjs(jscode, ctx) print "-- done with js" self.jsloaded = True def loadjs(self, srcurl): print "js source url:", srcurl f = urllib2.urlopen(srcurl) return f.read()
Add placeable to javascript context
import json import rexviewer as r import naali import urllib2 from componenthandler import DynamiccomponentHandler class JavascriptHandler(DynamiccomponentHandler): GUINAME = "Javascript Handler" def __init__(self): DynamiccomponentHandler.__init__(self) self.jsloaded = False def onChanged(self): print "-----------------------------------" ent = r.getEntity(self.comp.GetParentEntityId()) datastr = self.comp.GetAttribute() #print "GetAttr got:", datastr data = json.loads(datastr) js_src = data.get('js_src', None) if not self.jsloaded and js_src is not None: jscode = self.loadjs(js_src) print jscode ctx = { #'entity'/'this': self.entity 'component': self.comp } try: ent.touchable except AttributeError: pass else: ctx['touchable'] = ent.touchable try: ent.placeable except: pass else: ctx['placeable'] = ent.placeable naali.runjs(jscode, ctx) print "-- done with js" self.jsloaded = True def loadjs(self, srcurl): print "js source url:", srcurl f = urllib2.urlopen(srcurl) return f.read()
import pandas as pd def _params_dict_to_dataframe(d): s = pd.Series(d) s.index.name = 'parameters' f = pd.DataFrame({'values': s}) return f def write_excel(filename, **kwargs): """Write data tables to an Excel file, using kwarg names as sheet names. Parameters ---------- filename : str The filename to write to. kwargs : dict Mapping from sheet names to data. """ writer = pd.ExcelWriter(filename) for sheet_name, obj in kwargs.items(): if isinstance(obj, dict): obj = _params_dict_to_dataframe(obj) if isinstance(obj, pd.DataFrame): obj.to_excel(writer, sheet_name=sheet_name) writer.save() writer.close()
Update deprecated excel kwarg in pandas
import pandas as pd def _params_dict_to_dataframe(d): s = pd.Series(d) s.index.name = 'parameters' f = pd.DataFrame({'values': s}) return f def write_excel(filename, **kwargs): """Write data tables to an Excel file, using kwarg names as sheet names. Parameters ---------- filename : str The filename to write to. kwargs : dict Mapping from sheet names to data. """ writer = pd.ExcelWriter(filename) for sheet_name, obj in kwargs.items(): if isinstance(obj, dict): obj = _params_dict_to_dataframe(obj) if isinstance(obj, pd.DataFrame): obj.to_excel(writer, sheetname=sheet_name) writer.save() writer.close()
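Which spelling applies depends on the installed pandas: the commit treats sheet_name as the deprecated form, which matches the release it was written against, while in current releases DataFrame.to_excel takes sheet_name, so check the installed version before applying either spelling. A sketch for modern pandas (requires an engine such as openpyxl; the context manager replaces the explicit save()/close()):

import pandas as pd

df = pd.DataFrame({'values': [1, 2]})
with pd.ExcelWriter('out.xlsx') as writer:
    df.to_excel(writer, sheet_name='parameters')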
# -*- coding: utf-8 -*- from .base import Base class Source(Base): def __init__(self, vim): super().__init__(vim) self.name = 'vimtex_toc' self.kind = 'file' @staticmethod def format_number(n): if not n or n['frontmatter'] or n['backmatter']: return '' num = [str(n[k]) for k in [ 'part', 'chapter', 'section', 'subsection', 'subsubsection', 'subsubsubsection'] if n[k] is not 0] if n['appendix']: num[0] = chr(int(num[0]) + 64) fnum = '.'.join(num) return fnum @staticmethod def create_candidate(e, depth): indent = (' ' * 2*(depth - e['level']) + e['title'])[:60] number = Source.format_number(e['number']) abbr = '{:65}{:10}'.format(indent, number) return {'word': e['title'], 'abbr': abbr, 'action__path': e['file'], 'action__line': e.get('line', 0)} def gather_candidates(self, context): entries = self.vim.eval('vimtex#toc#get_entries()') depth = max([e['level'] for e in entries]) return [Source.create_candidate(e, depth) for e in entries]
Fix Denite support for vim8.
# -*- coding: utf-8 -*- from .base import Base class Source(Base): def __init__(self, vim): super().__init__(vim) self.name = 'vimtex_toc' self.kind = 'file' @staticmethod def format_number(n): if not n or not type(n) is dict or n['frontmatter'] or n['backmatter']: return '' num = [str(n[k]) for k in [ 'part', 'chapter', 'section', 'subsection', 'subsubsection', 'subsubsubsection'] if n[k] is not 0] if n['appendix']: num[0] = chr(int(num[0]) + 64) fnum = '.'.join(num) return fnum @staticmethod def create_candidate(e, depth): indent = (' ' * 2*(int(depth) - int(e['level'])) + e['title'])[:60] number = Source.format_number(e['number']) abbr = '{:65}{:10}'.format(indent, number) return {'word': e['title'], 'abbr': abbr, 'action__path': e['file'], 'action__line': e.get('line', 0)} def gather_candidates(self, context): entries = self.vim.eval('vimtex#toc#get_entries()') depth = max([int(e['level']) for e in entries]) return [Source.create_candidate(e, depth) for e in entries]
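The substantive fix above is coercion: the Vim 8 eval bridge evidently hands numbers back as strings, so int() is applied before subtraction and max(). (The retained `n[k] is not 0` is an identity test rather than a value test, which is fragile in its own right.) A tiny illustration of the coercion:

entries = [{'level': '2'}, {'level': 1}]  # mixed types, as the bridge may deliver
depth = max(int(e['level']) for e in entries)
assert depth == 2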
from pfasst import PFASST __all__ = []
PFASST: Add warning when unable to import PFASST.
try: from pfasst import PFASST except: print 'WARNING: Unable to import PFASST.' __all__ = []
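The guard above uses a bare except and a Python 2 print statement; a narrower, Python 3-friendly variant of the same pattern would catch ImportError and warn:

import warnings

try:
    from pfasst import PFASST
except ImportError:
    warnings.warn('Unable to import PFASST.')

__all__ = []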
from pywatson.answer.answer import Answer from pywatson.question.question import Question import requests class Watson: """The Watson API adapter class""" def __init__(self, url, username, password): self.url = url self.username = username self.password = password def ask_question(self, question_text, question=None): """Ask Watson a question via the Question and Answer API :param question_text: question to ask Watson :type question_text: str :param question: if question_text is not provided, a Question object representing the question to ask Watson :type question: Question :return: Answer """ if question is not None: q = question.to_dict() else: q = Question(question_text).to_dict() r = requests.post(self.url + '/question', json=q) return Answer(r.json())
Use __dict__ instead of to_dict()
from pywatson.answer.answer import Answer from pywatson.question.question import Question import requests class Watson(object): """The Watson API adapter class""" def __init__(self, url, username, password): self.url = url self.username = username self.password = password def ask_question(self, question_text, question=None): """Ask Watson a question via the Question and Answer API :param question_text: question to ask Watson :type question_text: str :param question: if question_text is not provided, a Question object representing the question to ask Watson :type question: Question :return: Answer """ if question is not None: q = question.__dict__ else: q = Question(question_text).__dict__ r = requests.post(self.url + '/question', json=q) return Answer(r.json())
"""This module provides views for application.""" from tof_server import app, versioning, mysql from flask import jsonify, make_response import string, random @app.route('/') def index(): """Server information""" return jsonify({ 'server-version' : versioning.SERVER_VERSION, 'client-versions' : versioning.CLIENT_VERSIONS }) @app.route('/players', methods=['POST']) def generate_new_id(): """Method for generating new unique player ids""" try: cursor = mysql.connection.cursor() new_pin = '' characters_pool = string.ascii_uppercase + string.digits for _ in range(8): new_pin = new_pin + random.SystemRandom().choice(characters_pool) return jsonify({ 'id' : 'somestubid', 'pin' : new_pin }) except Exception as er_msg: return make_response(jsonify({ 'error' : str(er_msg) }), 500) finally: cursor.close()
Insert new player data into db
"""This module provides views for application.""" from tof_server import app, versioning, mysql from flask import jsonify, make_response import string, random @app.route('/') def index(): """Server information""" return jsonify({ 'server-version' : versioning.SERVER_VERSION, 'client-versions' : versioning.CLIENT_VERSIONS }) @app.route('/players', methods=['POST']) def generate_new_id(): """Method for generating new unique player ids""" try: cursor = mysql.connection.cursor() new_pin = '' characters_pool = string.ascii_uppercase + string.digits for _ in range(8): new_pin = new_pin + random.SystemRandom().choice(characters_pool) insert_sql = "INSERT INTO players (auto_pin) VALUES ('%s')" id_sql = "SELECT LAST_INSERT_ID()" cursor.execute(insert_sql, (new_pin)) cursor.execute(id_sql) insert_data = cursor.fetchone() return jsonify({ 'id' : insert_data[0], 'pin' : new_pin }) except Exception as er_msg: return make_response(jsonify({ 'error' : str(er_msg) }), 500) finally: cursor.close()
import bookmarks import unittest class FlaskrTestCase(unittest.TestCase): def setUp(self): self.app = bookmarks.app.test_client() # with bookmarks.app.app_context(): bookmarks.database.init_db() def tearDown(self): # with bookmarks.app.app_context(): bookmarks.database.db_session.remove() bookmarks.database.Base.metadata.drop_all( bind=bookmarks.database.engine) def test_empty_db(self): rv = self.app.get('/') assert b'There aren\'t any bookmarks yet.' in rv.data def register(self, username, name, email, password): return self.app.post('/register_user/', data=dict( username=username, name=name, email=email, password=password, confirm=password ), follow_redirects=True) def login(self, username, password): return self.app.post('/login', data=dict( username=username, password=password, confirm=password ), follow_redirects=True) def logout(self): return self.app.get('/logout', follow_redirects=True) def test_register(self): username = 'byanofsky' name = 'Brandon Yanofsky' email = '[email protected]' password = 'Brandon123' rv = self.register(username, name, email, password) # print(rv.data) assert (b'Successfully registered ' in rv.data) if __name__ == '__main__': unittest.main()
Add param for confirm field on register test func
import bookmarks import unittest class FlaskrTestCase(unittest.TestCase): def setUp(self): self.app = bookmarks.app.test_client() # with bookmarks.app.app_context(): bookmarks.database.init_db() def tearDown(self): # with bookmarks.app.app_context(): bookmarks.database.db_session.remove() bookmarks.database.Base.metadata.drop_all( bind=bookmarks.database.engine) def test_empty_db(self): rv = self.app.get('/') assert b'There aren\'t any bookmarks yet.' in rv.data def register(self, username, name, email, password, confirm=None): return self.app.post('/register_user/', data=dict( username=username, name=name, email=email, password=password, confirm=confirm ), follow_redirects=True) def login(self, username, password): return self.app.post('/login', data=dict( username=username, password=password, confirm=password ), follow_redirects=True) def logout(self): return self.app.get('/logout', follow_redirects=True) def test_register(self): username = 'byanofsky' name = 'Brandon Yanofsky' email = '[email protected]' password = 'Brandon123' rv = self.register(username, name, email, password) # print(rv.data) assert (b'Successfully registered ' in rv.data) if __name__ == '__main__': unittest.main()
from django.db import models from django.db.models.deletion import PROTECT from django_extensions.db.fields import AutoSlugField class Workshop(models.Model): event = models.ForeignKey('events.Event', PROTECT, related_name='workshops') applicant = models.ForeignKey('cfp.Applicant', related_name='workshops') title = models.CharField(max_length=80) slug = AutoSlugField(populate_from="title", unique=True) about = models.TextField() abstract = models.TextField() extra_info = models.TextField(blank=True) skill_level = models.ForeignKey('cfp.AudienceSkillLevel', PROTECT) starts_at = models.DateTimeField() duration_hours = models.DecimalField(max_digits=3, decimal_places=1) tickets_link = models.URLField(blank=True) price = models.PositiveIntegerField(blank=True, null=True) @property def approximate_euro_price(self): return int(self.price / 7.5)
Check price exists before using it
from django.db import models from django.db.models.deletion import PROTECT from django_extensions.db.fields import AutoSlugField class Workshop(models.Model): event = models.ForeignKey('events.Event', PROTECT, related_name='workshops') applicant = models.ForeignKey('cfp.Applicant', related_name='workshops') title = models.CharField(max_length=80) slug = AutoSlugField(populate_from="title", unique=True) about = models.TextField() abstract = models.TextField() extra_info = models.TextField(blank=True) skill_level = models.ForeignKey('cfp.AudienceSkillLevel', PROTECT) starts_at = models.DateTimeField() duration_hours = models.DecimalField(max_digits=3, decimal_places=1) tickets_link = models.URLField(blank=True) price = models.PositiveIntegerField(blank=True, null=True) @property def approximate_euro_price(self): return int(self.price / 7.5) if self.price else None
from django.template.loader import get_template from django.template import Context from haystack import indexes from haystack import site from comics.core.models import Image class ImageIndex(indexes.SearchIndex): document = indexes.CharField(document=True, use_template=True) rendered = indexes.CharField(indexed=False) def prepare_rendered(self, obj): template = get_template('search/results.html') context = Context({'release': obj.get_first_release()}) return template.render(context) site.register(Image, ImageIndex)
Add get_updated_field to search index
from django.template.loader import get_template from django.template import Context from haystack import indexes from haystack import site from comics.core.models import Image class ImageIndex(indexes.SearchIndex): document = indexes.CharField(document=True, use_template=True) rendered = indexes.CharField(indexed=False) def get_updated_field(self): return 'fetched' def prepare_rendered(self, obj): template = get_template('search/results.html') context = Context({'release': obj.get_first_release()}) return template.render(context) site.register(Image, ImageIndex)
from django.core.management import BaseCommand, CommandError from mysite.missions import controllers import sys class Command(BaseCommand): args = '<repo_path> <txn_id>' help = 'SVN pre-commit hook for mission repositories' def handle(self, *args, **options): # This management command is called from the mission svn repositories # as the pre-commit hook. It receives the repository path and transaction # ID as arguments, and it receives a description of applicable lock # tokens on stdin. Its environment and current directory are undefined. if len(args) != 2: raise CommandError, 'Exactly two arguments are expected.' repo_path, txn_id = args try: controllers.SvnCommitMission.pre_commit_hook(repo_path, txn_id) except controllers.IncorrectPatch, e: sys.stderr.write(str(e) + '\n\n') raise CommandError, 'The commit failed to validate.'
Make the error message stand out more for the user when we reject an svn commit.
from django.core.management import BaseCommand, CommandError from mysite.missions import controllers import sys class Command(BaseCommand): args = '<repo_path> <txn_id>' help = 'SVN pre-commit hook for mission repositories' def handle(self, *args, **options): # This management command is called from the mission svn repositories # as the pre-commit hook. It receives the repository path and transaction # ID as arguments, and it receives a description of applicable lock # tokens on stdin. Its environment and current directory are undefined. if len(args) != 2: raise CommandError, 'Exactly two arguments are expected.' repo_path, txn_id = args try: controllers.SvnCommitMission.pre_commit_hook(repo_path, txn_id) except controllers.IncorrectPatch, e: sys.stderr.write('\n ' + str(e) + '\n\n') raise CommandError, 'The commit failed to validate.'
import os __version__ = '0.5.0' # Module level variables. ffmpeg = '' """The path to the installed FFmpeg binary.""" ffprobe = '' """The path to the installed FFprobe binary.""" packager = '' """The path to the installed Shaka Packager binary.""" # Get the directory path where this __init__.py file resides. _dir_path = os.path.abspath(os.path.dirname(__file__)) # This will be executed at import time. for _file in os.listdir(_dir_path): if _file.startswith('ffmpeg'): ffmpeg = os.path.join(_dir_path, _file) elif _file.startswith('ffprobe'): ffprobe = os.path.join(_dir_path, _file) elif _file.startswith('packager'): packager = os.path.join(_dir_path, _file)
build: Fix usage of local streamer_binaries module The old code would search the directory for the binary to use. This worked fine if the package were installed, but when adding the module path to PYTHONPATH, this technique would fail because the folder would have executables for all architectures. Now we will compute the exact filename we expect for each executable, allowing the module to be used locally without installation. This is useful for testing pre-release versions of the module. Change-Id: I35d3a1009b677ef9d29379147312abe3d0a7f8b2
import os import platform __version__ = '0.5.0' # Get the directory path where this __init__.py file resides. _dir_path = os.path.abspath(os.path.dirname(__file__)) # Compute the part of the file name that indicates the OS. _os = { 'Linux': 'linux', 'Windows': 'win', 'Darwin': 'osx', }[platform.system()] # Compute the part of the file name that indicates the CPU architecture. _cpu = { 'x86_64': 'x64', # Linux/Mac report this key 'AMD64': 'x64', # Windows reports this key 'aarch64': 'arm64', }[platform.machine()] # Module level variables. ffmpeg = os.path.join(_dir_path, 'ffmpeg-{}-{}'.format(_os, _cpu)) """The path to the installed FFmpeg binary.""" ffprobe = os.path.join(_dir_path, 'ffprobe-{}-{}'.format(_os, _cpu)) """The path to the installed FFprobe binary.""" packager = os.path.join(_dir_path, 'packager-{}-{}'.format(_os, _cpu)) """The path to the installed Shaka Packager binary."""
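The two dict lookups above raise a bare KeyError on an unsupported OS or CPU; a small sketch of the same mapping with a clearer failure:

import platform

_OS_SUFFIX = {'Linux': 'linux', 'Windows': 'win', 'Darwin': 'osx'}

try:
    os_suffix = _OS_SUFFIX[platform.system()]
except KeyError:
    raise RuntimeError('unsupported platform: {}'.format(platform.system()))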
from os.path import join, dirname from setuptools import setup import django_ponydebugger with open(join(dirname(__file__), 'README.rst')) as f: readme = f.read() with open(join(dirname(__file__), 'LICENSE')) as f: license = f.read() setup( name='django-ponydebugger', version=django_ponydebugger.__version__, description='PonyDebugger support for Django', long_description=readme, packages=[ 'django_ponydebugger', 'django_ponydebugger.domains', ], package_data={ 'django_ponydebugger': ['django-icon.png'], }, install_requires=[ 'websocket-client', ], author='Matthew Eastman', author_email='[email protected]', url='https://github.com/educreations/django-ponydebugger', license=license, classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Topic :: Software Development :: Debuggers', ], )
Use a short string for the license.
import os from setuptools import setup import django_ponydebugger with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as f: readme = f.read() setup( name='django-ponydebugger', version=django_ponydebugger.__version__, description='PonyDebugger support for Django', long_description=readme, packages=[ 'django_ponydebugger', 'django_ponydebugger.domains', ], package_data={ 'django_ponydebugger': ['django-icon.png'], }, install_requires=[ 'websocket-client', ], author='Matthew Eastman', author_email='[email protected]', url='https://github.com/educreations/django-ponydebugger', license='MIT', classifiers=[ 'Environment :: Web Environment', 'Framework :: Django', 'Intended Audience :: Developers', 'License :: OSI Approved :: MIT License', 'Topic :: Software Development :: Debuggers', ], )
from setuptools import setup, find_packages version = '0.2' setup( name='ckanext-oaipmh', version=version, description="OAI-PMH harvester for CKAN", long_description="""\ """, classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers keywords='', author='Mikael Karlsson', author_email='[email protected]', url='https://github.com/kata-csc/ckanext-oaipmh', license='AGPL', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), namespace_packages=['ckanext', 'ckanext.oaipmh'], include_package_data=True, zip_safe=False, install_requires=[ # -*- Extra requirements: -*- 'pyoai', 'ckanext-harvest', 'lxml', 'rdflib', 'beautifulsoup4', 'pointfree', 'functionally', 'fn', ], entry_points=\ """ [ckan.plugins] # Add plugins here, eg oaipmh_harvester=ckanext.oaipmh.harvester:OAIPMHHarvester """, )
Update author to CSC - IT Center for Science Ltd.
from setuptools import setup, find_packages version = '0.2' setup( name='ckanext-oaipmh', version=version, description="OAI-PMH harvester for CKAN", long_description="""\ """, classifiers=[], # Get strings from http://pypi.python.org/pypi?%3Aaction=list_classifiers keywords='', author='CSC - IT Center for Science Ltd.', author_email='[email protected]', url='https://github.com/kata-csc/ckanext-oaipmh', license='AGPL', packages=find_packages(exclude=['ez_setup', 'examples', 'tests']), namespace_packages=['ckanext', 'ckanext.oaipmh'], include_package_data=True, zip_safe=False, install_requires=[ # -*- Extra requirements: -*- 'pyoai', 'ckanext-harvest', 'lxml', 'rdflib', 'beautifulsoup4', 'pointfree', 'functionally', 'fn', ], entry_points=\ """ [ckan.plugins] # Add plugins here, eg oaipmh_harvester=ckanext.oaipmh.harvester:OAIPMHHarvester """, )