        certifications_ids = certification_obj.search(cr, uid, domain, context=context)
        certifications = certification_obj.browse(cr, uid, certifications_ids, context=context)
        types = cert_type_obj.browse(cr, uid, cert_type_obj.search(cr, uid, [], context=context), context=context)
        data = {
            'certifications': certifications,
            'types': types,
        }
        return request.website.render("website_certification.certified_partners", data)


# -*- coding: utf-8 -*-
import pandas as pd
import json
import os


def read_users():
    users = list()
    file_in = open('./username_list.txt', 'r')
    username_list = str(file_in.read()).split(' ')
    file_in.close()
    num = 0
    for username in username_list:
        if not username:
            continue
        if not os.path.exists('./data/Users/%s.json' % username):
            continue
        if not os.path.exists('./data/Twitter/%s_t.json' % username):
            continue
        try:
            file_in = open('./data/Users/%s.json' % username, 'r')
            raw_data = json.loads(str(file_in.read()))
            file_in.close()
            user = dict()
            user['followers_count'] = raw_data['profile']['user']['socialStats']['usersFollowedByCount']
            user['following_count'] = raw_data['profile']['user']['socialStats']['usersFollowedCount']
            file_in = open('./data/Twitter/%s_t.json' % username, 'r')
            raw_data = json.loads(str(file_in.read()))
            file_in.close()
            user['t_following_count'] = raw_data['profile_user']['friends_count']
            user['t_followers_count'] = raw_data['profile_user']['followers_count']
            users.append(user)
        except (OSError, ValueError, KeyError):
            # Skip users whose profile files are unreadable or malformed.
            continue
        num += 1
        print(username)
    print(num)
    return pd.read_json(json.dumps(users))


if __name__ == '__main__':
    if not os.path.exists('./result'):
        os.mkdir('./result')
    users_data = read_users()
    users_data.to_csv('./result/twitter.csv', sep='\t', encoding='utf-8')
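
# A sketch of the per-user JSON layout the script above assumes (the field
# values are hypothetical; the key paths mirror the lookups in read_users):
#
#   ./data/Users/<username>.json
#     {"profile": {"user": {"socialStats": {"usersFollowedByCount": 120,
#                                           "usersFollowedCount": 80}}}}
#   ./data/Twitter/<username>_t.json
#     {"profile_user": {"friends_count": 310, "followers_count": 450}}
#
# pd.DataFrame(users) would build the same frame as the json.dumps /
# pd.read_json round-trip used in read_users.
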
""" import unittest from os import path from contextlib import closing from werkzeug.testsuite import WerkzeugTestCase, get_temporary_directory from werkzeug.wrappers import BaseResponse from werkzeug.exceptions import BadRequest, ClientDisconnected from werkzeug.test import Client, create_environ, run_wsgi_app from werkzeug import wsgi from werkzeug._compat import StringIO, BytesIO, NativeStringIO, to_native class WSGIUtilsTestCase(WerkzeugTestCase): def test_shareddatamiddleware_get_file_loader(self): app = wsgi.SharedDataMiddleware(None, {}) assert callable(app.get_file_loader('foo')) def test_shared_data_middleware(self): def null_application(environ, start_response): start_response('404 NOT FOUND', [('Content-Type', 'text/plain')]) yield b'NOT FOUND' test_dir = get_temporary_directory() with open(path.join(test_dir, to_native(u'äöü', 'utf-8')), 'w') as test_file: test_file.write(u'FOUND') app = wsgi.SharedDataMiddleware(null_application, { '/': path.join(path.dirname(__file__), 'res'), '/sources': path.join(path.dirname(__file__), 'res'), '/pkg': ('werkzeug.debug', 'shared'), '/foo': test_dir }) for p in '/test.txt', '/sources/test.txt', '/foo/äöü': app_iter, status, headers = run_wsgi_app(app, create_environ(p)) self.assert_equal(status, '200 OK') with closing(app_iter) as app_iter: data = b''.join(app_iter).strip() self.assert_equal(data, b'FOUND') app_iter, status, headers = run_wsgi_app( app, create_environ('/pkg/debugger.js')) with closing(app_iter) as app_iter: contents = b''.join(app_iter) self.assert_in(b'$(function() {', contents) app_iter, status, headers = run_wsgi_app( app, create_environ('/missing')) self.assert_equal(status, '404 NOT FOUND') self.assert_equal(b''.join(app_iter).strip(), b'NOT FOUND') def test_get_host(self): env = {'HTTP_X_FORWARDED_HOST': 'example.org', 'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'} self.assert_equal(wsgi.get_host(env), 'example.org') self.assert_equal( wsgi.get_host(create_environ('/', 'http://example.org')), 'example.org') def test_get_host_multiple_forwarded(self): env = {'HTTP_X_FORWARDED_HOST': 'example.com, example.org', 'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'} self.assert_equal(wsgi.get_host(env), 'example.com') self.assert_equal( wsgi.get_host(create_environ('/', 'http://example.com')), 'example.com') def test_get_host_validation(self): env = {'HTTP_X_FORWARDED_HOST': 'example.org', 'SERVER_NAME': 'bullshit', 'HOST_NAME': 'ignore me dammit'} self.assert_equal(wsgi.get_host(env, trusted_hosts=['.example.org']), 'example.org') self.assert_raises(BadRequest, wsgi.get_host, env, trusted_hosts=['example.com']) def test_responder(self): def foo(environ, start_response): return BaseResponse(b'Test') client = Client(wsgi.responder(foo), BaseResponse) response = client.get('/') self.assert_equal(response.status_code, 200) self.assert_equal(response.data, b'Test') def test_pop_path_info(self): original_env = {'SCRIPT_NAME': '/foo', 'PATH_INFO': '/a/b///c'} # regular path info popping def assert_tuple(script_name, path_info): self.assert_equal(env.get('SCRIPT_NAME'), script_name) self.assert_equal(env.get('PATH_INFO'), path_info) env = original_env.copy() pop = lambda: wsgi.pop_path_info(env) assert_tuple('/foo', '/a/b///c') self.assert_equal(pop(), 'a') assert_tuple('/foo/a', '/b///c') self.assert_equal(pop(), 'b') assert_tuple('/foo/a/b', '///c') self.assert_equal(pop(), 'c') assert_tuple('/foo/a/b///c', '') self.assert_is_none(pop()) def test_peek_path_info(self): env = { 'SCRIPT_NAME': '/foo', 
'PATH_INFO': '/aaa/b///c' } self.assert_equal(wsgi.peek_path_info(env), 'aaa') self.assert_equal(wsgi.peek_path_info(env), 'aaa') self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa') self.assert_equal(wsgi.peek_path_info(env, charset=None), b'aaa') def test_path_info_and_script_name_fetching(self): env = create_environ(u'/\N{SNOWMAN}', u'http://example.com/\N{COMET}/') self.assert_equal(wsgi.get_path_info(env), u'/\N{SNOWMAN}') self.assert_equal(wsgi.get_path_info(env, charset=None), u'/\N{SNOWMAN}'.encode('utf-8')) self.assert_equal(wsgi.get_script_name(env), u'/\N{COMET}') self.assert_equal(wsgi.get_script_name(env, charset=None), u'/\N{COMET}'.encode('utf-8')) def test_query_string_fetching(self): env = create_environ(u'/?\N{SNOWMAN}=\N{COMET}') qs = wsgi.get_query_string(env) self.assert_strict_equal(qs, '%E2%98%83=%E2%98%84') def test_limited_stream(self): class RaisingLimitedStream(wsgi.LimitedStream): def on_exhausted(self): raise BadRequest('input stream exhausted') io = BytesIO(b'123456') stream = RaisingLimitedStream(io, 3) self.assert_strict_equal(stream.read(), b'123') self.assert_raises(BadRequest, stream.read) io = BytesIO(b'123456') stream = RaisingLimitedStream(io, 3) self.assert_strict_equal(stream.tell(), 0) self.assert_strict_equal(stream.read(1), b'1') self.assert_strict_equal(stream.tell(), 1) self.assert_strict_equal(stream.read(1), b'2') self.assert_strict_equal(stream.tell(), 2) self.assert_strict_equal(stream.read(1), b'3') self.assert_strict_equal(stream.tell(), 3) self.assert_raises(BadRequest, stream.read) io = BytesIO(b'123456\nabcdefg') stream = wsgi.LimitedStream(io, 9) self.assert_strict_equal(stream.readline(), b'123456\n') self.assert_strict_equal(stream.readline(), b'ab') io = BytesIO(b'123456\nabcdefg') stream = wsgi.LimitedStream(io, 9) self.assert_strict_equal(stream.readlines(), [b'123456\n', b'ab']) io = BytesIO(b'123456\nabcdefg') stream = wsgi.LimitedStream(io, 9) self.assert_strict_equal(stream.readlines(2), [b'12']) self.assert_strict_equal(stream.readlines(2), [b'34']) self.assert_strict_equal(stream.readlines(), [b'56\n', b'ab']) io = BytesIO(b'123456\nabcdefg') stream = wsgi.LimitedStream(io, 9) self.assert_strict_equal(stream.readline(100), b'123456\n') io = BytesIO(b'123456\nabcdefg') stream = wsgi.LimitedStream(io, 9) self.assert_strict_equal(stream.readlines(100), [b'123456\n', b'ab']) io = BytesIO(b'123456') stream = wsgi.LimitedStream(io, 3) self.assert_strict_equal(stream.read(1), b'1') self.assert_strict_equal(stream.read(1), b'2') self.assert_strict_equal(stream.read(), b'3') self.assert_strict_equal(stream.read(), b'') io = BytesIO(b'123456') stream = wsgi.LimitedStream(io, 3) self.assert_strict_equal(stream.read(-1), b'123') io = BytesIO(b'123456') stream = wsgi.LimitedStream(io, 0) self.assert_strict_equal(stream.read(-1), b'') io = StringIO(u'123456') stream = wsgi.LimitedStream(io, 0) self.assert_strict_equal(stream.read(-1), u'') io = StringIO(u'123\n456\n') stream = wsgi.LimitedStream(io, 8) self.assert_strict_equal(list(stream), [u'123\n', u'456\n']) def test_limited_stream_disconnection(self): io = BytesIO(b'A bit of content') # disconnect detection on out of bytes stream = wsgi.LimitedStream(io, 255) with self.assert_raises(ClientDisconnected): stream.read() # disconnect detection because file close io = BytesIO(b'x' * 255) io.close() stream = wsgi.LimitedStream(io, 255) with self.assert_raises(ClientDisconnected): stream.read() def test_path_info_extraction(self): x = 
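
    # A hedged aside on the LimitedStream contract exercised above (the
    # values mirror the assertions, they are not new behaviour): the wrapper
    # never reads past its declared limit even if the underlying file holds
    # more, e.g. LimitedStream(BytesIO(b'123456'), 3).read() yields b'123',
    # and a further read() is routed through on_exhausted().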
    def test_path_info_extraction(self):
        x = wsgi.extract_path_info('http://example.com/app', '/app/hello')
        self.assert_equal(x, u'/hello')
        x = wsgi.extract_path_info('http://example.com/app',
                                   'https://example.com/app/hello')
        self.assert_equal(x, u'/hello')
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app/hello')
        self.assert_equal(x, u'/hello')
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app')
        self.assert_equal(x, u'/')
        x = wsgi.extract_path_info(u'http://☃.net/', u'/fööbär')
        self.assert_equal(x, u'/fööbär')
        x = wsgi.extract_path_info(u'http://☃.net/x', u'http://☃.net/x/fööbär')
        self.assert_equal(x, u'/fööbär')

        env = create_environ(u'/fööbär', u'http://☃.net/x/')
        x = wsgi.extract_path_info(env, u'http://☃.net/x/fööbär')
        self.assert_equal(x, u'/fööbär')

        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/a/hello')
        self.assert_is_none(x)
        x = wsgi.extract_path_info('http://example.com/app/',
                                   'https://example.com/app/hello',
                                   collapse_http_schemes=False)
        self.assert_is_none(x)

    def test_get_host_fallback(self):
        self.assert_equal(wsgi.get_host({
            'SERVER_NAME': 'foobar.example.com',
            'wsgi.url_scheme': 'http',
            'SERVER_PORT': '80'
        }), 'foobar.example.com')
        self.assert_equal(wsgi.get_host({
            'SERVER_NAME': 'foobar.example.com',
            'wsgi.url_scheme': 'http',
            'SERVER_PORT': '81'
        }), 'foobar.example.com:81')

    def test_get_current_url_unicode(self):
        env = create_environ()
        env['QUERY_STRING'] = 'foo=bar&baz=blah&meh=\xcf'
        rv = wsgi.get_current_url(env)
        self.assert_strict_equal(
            rv, u'http://localhost/?foo=bar&baz=blah&meh=\ufffd')

    def test_multi_part_line_breaks(self):
        data = 'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
        test_stream = NativeStringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=16))
        self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
                                  'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])

        data = 'abc\r\nThis line is broken by the buffer length.' \
            '\r\nFoo bar baz'
        test_stream = NativeStringIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=24))
        self.assert_equal(lines, ['abc\r\n', 'This line is broken by the '
                                  'buffer length.\r\n', 'Foo bar baz'])

    def test_multi_part_line_breaks_bytes(self):
        data = b'abcdef\r\nghijkl\r\nmnopqrstuvwxyz\r\nABCDEFGHIJK'
        test_stream = BytesIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=16))
        self.assert_equal(lines, [b'abcdef\r\n', b'ghijkl\r\n',
                                  b'mnopqrstuvwxyz\r\n', b'ABCDEFGHIJK'])

        data = b'abc\r\nThis line is broken by the buffer length.' \
            b'\r\nFoo bar baz'
        test_stream = BytesIO(data)
        lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                         buffer_size=24))
        self.assert_equal(lines, [b'abc\r\n', b'This line is broken by the '
                                  b'buffer length.\r\n', b'Foo bar baz'])

    def test_multi_part_line_breaks_problematic(self):
        data = 'abc\rdef\r\nghi'
        for x in range(1, 10):
            test_stream = NativeStringIO(data)
            lines = list(wsgi.make_line_iter(test_stream, limit=len(data),
                                             buffer_size=4))
            self.assert_equal(lines, ['abc\r', 'def\r\n', 'ghi'])

    def test_iter_functions_support_iterators(self):
        data = ['abcdef\r\nghi', 'jkl\r\nmnopqrstuvwxyz\r', '\nABCDEFGHIJK']
        lines = list(wsgi.make_line_iter(data))
        self.assert_equal(lines, ['abcdef\r\n', 'ghijkl\r\n',
                                  'mnopqrstuvwxyz\r\n', 'ABCDEFGHIJK'])

    def test_make_chunk_iter(self):
        data = [u'abcdefXghi', u'jklXmnopqrstuvwxyzX', u'ABCDEFGHIJK']
        rv = list(wsgi.make_chunk_iter(data, 'X'))
        self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
                               u'ABCDEFGHIJK'])

        data = u'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
        test_stream = StringIO(data)
        rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
                                       buffer_size=4))
        self.assert_equal(rv, [u'abcdef', u'ghijkl', u'mnopqrstuvwxyz',
                               u'ABCDEFGHIJK'])

    def test_make_chunk_iter_bytes(self):
        data = [b'abcdefXghi', b'jklXmnopqrstuvwxyzX', b'ABCDEFGHIJK']
        rv = list(wsgi.make_chunk_iter(data, 'X'))
        self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
                               b'ABCDEFGHIJK'])

        data = b'abcdefXghijklXmnopqrstuvwxyzXABCDEFGHIJK'
        test_stream = BytesIO(data)
        rv = list(wsgi.make_chunk_iter(test_stream, 'X', limit=len(data),
                                       buffer_size=4))
        self.assert_equal(rv, [b'abcdef', b'ghijkl', b'mnopqrstuvwxyz',
                               b'ABCDEFGHIJK'])

    def test_lines_longer_buffer_size(self):
        data = '1234567890\n1234567890\n'
        # use the loop variable as the buffer size; the constant
        # buffer_size=4 left 'bufsize' unused and tested nothing new
        for bufsize in range(1, 15):
            lines = list(wsgi.make_line_iter(NativeStringIO(data),
                                             limit=len(data),
                                             buffer_size=bufsize))
            self.assert_equal(lines, ['1234567890\n', '1234567890\n'])


def suite():
    suite = unittest.TestSuite()
    suite.addTest(unittest.makeSuite(WSGIUtilsTestCase))
    return suite


from django import forms
from django.apps import apps
from django.contrib.auth import get_user_model, get_permission_codename
from django.contrib.auth.models import Permission
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.template.defaultfilters import slugify
from django.utils.encoding import force_str
from django.utils.translation import gettext, gettext_lazy as _

from cms import api
from cms.apphook_pool import apphook_pool
from cms.cache.permissions import clear_permission_cache
from cms.exceptions import PluginLimitReached
from cms.extensions import extension_pool
from cms.constants import PAGE_TYPES_ID, PUBLISHER_STATE_DIRTY, ROOT_USER_LEVEL
from cms.forms.validators import (validate_relative_url, validate_url_uniqueness,
                                  validate_overwrite_url)
from cms.forms.widgets import UserSelectAdminWidget, AppHookSelect, ApplicationConfigSelect
from cms.models import (CMSPlugin, Page, PageType, PagePermission, PageUser,
                        PageUserGroup, Title, Placeholder, GlobalPagePermission,
                        TreeNode)
from cms.models.permissionmodels import User
from cms.plugin_pool import plugin_pool
from cms.signals.apphook import set_restart_trigger
from cms.utils.conf import get_cms_setting
from cms.utils.compat.forms import UserChangeForm
from cms.utils.i18n import get_language_list, \
get_language_object from cms.utils.permissions import ( get_current_user, get_subordinate_users, get_subordinate_groups, get_user_permission_level, ) from menus.menu_pool import menu_pool def get_permission_accessor(obj): User = get_user_model() if isinstance(obj, (PageUser, User,)): rel_name = 'user_permissions' else: rel_name = 'permissions' return getattr(obj, rel_name) def get_page_changed_by_filter_choices(): # This is not site-aware # Been like this forever # Would be nice for it to filter out by site values = ( Page .objects .filter(publisher_is_draft=True) .distinct() .order_by('changed_by') .values_list('changed_by', flat=True) ) yield ('', _('All')) for value in values: yield (value, value) def get_page_template_filter_choices(): yield ('', _('All')) for value, name in get_cms_setting('TEMPLATES'): yield (value, name) def save_permissions(data, obj): models = ( (Page, 'page'), (PageUser, 'pageuser'), (PageUserGroup, 'pageuser'), (PagePermission, 'pagepermission'), ) if not obj.pk: # save obj, otherwise we can't assign permissions to him obj.save() permission_accessor = get_permission_accessor(obj) for model, name in models: content_type = ContentType.objects.get_for_model(model) for key in ('add', 'change', 'delete'): # add permission `key` for model `model` codename = get_permission_codename(key, model._meta) permission = Permission.objects.get(content_type=content_type, codename=codename) field = 'can_%s_%s' % (key, name) if data.get(field): permission_accessor.add(permission) elif field in data: permission_accessor.remove(permission) class CopyPermissionForm(forms.Form): """ Holds the specific field for permissions """ copy_permissions = forms.BooleanField( label=_('Copy permissions'), required=False, initial=True, ) class BasePageForm(forms.ModelForm): _user = None _site = None _language = None title = forms.CharField(label=_("Title"), max_length=255, widget=forms.TextInput(), help_text=_('The default title')) slug = forms.CharField(label=_("Slug"), max_length=255, widget=forms.TextInput(), help_text=_('The part of the title that is used in the URL')) menu_title = forms.CharField(label=_("Menu Title"), widget=forms.TextInput(), help_text=_('Overwrite what is displayed in the menu'), required=False) page_title = forms.CharField(label=_("Page Title"), widget=forms.TextInput(), help_text=_('Overwrites what is displayed at the top of your browser or in bookmarks'), required=False) meta_description = forms.CharField(label=_('Description meta tag'), required=False, widget=forms.Textarea(attrs={'maxlength': '320', 'rows': '4'}), help_text=_('A description of the page used by search engines.'), max_length=320) class Meta: model = Page fields = [] def clean_slug(self): slug = slugify(self.cleaned_data['slug']) if not slug: raise ValidationError(_("Slug must not be empty.")) return slug class AddPageForm(BasePageForm): source = forms.ModelChoiceField( label=_(u'Page type'), queryset=Page.objects.filter( is_page_type=True, publisher_is_draft=True, ), required=False, ) parent_node = forms.ModelChoiceField( queryset=TreeNode.objects.all(), required=False, widget=forms.HiddenInput(), ) class Meta: model = Page fields = ['source'] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) source_field = self.fields.get('source') if not source_field or source_field.widget.is_hidden: return root_page = PageType.get_root_page(site=self._site) if root_page: # Set the choicefield's choices to the various page_types descendants = 
root_page.get_descendant_pages().filter(is_page_type=True) titles = Title.objects.filter(page__in=descendants, language=self._language) choices = [('', '---------')] choices.extend((title.page_id, title.title) for title in titles) source_field.choices = choices else: choices = [] if len(choices) < 2: source_field.widget = forms.HiddenInput() def clean(self): data = self.cleaned_data if self._errors: # Form already has errors, best to let those be # addressed first. return data parent_node = data.get('parent_node') if parent_node: slug = data['slug'] parent_path = parent_node.item.get_path(self._language) path = u'%s/%s' % (parent_path, slug) if parent_path else slug else: path = data['slug'] try: # Validate the url validate_url_uniqueness( self._site, path=path, language=self._language, ) except ValidationError as error: self.add_error('slug', error) else: data['path'] = path return data def clean_parent_node(self): parent_node = self.cleaned_data.get('parent_node') if parent_node and parent_node.site_id != self._site.pk: raise ValidationError("Site doesn't match the parent's page site") return parent_node def create_translation(self, page): data = self.cleaned_data title_kwargs = { 'page': page, 'language': self._language, 'slug': data['slug'], 'path': data['path'], 'title': data['title'], } if 'menu_title' in data: title_kwargs['menu_title'] = data['menu_title'] if 'page_title' in data: title_kwargs['page_title'] = data['page_title'] if 'meta_description' in data: title_kwargs['meta_description'] = data['meta_description'] return api.create_title(**title_kwargs) def from_source(self, source, parent=None): new_page = source.copy( site=self._site, parent_node=parent, language=self._language, translations=False, permissions=False, extensions=False, ) new_page.update(is_page_type=False, in_navigation=True) return new_page def get_template(self): return Page.TEMPLATE_DEFAULT def save(self, *args, **kwargs): source = self.cleaned_data.get('source') parent = self.cleaned_data.get('parent_node') if source: new_page = self.from_source(source, parent=parent) for lang in source.get_languages(): source._copy_contents(new_page, lang) else: new_page = super().save(commit=False) new_page.template = self.get_template() new_page.set_tree_node(self._site, target=parent, position='last-child') new_page.save() translation = self.create_translation(new_page) if source: extension_pool.copy_extensions( source_page=source, target_page=new_page, languages=[translation.language], ) is_first = not ( TreeNode .objects .get_for_site(self._site) .exclude(pk=new_page.node_id) .exists() ) new_page.rescan_placeholders() if is_first and not new_page.is_page_type: # its the first page. publish it right away new_page.publish(translation.language) new_page.set_as_homepage(self._user) new_page.clear_cache(menu=True) return new_page class AddPageTypeForm(AddPageForm): menu_title = None meta_description = None page_title = None source = forms.ModelChoiceField( queryset=Page.objects.drafts(), required=False, widget=forms.HiddenInput(), ) def get_or_create_root(self): """ Creates the root node used to store all page types for the current site if it doesn't exist. 
""" root_page = PageType.get_root_page(site=self._site) if not root_page: root_page = Page( publisher_is_draft=True, in_navigation=False, is_page_type=True, ) root_page.set_tree_node(self._site) root_page.save() if not root_page.has_translation(self._language): api.create_title( language=self._language, title=gettext('Page Types'), page=root_page, slug=PAGE_TYPES_ID, path=PAGE_TYPES_ID, ) return root_page.node def clean_parent_node(self): parent_node = super().clean_parent_node() if parent_node and not parent_node.item.is_page_type: raise ValidationError("Parent has to be a page type.") if not parent_node: # parent was not explicitly selected. # fallback to the page types root parent_node = self.get_or_create_root() return parent_node def from_source(self, source, parent=None): new_page = source.copy( site=self._site, parent_node=parent, language=self._language, translations=False, permissions=False, extensions=False, ) new_page.update(is_page_type=True, in_navigation=False) return new_page def save(self, *args, **kwargs): new_page = super().save(*args, **kwargs) if not self.cleaned_data.get('source'): # User has created a page-type via "Add page" # instead of from another page. new_page.update( draft_only=True, is_page_type=True, in_navigation=False, ) return new_page class DuplicatePageForm(AddPageForm): source = forms.ModelChoiceField( queryset=Page.objects.drafts(), required=True, widget=forms.HiddenInput(), ) class ChangePageForm(BasePageForm): translation_fields = ( 'slug', 'title', 'meta_description', 'menu_title', 'page_title', ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.title_obj = self.instance.get_title_obj( language=self._language, fallback=False, force_reload=True, ) for field in self.translation_fields: if field in self.fields: self.fields[field].initial = getattr(self.title_obj, field) def clean(self): data = super().clean() if self._errors: # Form already has errors, best to let those be # addressed first. return data page = self.instance if page.is_home: data['path'] = '' return data if self.title_obj.has_url_overwrite: data['path'] = self.title_obj.path return data if 'slug' not in self.fields: # the {% edit_title_fields %} template tag # allows users to edit specific fields for a translation. # as a result, slug might not always be there. return data if page.parent_page: slug = data['slug'] parent_path = page.parent_page.get_path(self._language) path = u'%s/%s' % (parent_path, slug) if parent_path else slug else: path = data['slug'] try: # Validate the url validate_url_uniqueness( self._site, path=path, language=self._language, exclude_page=page, ) except ValidationError as error: self.add_error('slug', error) else: data['path'] = path return data def save(self, commit=True): data = self.cleaned_data cms_page = super().save(commit=False) translation_data = {field: data[field] for field in self.translation_fields if field in data} if 'path' in data: # The path key is set if # the slug field is present in the form, # or if the page being edited is the home page, # or if the translation has a url override. 
translation_data['path'] = data['path'] update_count = cms_page.update_translations( self._language, publisher_state=PUBLISHER_STATE_DIRTY, **translation_data ) if self._language in cms_page.title_cache: del cms_page.title_cache[self._language] if update_count == 0: api.create_title(language=self._language, page=cms_page, **translation_data) # _update_title_path_recursive should be called if the new page is the parent # of already created children in multilingual sites. cms_page._update_title_path_recursive(self._language, slug=self.data['slug']) cms_page.clear_cache(menu=True) return cms_page class PublicationDatesForm(forms.ModelForm): class Meta: model = Page fields = ['publication_date', 'publication_end_date'] def save(self, *args, **kwargs): page = super().save(*args, **kwargs) page.clear_cache(menu=True) return page class AdvancedSettingsForm(forms.ModelForm): from cms.forms.fields import PageSmartLinkField _user = None _site = None _language = None application_urls = forms.ChoiceField(label=_('Application'), choices=(), required=False, help_text=_('Hook application to this page.')) overwrite_url = forms.CharField(label=_('Overwrite URL'), max_length=255, required=False, help_text=_('Keep this field empty if standard path should be used.')) xframe_options = forms.ChoiceField( choices=Page._meta.get_field('xframe_options').choices, label=_('X Frame Options'), help_text=_('Whether this page can be embedded in other pages or websites'), initial=Page._meta.get_field('xframe_options').default, required=False ) redirect = PageSmartLinkField(label=_('Redirect'), required=False, help_text=_('Redirects to this URL.'), placeholder_text=_('Start typing...'), ajax_view='admin:cms_page_get_published_pagelist', ) # This is really a 'fake' field which does not correspond to any Page attribute # But creates a stub field to be populate by js application_configs = forms.CharField( label=_('Application configurations'), required=False, widget=ApplicationConfigSelect, ) fieldsets = ( (None, { 'fields': ('overwrite_url', 'redirect'), }), (_('Language independent options'), { 'fields': ('template', 'reverse_id', 'soft_root', 'navigation_extenders', 'application_urls', 'application_namespace', 'application_configs', 'xframe_options',) }) ) class Meta: model = Page fields = [ 'template', 'reverse_id', 'overwrite_url', 'redirect', 'soft_root', 'navigation_extenders', 'application_urls', 'application_namespace', "xframe_options", ] def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.title_obj = self.instance.get_title_obj( language=self._language, fallback=False, force_reload=True, ) if 'navigation_extenders' in self.fields: navigation_extenders = self.get_navigation_extenders() self.fields['navigation_extenders'].widget = forms.Select( {}, [('', "---------")] + navigation_extenders) if 'application_urls' in self.fields: # Prepare a dict mapping the apps by class name ('PollApp') to # their app_name attribute ('polls'), if any. 
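            # e.g. a hypothetical apphook class 'PollApp' whose app_name is
            # 'polls' yields app_namespaces == {'PollApp': 'polls'}; apphooks
            # without an app_name are simply left out of the mapping.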
app_namespaces = {} app_configs = {} for hook in apphook_pool.get_apphooks(): app = apphook_pool.get_apphook(hook[0]) if app.app_name: app_namespaces[hook[0]] = app.app_name if app.app_config: app_configs[hook[0]] = app self.fields['application_urls'].widget = AppHookSelect( attrs={'id': 'application_urls'}, app_namespaces=app_namespaces ) self.fields['application_urls'].choices = [('', "---------")] + apphook_pool.get_apphooks() page_data = self.data if self.data else self.initial if app_configs: self.fields['application_configs'].widget = ApplicationConfigSelect( attrs={'id': 'application_configs'}, app_configs=app_configs, ) if page_data.get('application_urls', False) and page_data['application_urls'] in app_configs: configs = app_configs[page_data['application_urls']].get_configs() self.fields['application_configs'].widget.choices = [(config.pk, force_str(config)) for config in configs] try: config = configs.get(namespace=self.initial['application_namespace']) self.fields['application_configs'].initial = config.pk except ObjectDoesNotExist: # Provided apphook configuration doesn't exist (anymore), # just skip it # The user will choose another value anyway pass if 'redirect' in self.fields: self.fields['redirect'].widget.language = self._language self.fields['redirect'].initial = self.title_obj.redirect if 'overwrite_url' in self.fields and self.title_obj.has_url_overwrite: self.fields['overwrite_url'].initial = self.title_obj.path def get_apphooks(self): for hook in apphook_pool.get_apphooks(): yield (hook[0], apphook_pool.get_apphook(hook[0])) def get_apphooks_with_config(self): return {key: app for key, app in self.get_apphooks() if app.app_config} def get_navigation_extenders(self): return menu_pool.get_menus_by_attribute("cms_enabled", True) def _check_unique_namespace_instance(self, namespace): return Page.objects.drafts().on_site(self._site).filter( application_namespace=namespace ).exclude(pk=self.instance.pk).exists() def clean(self): cleaned_data = super().clean() if cleaned_data.get("overwrite_url"): # Assuming that the user enters a full URL in the overwrite_url input. # Here we validate it before publishing the page and if it contains # reserved characters (e.g. $?:#), we add error in the form. # issue 6934 url = cleaned_data.get("overwrite_url") if url and not validate_overwrite_url(value=url): self._errors['overwrite_url'] = self.error_class([_('You entered an invalid URL.')]) if self._errors: # Fail fast if there's errors in the form return cleaned_data # Language has been validated already # so we know it exists. language_name = get_language_object( self._language, site_id=self._site.pk, )['name'] if not self.title_obj.slug: # This covers all cases where users try to edit # page advanced settings without setting a title slug # for page titles that already exist. message = _("Please set the %(language)s slug " "before editing its advanced settings.") raise ValidationError(message % {'language': language_name}) if 'reverse_id' in self.fields: reverse_id = cleaned_data['reverse_id'] if reverse_id: lookup = Page.objects.drafts().on_site(self._site).filter(reverse_id=reverse_id) if lookup.exclude(pk=self.instance.pk).exists(): self._errors['reverse_id'] = self.error_class( [_('A page with this reverse URL id exists already.')]) apphook = cleaned_data.get('application_urls', None) # The field 'application_namespace' is a misnomer. It should be # 'instance_namespace'. 
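        # Resolution sketch for the branches below: a selected app config's
        # namespace wins first, then an explicitly entered instance
        # namespace, and finally the apphook's own app_name is used as the
        # default.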
instance_namespace = cleaned_data.get('application_namespace', None) application_config = cleaned_data.get('application_configs', None) if apphook: apphooks_with_config = self.get_apphooks_with_config() # application_config wins over application_namespace if apphook in apphooks_with_config and application_config: # the value of the application config namespace is saved in # the 'usual' namespace field to be backward compatible # with existing apphooks try: appconfig_pk = forms.IntegerField(required=True).to_python(application_config) except ValidationError: self._errors['application_configs'] = ErrorList([ _('Invalid application config value') ]) return self.cleaned_data try: config = apphooks_with_config[apphook].get_configs().get(pk=appconfig_pk) except ObjectDoesNotExist: self._errors['application_configs'] = ErrorList([ _('Invalid application config value') ]) return self.cleaned_data if self._check_unique_namespace_instance(config.namespace): # Looks like there's already one with the default instance # namespace defined. self._errors['application_configs'] = ErrorList([ _('An application instance using this configuration already exists.') ]) else: self.cleaned_data['application_namespace'] = config.namespace else: if instance_namespace: if self._check_unique_namespace_instance(instance_namespace): self._errors['application_namespace'] = ErrorList([ _('An application instance with this name already exists.') ]) else: # The attribute on the apps 'app_name' is a misnomer, it should be # 'application_namespace'. application_namespace = apphook_pool.get_apphook(apphook).app_name if application_namespace and not instance_namespace: if self._check_unique_namespace_instance(application_namespace): # Looks like there's already one with the default instance # namespace defined. self._errors['application_namespace'] = ErrorList([ _('An application instance with this name already exists.') ]) else: # OK, there are zero instances of THIS app that use the # default instance namespace, so, since the user didn't # provide one, we'll use the default. NOTE: The following # line is really setting the "instance namespace" of the # new app to the app’s "application namespace", which is # the default instance namespace. self.cleaned_data['application_namespace'] = application_namespace if instance_namespace and not apphook: self.cleaned_data['application_namespace'] = None if application_config and not apphook: self.cleaned_data['application_configs'] = None return self.cleaned_data def clean_xframe_options(self): if 'xframe_options' not in self.fields: return # nothing to do, field isn't present xframe_options = self.cleaned_data['xframe_options'] if xframe_options == '': return Page._meta.get_field('xframe_options').default return xframe_options def clean_overwrite_url(self): path_override = self.cleaned_data.get('overwrite_url') if path_override: path = path_override.strip('/') else: path = self.instance.get_path_for_slug(self.title_obj.slug, self._language) validate_url_uniqueness( self._site, path=path, language=self._language, exclude_page=self.instance, ) self.cleaned_data['path'] = path return path_override def has_changed_apphooks(self): changed_data = self.changed_data if 'application_urls' in changed_data: return True return 'application_namespace' in changed_data def update_apphooks(self): # User has changed the apphooks on the page. # Update the public version of the page to reflect this change immediately. 
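        # Only the public counterpart needs this manual update: the draft
        # row has already been written by save() before this method runs.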
public_id = self.instance.publisher_public_id self._meta.model.objects.filter(pk=public_id).update( application_urls=self.instance.application_urls, application_namespace=(self.instance.application_namespace or None), ) # Connects the apphook restart handler to the request finished signal set_restart_trigger() def save(self, *args, **kwargs): data = self.cleaned_data page = super().save(*args, **kwargs) page.update_translations( self._language, path=data['path'], redirect=(data.get('redirect') or None), publisher_state=PUBLISHER_STATE_DIRTY, has_url_overwrite=bool(data.get('overwrite_url')), ) is_draft_and_has_public = page.publisher_is_draft and page.publisher_public_id if is_draft_and_has_public and self.has_changed_apphooks(): self.update_apphooks() page.clear_cache(menu=True) return page class PagePermissionForm(forms.ModelForm): class Meta: model = Page fields = ['login_required', 'limit_visibility_in_menu'] def save(self, *args, **kwargs): page = super().save(*args, **kwargs) page.clear_cache(menu=True) clear_permission_cache() return page class PageTreeForm(forms.Form): position = forms.IntegerField(initial=0, required=True) target = forms.ModelChoiceField(queryset=Page.objects.none(), required=False) def __init__(self, *args, **kwargs): self.page = kwargs.pop('page') self._site = kwargs.pop('site', Site.objects.get_current()) super().__init__(*args, **kwargs) self.fields['target'].queryset = Page.objects.drafts().filter( node__site=self._site, is_page_type=self.page.is_page_type, ) def get_root_nodes(self): # TODO: this needs to avoid using the pages accessor directly nodes = TreeNode.get_root_nodes() return nodes.exclude(cms_pages__is_page_type=not(self.page.is_page_type)) def get_tree_options(self): position = self.cleaned_data['position'] target_page = self.cleaned_data.get('target') parent_node = target_page.node if target_page else None if parent_node: return self._get_tree_options_for_parent(parent_node, position) return self._get_tree_options_for_root(position) def _get_tree_options_for_root(self, position): siblings = self.get_root_nodes().filter(site=self._site) try: target_node = siblings[position] except IndexError: # The position requested is not occupied. # Add the node as the last root node, # relative to the current site. return (siblings.reverse()[0], 'right') return (target_node, 'left') def _get_tree_options_for_parent(self, parent_node, position): if position == 0: return (parent_node, 'first-child') siblings = parent_node.get_children().filter(site=self._site) try: target_node = siblings[position] except IndexError: # The position requested is not occupied. # Add the node to be the parent's first child return (parent_node, 'last-child') return (target_node, 'left') class MovePageForm(PageTreeForm): def clean(self): cleaned_data = super().clean() if self.page.is_home and cleaned_data.get('target'): self.add_error('target', force_str(_('You can\'t move the home page inside another page'))) return cleaned_data def get_tree_options(self): options = super().get_tree_options() target_node, target_node_position = options if target_node_position != 'left': return (target_node, target_node_position) node = self.page.node node_is_first = node.path < target_node.path if node_is_first and node.is_sibling_of(target_node): # The node being moved appears before the target node # and is a sibling of the target node. # The user is moving from left to right. 
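            # e.g. (hypothetical tree) node A currently precedes its sibling
            # target C, so the drop point is expressed relative to C as
            # 'right' rather than 'left'.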
target_node_position = 'right' elif node_is_first: # The node being moved appears before the target node # but is not a sibling of the target node. # The user is moving from right to left. target_node_position = 'left' else: # The node being moved appears after the target node. # The user is moving from right to left. target_node_position = 'left' return (target_node, target_node_position) def move_page(self): self.page.move_page(*self.get_tree_options()) class CopyPageForm(PageTreeForm): source_site = forms.ModelChoiceField(queryset=Site.objects.all(), required=True) copy_permissions = forms.BooleanField(initial=False, required=False) def copy_page(self): target, position = self.get_tree_options() copy_permissions = self.cleaned_data.get('copy_permissions', False) new_page = self.page.copy_with_descendants( target_node=target, position=position, copy_permissions=copy_permissions, target_site=self._site, ) new_page.clear_cache(menu=True) return new_page def _get_tree_options_for_root(self, position): try: return super()._get_tree_options_for_root(position) except IndexError: # The user is copying a page to a site with no pages # Add the node as the last root node. siblings = self.get_root_nodes().reverse() return (siblings[0], 'right') class ChangeListForm(forms.Form): BOOLEAN_CHOICES = ( ('', _('All')), ('1', _('Yes')), ('0', _('No')), ) q = forms.CharField(required=False, widget=forms.HiddenInput()) in_navigation = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES) template = forms.ChoiceField(required=False) changed_by = forms.ChoiceField(required=False) soft_root = forms.ChoiceField(required=False, choices=BOOLEAN_CHOICES) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['changed_by'].choices = get_page_changed_by_filter_choices() self.fields['template'].choices = get_page_template_filter_choices() def is_filtered(self): data = self.cleaned_data if self.cleaned_data.get('q'): return True return any(bool(data.get(field.name)) for field in self.visible_fields()) def get_filter_items(self): for field in self.visible_fields(): value = self.cleaned_data.get(field.name) if value: yield (field.name, value) def run_filters(self, queryset): for field, value in self.get_filter_items(): query = {'{}__exact'.format(field): value} queryset = queryset.filter(**query) return queryset class BasePermissionAdminForm(forms.ModelForm): def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) permission_fields = self._meta.model.get_all_permissions() for field in permission_fields: if field not in self.base_fields: setattr(self.instance, field, False) class PagePermissionInlineAdminForm(BasePermissionAdminForm): """ Page permission inline admin form used in inline admin. Required, because user and group queryset must be changed. User can see only users on the same level or under him in chosen page tree, and users which were created by him, but aren't assigned to higher page level than current user. 
""" page = forms.ModelChoiceField( queryset=Page.objects.all(), label=_('user'), widget=HiddenInput(), required=True, ) def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) user = get_current_user() # current user from threadlocals site = Site.objects.get_current() sub_users = get_subordinate_users(user, site) limit_choices = True use_raw_id = False # Unfortunately, if there are > 500 users in the system, non-superusers # won't see any benefit here because if we ask Django to put all the # user PKs in limit_choices_to in the query string of the popup we're # in danger of causing 414 errors so we fall back to the normal input # widget. if get_cms_setting('RAW_ID_USERS'): if sub_users.count() < 500: # If there aren't too many users, proceed as normal and use a # raw id field with limit_choices_to limit_choices = True use_raw_id = True elif get_user_permission_level(user, site) == ROOT_USER_LEVEL: # If there are enough choices to possibly cause a 414 request # URI too large error, we only proceed with the raw id field if # the user is a superuser & thus can legitimately circumvent # the limit_choices_to condition. limit_choices = False use_raw_id = True # We don't use the fancy custom widget if the admin form wants to use a # raw id field for the user if use_raw_id: from django.contrib.admin.widgets import ForeignKeyRawIdWidget # This check will be False if the number of users in the system # is less than the threshold set by the RAW_ID_USERS setting. if isinstance(self.fields['user'].widget, ForeignKeyRawIdWidget): # We can't set a queryset on a raw id lookup, but we can use # the fact that it respects the limit_choices_to parameter. if limit_choices: self.fields['user'].widget.rel.limit_choices_to = dict( id__in=list(sub_users.values_list('pk', flat=True)) ) else: self.fields['user'].widget = UserSelectAdminWidget() self.fields['user'].queryset = sub_users self.fields['user'].widget.user = user # assign current user self.fields['group'].queryset = get_subordinate_groups(user, site) class Meta: fields = [ 'user', 'group', 'can_add', 'can_change', 'can_delete', 'can_publish', 'can_change_advanced_settings', 'can_change_permissions', 'can_move_page', 'grant_on', ] model = PagePermission class ViewRestrictionInlineAdminForm(BasePermissionAdminForm): page = forms.ModelChoiceField( queryset=Page.objects.all(), label=_('user'), widget=HiddenInput(), required=True, ) can_view = forms.BooleanField( label=_('can_view'), widget=HiddenInput(), initial=True, ) class Meta: fields = [ 'user', 'group', 'grant_on', 'can_view', ] model = PagePermission def clean_can_view(self): return True class GlobalPagePermissionAdminForm(BasePermissionAdminForm): class Meta: fields = [ 'user', 'group', 'can_add', 'can_change', 'can_delete', 'can_publish', 'can_change_advanced_settings', 'can_change_permissions', 'can_move_page', 'can_view', 'sites', ] model = GlobalPagePermission class GenericCmsPermissionForm(forms.ModelForm): """Generic form for User & Grup permissions in cms """ _current_user = None can_add_page = forms.BooleanField(label=_('Add'), required=False, initial=True) can_change_page = forms.BooleanField(label=_('Change'), required=False, initial=True) can_delete_page = forms.BooleanField(label=_('Delete'), required=False) # pageuser is for pageuser & group - they are combined together, # and read out from PageUser model can_add_pageuser = forms.BooleanField(label=_('Add'), required=False) can_change_pageuser = forms.BooleanField(label=_('Change'), required=False) can_delete_pageuser = 
forms.BooleanField(label=_('Delete'), required=False) can_add_pagepermission = forms.BooleanField(label=_('Add'), required=False) can_change_pagepermission = forms.BooleanField(label=_('Change'), required=False) can_delete_pagepermission = forms.BooleanField(label=_('Delete'), required=False) def __init__(self, *args, **kwargs): instance = kwargs.get('instance') initial = kwargs.get('initial') or {} if instance: initial = initial or {} initial.update(self.populate_initials(instance)) kwargs['initial'] = initial super().__init__(*args, **kwargs) def clean(self): data = super().clean() # Validate Page options if not data.get('can_change_page'): if data.get('can_add_page'): message = _("Users can't create a page without permissions " "to change the created page. Edit permissions required.") raise ValidationError(message) if data.get('can_delete_page'): message = _("Users can't delete a page without permissions " "to change the page. Edit permissions required.") raise ValidationError(message) if data.get('can_add_pagepermission'): message = _("Users can't set page permissions without permissions " "to change a page. Edit permissions required.") raise ValidationError(message) if data.get('can_delete_pagepermission'): message = _("Users can't delete page permissions without permissions " "to change a page. Edit permissions required.") raise ValidationError(message) # Validate PagePermission options if not data.get('can_change_pagepermission'): if data.get('can_add_pagepermission'): message = _("Users can't create page permissions without permissions " "to change the created permission. Edit permissions required.") raise ValidationError(message) if data.get('can_delete_pagepermission'): message = _("Users can't delete page permissions without permissions " "to change permissions. Edit permissions required.") raise ValidationError(message) def populate_initials(self, obj): """Read out permissions from permission system. 
""" initials = {} permission_accessor = get_permission_accessor(obj) for model in (Page, PageUser, PagePermission): name = model.__name__.lower() content_type = ContentType.objects.get_for_model(model) permissions = permission_accessor.filter(content_type=content_type).values_list('codename', flat=True) for key in ('add', 'change', 'delete'): codename = get_permission_codename(key, model._meta) initials['can_%s_%s' % (key, name)] = codename in permissions return initials def save(self, commit=True): instance = super().save(commit=False) instance.save() save_permissions(self.cleaned_data, instance) return instance class PageUserAddForm(forms.ModelForm): _current_user = None user = forms.ModelChoiceField(queryset=User.objects.none()) class Meta: fields = ['user'] model = PageUser def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.fields['user'].queryset = self.get_subordinates() def get_subordinates(self): subordinates = get_subordinate_users(self._current_user, self._current_site) return subordinates.filter(pageuser__isnull=True) def save(self, commit=True): user = self.cleaned_data['user'] instance = super().save(commit=False) instance.created_by = self._current_user for field in user._meta.fields: # assign all the fields - we can do this, because object is # subclassing User (one to one relation) value = getattr(user, field.name) setattr(instance, field.name, value) if commit: instance.save() return instance class PageUserChangeForm(UserChangeForm): _current_user = None class Meta: fields = '__all__' model = PageUser def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) if not self._current_user.is_superuser: # Limit permissions to include only # the permissions available to the manager. permissions = self.get_available_permissions() self.fields['user_permissions'].queryset = permissions # Limit groups to include only those where # the manager is a member. 
self.fields['groups'].queryset = self.get_available_groups() def get_available_permissions(self): permissions = self._current_user.get_all_permissions() permission_codes = (perm.rpartition('.')[-1] for perm in permissions) return Permission.objects.filter(codename__in=permission_codes) def get_available_groups(self): return self._current_user.groups.all() class PageUserGroupForm(GenericCmsPermissionForm): class Meta: model = PageUserGroup fields = ('name', ) def save(self, commit=True): if not self.instance.pk: self.instance.created_by = self._current_user return super().save(commit=commit) class PluginAddValidationForm(forms.Form): placeholder_id = forms.ModelChoiceField( queryset=Placeholder.objects.all(), required=True, ) plugin_language = forms.CharField(required=True) plugin_parent = forms.ModelChoiceField( CMSPlugin.objects.all(), required=False, ) plugin_type = forms.CharField(required=True) def clean_plugin_type(self): plugin_type = self.cleaned_data['plugin_type'] try: plugin_pool.get_plugin(plugin_type) except KeyError: message = gettext("Invalid plugin type '%s'") % plugin_type raise ValidationError(message) return plugin_type def clean(self): from cms.utils.plugins import has_reached_plugin_limit data = self.cleaned_data if self.errors: return data language = data['plugin_language'] placeholder = data['placeholder_id'] parent_plugin = data.get('plugin_parent') if language not in get_language_list(): message = gettext("Language must be set to a supported language!") self.add_error('plugin_language', message) return self.cleaned_data if parent_plugin: if parent_plugin.language != language: message = gettext("Parent plugin language must be same as language!") self.add_error('plugin_language', message) return self.cleaned_data if parent_plugin.placeholder_id != placeholder.pk: message = gettext("Parent plugin placeholder must be same as placeholder!") self.add_error('placeholder_id', message) return self.cleaned_data page = placeholder.page template = page.get_template() if page else None try: has_reached_plugin_limit( placeholder, data['plugin_type'], language, template=template, parent_plugin=parent_plugin ) except PluginLimitReached as error: self.add_error(None, force_str(error)) return self.cleaned_data class RequestToolbarForm(forms.Form): obj_id = forms.CharField(required=False) obj_type = forms.CharField(required=False) cms_path = forms.CharField(required=False) def clean(self): data = self.cleaned_data obj_id = data.get('obj_id') obj_type = data.get('obj_type') if not bool(obj_id or obj_type): return data if (obj_id and not obj_type) or (obj_type and not obj_id): message = 'Invalid object lookup. Both obj_id and obj_type are required' raise forms.ValidationError(message) app, sep, model = obj_type.rpartition('.') try: model_class = apps.get_model(app_label=app, model_name=model) except LookupError: message = 'Invalid object lookup. Both obj_id and obj_type are required' raise forms.ValidationError(message) try: generic_obj = model_class.objects.get(pk=obj_id) except model_class.DoesNotExist: message = 'Invalid object lookup. Both obj_id and obj_type are required' raise forms.ValidationError(message) else: data['attached_obj'] = generic_obj return data def clean_cms_path(self): path = self.cleaned_data.get('cms_path') if path: validate_relative_url(path) return path #!/usr/bin/env python2.7 # Copyright 2017 gRPC authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Build and upload docker images to Google Container Registry per matrix.""" from __future__ import print_function import argparse import atexit import multiprocessing import os import shutil import subprocess import sys import tempfile # Language Runtime Matrix import client_matrix python_util_dir = os.path.abspath( os.path.join(os.path.dirname(__file__), '../run_tests/python_utils')) sys.path.append(python_util_dir) import dockerjob import jobset _IMAGE_BUILDER = 'tools/run_tests/dockerize/build_interop_image.sh' _LANGUAGES = client_matrix.LANG_RUNTIME_MATRIX.keys() # All gRPC release tags, flattened, deduped and sorted. _RELEASES = sorted( list( set(release for release_dict in client_matrix.LANG_RELEASE_MATRIX.values() for release in release_dict.keys()))) # Destination directory inside docker image to keep extra info from build time. _BUILD_INFO = '/var/local/build_info' argp = argparse.ArgumentParser(description='Run interop tests.') argp.add_argument('--gcr_path', default='gcr.io/grpc-testing', help='Path of docker images in Google Container Registry') argp.add_argument('--release', default='master', choices=['all', 'master'] + _RELEASES, help='github commit tag to checkout. When building all ' 'releases defined in client_matrix.py, use "all". Valid only ' 'with --git_checkout.') argp.add_argument('-l', '--language', choices=['all'] + sorted(_LANGUAGES), nargs='+', default=['all'], help='Test languages to build docker images for.') argp.add_argument('--git_checkout', action='store_true', help='Use a separate git clone tree for building grpc stack. ' 'Required when using --release flag. By default, current' 'tree and the sibling will be used for building grpc stack.') argp.add_argument('--git_checkout_root', default='/export/hda3/tmp/grpc_matrix', help='Directory under which grpc-go/java/main repo will be ' 'cloned. Valid only with --git_checkout.') argp.add_argument('--keep', action='store_true', help='keep the created local images after uploading to GCR') argp.add_argument('--reuse_git_root', default=False, action='store_const', const=True, help='reuse the repo dir. If False, the existing git root ' 'directory will removed before a clean checkout, because ' 'reusing the repo can cause git checkout error if you switch ' 'between releases.') argp.add_argument( '--upload_images', action='store_true', help='If set, images will be uploaded to container registry after building.' ) args = argp.parse_args() def add_files_to_image(image, with_files, label=None): """Add files to a docker image. image: docker image name, i.e. grpc_interop_java:26328ad8 with_files: additional files to include in the docker image. label: label string to attach to the image. 
""" tag_idx = image.find(':') if tag_idx == -1: jobset.message('FAILED', 'invalid docker image %s' % image, do_newline=True) sys.exit(1) orig_tag = '%s_' % image subprocess.check_output(['docker', 'tag', image, orig_tag]) lines = ['FROM ' + orig_tag] if label: lines.append('LABEL %s' % label) temp_dir = tempfile.mkdtemp() atexit.register(lambda: subprocess.call(['rm', '-rf', temp_dir])) # Copy with_files inside the tmp directory, which will be the docker build # context. for f in with_files: shutil.copy(f, temp_dir) lines.append('COPY %s %s/' % (os.path.basename(f), _BUILD_INFO)) # Create a Dockerfile. with open(os.path.join(temp_dir, 'Dockerfile'), 'w') as f: f.write('\n'.join(lines)) jobset.message('START', 'Repackaging %s' % image, do_newline=True) build_cmd = ['docker', 'build', '--rm', '--tag', image, temp_dir] subprocess.check_output(build_cmd) dockerjob.remove_image(orig_tag, skip_nonexistent=True) def build_image_jobspec(runtime, env, gcr_tag, stack_base): """Build interop docker image for a language with runtime. runtime: a string, for example go1.8. env: dictionary of env to passed to the build script. gcr_tag: the tag for the docker image (i.e. v1.3.0). stack_base: the local gRPC repo path. """ basename = 'grpc_interop_%s' % runtime tag = '%s/%s:%s' % (args.gcr_path, basename, gcr_tag) build_env = {'INTEROP_IMAGE': tag, 'BASE_NAME': basename, 'TTY_FLAG': '-t'} build_env.update(env) image_builder_path = _IMAGE_BUILDER if client_matrix.should_build_docker_interop_image_from_release_tag(lang): image_builder_path = os.path.join(stack_base, _IMAGE_BUILDER) build_job = jobset.JobSpec(cmdline=[image_builder_path], environ=build_env, shortname='build_docker_%s' % runtime, timeout_seconds=30 * 60) build_job.tag = tag return build_job def build_all_images_for_lang(lang): """Build all docker images for a language across releases and runtimes.""" if not args.git_checkout: if args.release != 'master': print( 'Cannot use --release without also enabling --git_checkout.\n') sys.exit(1) releases = [args.release] else: if args.release == 'all': releases = client_matrix.get_release_tags(lang) else: # Build a particular release. if args.release not in ['master' ] + client_matrix.get_release_tags(lang): jobset.message('SKIPPED', '%s for %s is not defined' % (args.release, lang), do_newline=True) return [] releases = [args.release] images = [] for release in releases: images += build_all_images_for_release(lang, release) jobset.message('SUCCESS', 'All docker images built for %s at %s.' % (lang, releases), do_newline=True) return images def build_all_images_for_release(lang, release): """Build all docker images for a release across all runtimes.""" docker_images = [] build_jobs = [] env = {} # If we not using current tree or the sibling for grpc stack, do checkout. 
stack_base = '' if args.git_checkout: stack_base = checkout_grpc_stack(lang, release) var = { 'go': 'GRPC_GO_ROOT', 'java': 'GRPC_JAVA_ROOT', 'node': 'GRPC_NODE_ROOT' }.get(lang, 'GRPC_ROOT') env[var] = stack_base for runtime in client_matrix.get_runtimes_for_lang_release(lang, release): job = build_image_jobspec(runtime, env, release, stack_base) docker_images.append(job.tag) build_jobs.append(job) jobset.message('START', 'Building interop docker images.', do_newline=True) print('Jobs to run: \n%s\n' % '\n'.join(str(j) for j in build_jobs)) num_failures, _ = jobset.run(build_jobs, newline_on_success=True, maxjobs=multiprocessing.cpu_count()) if num_failures: jobset.message('FAILED', 'Failed to build interop docker images.', do_newline=True) docker_images_cleanup.extend(docker_images) sys.exit(1) jobset.message('SUCCESS', 'All docker images built for %s at %s.' % (lang, release), do_newline=True) if release != 'master': commit_log = os.path.join(stack_base, 'commit_log') if os.path.exists(commit_log): for image in docker_images: add_files_to_image(image, [commit_log], 'release=%s' % release) return docker_images def cleanup(): if not args.keep: for image in docker_images_cleanup: dockerjob.remove_image(image, skip_nonexistent=True) docker_images_cleanup = [] atexit.register(cleanup) def maybe_apply_patches_on_git_tag(stack_base, lang, release): files_to_patch = [] release_info = client_matrix.LANG_RELEASE_MATRIX[lang].get(release) if release_info: files_to_patch = release_info.patch if not files_to_patch: return patch_file_relative_path = 'patches/%s_%s/git_repo.patch' % (lang, release) patch_file = os.path.abspath( os.path.join(os.path.dirname(__file__), patch_file_relative_path)) if not os.path.exists(patch_file): jobset.message('FAILED', 'expected patch file |%s| to exist' % patch_file) sys.exit(1) subprocess.check_output(['git', 'apply', patch_file], cwd=stack_base, stderr=subprocess.STDOUT) # TODO(jtattermusch): this really would need simplification and refactoring # - "git add" and "git commit" can easily be done in a single command # - it looks like the only reason for the existence of the "files_to_patch" # entry is to perform "git add" - which is clumsy and fragile. # - we only allow a single patch with name "git_repo.patch". A better design # would be to allow multiple patches that can have more descriptive names. for repo_relative_path in files_to_patch: subprocess.check_output(['git', 'add', repo_relative_path], cwd=stack_base, stderr=subprocess.STDOUT) subprocess.check_output([ 'git', 'commit', '-m', ('Hack performed on top of %s git ' 'tag in order to build and run the %s ' 'interop tests on that tag.' % (release, lang)) ], cwd=stack_base, stderr=subprocess.STDOUT) def checkout_grpc_stack(lang, release): """Invokes 'git checkout' for the lang/release and returns the directory created.""" assert args.git_checkout and args.git_checkout_root if not os.path.exists(args.git_checkout_root): os.makedirs(args.git_checkout_root) repo = client_matrix.get_github_repo(lang) # Get the subdir name part of repo # For example, 'git@github.com:grpc/grpc-go.git' should use 'grpc-go'. repo_dir = os.path.splitext(os.path.basename(repo))[0] stack_base = os.path.join(args.git_checkout_root, repo_dir) # Clean up leftover repo dir if necessary.
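# With --reuse_git_root the existing clone is kept between runs; otherwise it is wiped and re-cloned, since checking out a different release in a reused tree can fail on leftover state.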
if not args.reuse_git_root and os.path.exists(stack_base): jobset.message('START', 'Removing git checkout root.', do_newline=True) shutil.rmtree(stack_base) if not os.path.exists(stack_base): subprocess.check_call(['git', 'clone', '--recursive', repo], cwd=os.path.dirname(stack_base)) # git checkout. jobset.message('START', 'git checkout %s from %s' % (release, stack_base), do_newline=True) # We should NEVER do checkout on current tree !!! assert not os.path.dirname(__file__).startswith(stack_base) output = subprocess.check_output(['git', 'checkout', release], cwd=stack_base, stderr=subprocess.STDOUT) maybe_apply_patches_on_git_tag(stack_base, lang, release) commit_log = subprocess.check_output(['git', 'log', '-1'], cwd=stack_base) jobset.message('SUCCESS', 'git checkout', '%s: %s' % (str(output), commit_log), do_newline=True) # git submodule update jobset.message('START', 'git submodule update --init at %s from %s' % (release, stack_base), do_newline=True) subprocess.check_call(['git', 'submodule', 'update', '--init'], cwd=stack_base, stderr=subprocess.STDOUT) jobset.message('SUCCESS', 'git submodule update --init', '%s: %s' % (str(output), commit_log), do_newline=True) # Write git log to commit_log so it can be packaged with the docker image. with open(os.path.join(stack_base, 'commit_log'), 'w') as f: f.write(commit_log) return stack_base languages = args.language if args.language != ['all'] else _LANGUAGES for lang in languages: docker_images = build_all_images_for_lang(lang) for image in docker_images: if args.upload_images: jobset.message('START', 'Uploading %s' % image, do_newline=True) # docker image name must be in the format <gcr_path>/<image>:<gcr_tag> assert image.startswith(args.gcr_path) and image.find(':') != -1 subprocess.call(['gcloud', 'docker', '--', 'push', image]) else: # Uploading (and overwriting images) by default can easily break things. print( 'Not uploading image %s, run with --upload_images to upload.' % image) # -*- coding: utf-8 -*- """ pygments.scanner ~~~~~~~~~~~~~~~~ This library implements a regex based scanner. Some languages like Pascal are easy to parse but have some keywords that depend on the context. Because of this it's impossible to lex that just by using a regular expression lexer like the `RegexLexer`. Have a look at the `DelphiLexer` to get an idea of how to use this scanner. :copyright: Copyright 2006-2013 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re class EndOfText(RuntimeError): """ Raise if end of text is reached and the user tried to call a match function. """ class Scanner(object): """ Simple scanner All method patterns are regular expression strings (not compiled expressions!) """ def __init__(self, text, flags=0): """ :param text: The text which should be scanned :param flags: default regular expression flags """ self.data = text self.data_length = len(text) self.start_pos = 0 self.pos = 0 self.flags = flags self.last = None self.match = None self._re_cache = {} def eos(self): """`True` if the scanner reached the end of text.""" return self.pos >= self.data_length eos = property(eos, eos.__doc__) def check(self, pattern): """ Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead. """ if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) return self._re_cache[pattern].match(self.data, self.pos) def test(self, pattern): """Apply a pattern on the current position and check if it matches.
Doesn't touch pos.""" return self.check(pattern) is not None def scan(self, pattern): """ Scan the text for the given pattern and update pos/match and related fields. The return value is a boolean that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the pointer before the pattern was matched, ``pos`` is the end position. """ if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) self.last = self.match m = self._re_cache[pattern].match(self.data, self.pos) if m is None: return False self.start_pos = m.start() self.pos = m.end() self.match = m.group() return True def get_char(self): """Scan exactly one char.""" self.scan('.') def __repr__(self): return '<%s %d/%d>' % ( self.__class__.__name__, self.pos, self.data_length ) """ Various complex queries that have been problematic in the past. """ from __future__ import unicode_literals import threading from django.db import models from django.utils import six from django.utils.encoding import python_2_unicode_compatible class DumbCategory(models.Model): pass class ProxyCategory(DumbCategory): class Meta: proxy = True @python_2_unicode_compatible class NamedCategory(DumbCategory): name = models.CharField(max_length=10) def __str__(self): return self.name @python_2_unicode_compatible class Tag(models.Model): name = models.CharField(max_length=10) parent = models.ForeignKey( 'self', models.SET_NULL, blank=True, null=True, related_name='children', ) category = models.ForeignKey(NamedCategory, models.SET_NULL, null=True, default=None) class Meta: ordering = ['name'] def __str__(self): return self.name @python_2_unicode_compatible class Note(models.Model): note = models.CharField(max_length=100) misc = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.SET_NULL, blank=True, null=True) class Meta: ordering = ['note'] def __str__(self): return self.note def __init__(self, *args, **kwargs): super(Note, self).__init__(*args, **kwargs) # Regression for #13227 -- having an attribute that # is unpickleable doesn't stop you from cloning queries # that use objects of that type as an argument.
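# (threading.Lock instances cannot be pickled, so this attribute is exactly the kind of unpickleable baggage that query cloning must tolerate.)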
self.lock = threading.Lock() @python_2_unicode_compatible class Annotation(models.Model): name = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) notes = models.ManyToManyField(Note) def __str__(self): return self.name @python_2_unicode_compatible class ExtraInfo(models.Model): info = models.CharField(max_length=100) note = models.ForeignKey(Note, models.CASCADE) value = models.IntegerField(null=True) class Meta: ordering = ['info'] def __str__(self): return self.info @python_2_unicode_compatible class Author(models.Model): name = models.CharField(max_length=10) num = models.IntegerField(unique=True) extra = models.ForeignKey(ExtraInfo, models.CASCADE) class Meta: ordering = ['name'] def __str__(self): return self.name @python_2_unicode_compatible class Item(models.Model): name = models.CharField(max_length=10) created = models.DateTimeField() modified = models.DateTimeField(blank=True, null=True) tags = models.ManyToManyField(Tag, blank=True) creator = models.ForeignKey(Author, models.CASCADE) note = models.ForeignKey(Note, models.CASCADE) class Meta: ordering = ['-note', 'name'] def __str__(self): return self.name @python_2_unicode_compatible class Report(models.Model): name = models.CharField(max_length=10) creator = models.ForeignKey(Author, models.SET_NULL, to_field='num', null=True) def __str__(self): return self.name @python_2_unicode_compatible class Ranking(models.Model): rank = models.IntegerField() author = models.ForeignKey(Author, models.CASCADE) class Meta: # A complex ordering specification. Should stress the system a bit. ordering = ('author__extra__note', 'author__name', 'rank') def __str__(self): return '%d: %s' % (self.rank, self.author.name) @python_2_unicode_compatible class Cover(models.Model): title = models.CharField(max_length=50) item = models.ForeignKey(Item, models.CASCADE) class Meta: ordering = ['item'] def __str__(self): return self.title @python_2_unicode_compatible class Number(models.Model): num = models.IntegerField() def __str__(self): return six.text_type(self.num) # Symmetrical m2m field with a normal field using the reverse accessor name # ("valid"). class Valid(models.Model): valid = models.CharField(max_length=10) parent = models.ManyToManyField('self') class Meta: ordering = ['valid'] # Some funky cross-linked models for testing a couple of infinite recursion # cases. class X(models.Model): y = models.ForeignKey('Y', models.CASCADE) class Y(models.Model): x1 = models.ForeignKey(X, models.CASCADE, related_name='y1') # Some models with a cycle in the default ordering. This would be bad if we # didn't catch the infinite loop. class LoopX(models.Model): y = models.ForeignKey('LoopY', models.CASCADE) class Meta: ordering = ['y'] class LoopY(models.Model): x = models.ForeignKey(LoopX, models.CASCADE) class Meta: ordering = ['x'] class LoopZ(models.Model): z = models.ForeignKey('self', models.CASCADE) class Meta: ordering = ['z'] # A model and custom default manager combination. class CustomManager(models.Manager): def get_queryset(self): qs = super(CustomManager, self).get_queryset() return qs.filter(public=True, tag__name='t1') @python_2_unicode_compatible class ManagedModel(models.Model): data = models.CharField(max_length=10) tag = models.ForeignKey(Tag, models.CASCADE) public = models.BooleanField(default=True) objects = CustomManager() normal_manager = models.Manager() def __str__(self): return self.data # An inter-related setup with multiple paths from Child to Detail. 
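# Child reaches Member twice ('person' and 'parent'), and Member reaches Detail through a primary-key one-to-one, so joins from Child to Detail have more than one possible route.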
class Detail(models.Model): data = models.CharField(max_length=10) class MemberManager(models.Manager): def get_queryset(self): return super(MemberManager, self).get_queryset().select_related("details") class Member(models.Model): name = models.CharField(max_length=10) details = models.OneToOneField(Detail, models.CASCADE, primary_key=True) objects = MemberManager() class Child(models.Model): person = models.OneToOneField(Member, models.CASCADE, primary_key=True) parent = models.ForeignKey(Member, models.CASCADE, related_name="children") # Custom primary keys interfered with ordering in the past. class CustomPk(models.Model): name = models.CharField(max_length=10, primary_key=True) extra = models.CharField(max_length=10) class Meta: ordering = ['name', 'extra'] class Related(models.Model): custom = models.ForeignKey(CustomPk, models.CASCADE) class CustomPkTag(models.Model): id = models.CharField(max_length=20, primary_key=True) custom_pk = models.ManyToManyField(CustomPk) tag = models.CharField(max_length=20) # An inter-related setup with a model subclass that has a nullable # path to another model, and a return path from that model. @python_2_unicode_compatible class Celebrity(models.Model): name = models.CharField("Name", max_length=20) greatest_fan = models.ForeignKey("Fan", models.SET_NULL, null=True, unique=True) def __str__(self): return self.name class TvChef(Celebrity): pass class Fan(models.Model): fan_of = models.ForeignKey(Celebrity, models.CASCADE) # Multiple foreign keys @python_2_unicode_compatible class LeafA(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class LeafB(models.Model): data = models.CharField(max_length=10) class Join(models.Model): a = models.ForeignKey(LeafA, models.CASCADE) b = models.ForeignKey(LeafB, models.CASCADE) @python_2_unicode_compatible class ReservedName(models.Model): name = models.CharField(max_length=20) order = models.IntegerField() def __str__(self): return self.name # A simpler shared-foreign-key setup that can expose some problems. 
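# PointerA and PointerB both FK to the same SharedConnection rows, exercising join reuse when both relations show up in a single query.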
@python_2_unicode_compatible class SharedConnection(models.Model): data = models.CharField(max_length=10) def __str__(self): return self.data class PointerA(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) class PointerB(models.Model): connection = models.ForeignKey(SharedConnection, models.CASCADE) # Multi-layer ordering @python_2_unicode_compatible class SingleObject(models.Model): name = models.CharField(max_length=10) class Meta: ordering = ['name'] def __str__(self): return self.name class RelatedObject(models.Model): single = models.ForeignKey(SingleObject, models.SET_NULL, null=True) f = models.IntegerField(null=True) class Meta: ordering = ['single'] @python_2_unicode_compatible class Plaything(models.Model): name = models.CharField(max_length=10) others = models.ForeignKey(RelatedObject, models.SET_NULL, null=True) class Meta: ordering = ['others'] def __str__(self): return self.name @python_2_unicode_compatible class Article(models.Model): name = models.CharField(max_length=20) created = models.DateTimeField() def __str__(self): return self.name @python_2_unicode_compatible class Food(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name @python_2_unicode_compatible class Eaten(models.Model): food = models.ForeignKey(Food, models.SET_NULL, to_field="name", null=True) meal = models.CharField(max_length=20) def __str__(self): return "%s at %s" % (self.food, self.meal) @python_2_unicode_compatible class Node(models.Model): num = models.IntegerField(unique=True) parent = models.ForeignKey("self", models.SET_NULL, to_field="num", null=True) def __str__(self): return "%s" % self.num # Bug #12252 @python_2_unicode_compatible class ObjectA(models.Model): name = models.CharField(max_length=50) def __str__(self): return self.name def __iter__(self): # Ticket #23721 assert False, 'type checking should happen without calling model __iter__' class ProxyObjectA(ObjectA): class Meta: proxy = True class ChildObjectA(ObjectA): pass @python_2_unicode_compatible class ObjectB(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.CASCADE) num = models.PositiveSmallIntegerField() def __str__(self): return self.name class ProxyObjectB(ObjectB): class Meta: proxy = True @python_2_unicode_compatible class ObjectC(models.Model): name = models.CharField(max_length=50) objecta = models.ForeignKey(ObjectA, models.SET_NULL, null=True) objectb = models.ForeignKey(ObjectB, models.SET_NULL, null=True) childobjecta = models.ForeignKey(ChildObjectA, models.SET_NULL, null=True, related_name='ca_pk') def __str__(self): return self.name @python_2_unicode_compatible class SimpleCategory(models.Model): name = models.CharField(max_length=15) def __str__(self): return self.name @python_2_unicode_compatible class SpecialCategory(SimpleCategory): special_name = models.CharField(max_length=15) def __str__(self): return self.name + " " + self.special_name @python_2_unicode_compatible class CategoryItem(models.Model): category = models.ForeignKey(SimpleCategory, models.CASCADE) def __str__(self): return "category item: " + str(self.category) @python_2_unicode_compatible class OneToOneCategory(models.Model): new_name = models.CharField(max_length=15) category = models.OneToOneField(SimpleCategory, models.CASCADE) def __str__(self): return "one2one " + self.new_name class CategoryRelationship(models.Model): first = models.ForeignKey(SimpleCategory, models.CASCADE, related_name='first_rel') second = 
models.ForeignKey(SimpleCategory, models.CASCADE, related_name='second_rel') class NullableName(models.Model): name = models.CharField(max_length=20, null=True) class Meta: ordering = ['id'] class ModelD(models.Model): name = models.TextField() class ModelC(models.Model): name = models.TextField() class ModelB(models.Model): name = models.TextField() c = models.ForeignKey(ModelC, models.CASCADE) class ModelA(models.Model): name = models.TextField() b = models.ForeignKey(ModelB, models.SET_NULL, null=True) d = models.ForeignKey(ModelD, models.CASCADE) @python_2_unicode_compatible class Job(models.Model): name = models.CharField(max_length=20, unique=True) def __str__(self): return self.name class JobResponsibilities(models.Model): job = models.ForeignKey(Job, models.CASCADE, to_field='name') responsibility = models.ForeignKey('Responsibility', models.CASCADE, to_field='description') @python_2_unicode_compatible class Responsibility(models.Model): description = models.CharField(max_length=20, unique=True) jobs = models.ManyToManyField(Job, through=JobResponsibilities, related_name='responsibilities') def __str__(self): return self.description # Models for disjunction join promotion low level testing. class FK1(models.Model): f1 = models.TextField() f2 = models.TextField() class FK2(models.Model): f1 = models.TextField() f2 = models.TextField() class FK3(models.Model): f1 = models.TextField() f2 = models.TextField() class BaseA(models.Model): a = models.ForeignKey(FK1, models.SET_NULL, null=True) b = models.ForeignKey(FK2, models.SET_NULL, null=True) c = models.ForeignKey(FK3, models.SET_NULL, null=True) @python_2_unicode_compatible class Identifier(models.Model): name = models.CharField(max_length=100) def __str__(self): return self.name class Program(models.Model): identifier = models.OneToOneField(Identifier, models.CASCADE) class Channel(models.Model): programs = models.ManyToManyField(Program) identifier = models.OneToOneField(Identifier, models.CASCADE) class Book(models.Model): title = models.TextField() chapter = models.ForeignKey('Chapter', models.CASCADE) class Chapter(models.Model): title = models.TextField() paragraph = models.ForeignKey('Paragraph', models.CASCADE) class Paragraph(models.Model): text = models.TextField() page = models.ManyToManyField('Page') class Page(models.Model): text = models.TextField() class MyObject(models.Model): parent = models.ForeignKey('self', models.SET_NULL, null=True, blank=True, related_name='children') data = models.CharField(max_length=100) created_at = models.DateTimeField(auto_now_add=True) # Models for #17600 regressions @python_2_unicode_compatible class Order(models.Model): id = models.IntegerField(primary_key=True) class Meta: ordering = ('pk', ) def __str__(self): return '%s' % self.pk @python_2_unicode_compatible class OrderItem(models.Model): order = models.ForeignKey(Order, models.CASCADE, related_name='items') status = models.IntegerField() class Meta: ordering = ('pk', ) def __str__(self): return '%s' % self.pk class BaseUser(models.Model): pass @python_2_unicode_compatible class Task(models.Model): title = models.CharField(max_length=10) owner = models.ForeignKey(BaseUser, models.CASCADE, related_name='owner') creator = models.ForeignKey(BaseUser, models.CASCADE, related_name='creator') def __str__(self): return self.title @python_2_unicode_compatible class Staff(models.Model): name = models.CharField(max_length=10) def __str__(self): return self.name @python_2_unicode_compatible class StaffUser(BaseUser): staff =
models.OneToOneField(Staff, models.CASCADE, related_name='user') def __str__(self): return self.staff class Ticket21203Parent(models.Model): parentid = models.AutoField(primary_key=True) parent_bool = models.BooleanField(default=True) created = models.DateTimeField(auto_now=True) class Ticket21203Child(models.Model): childid = models.AutoField(primary_key=True) parent = models.ForeignKey(Ticket21203Parent, models.CASCADE) class Person(models.Model): name = models.CharField(max_length=128) @python_2_unicode_compatible class Company(models.Model): name = models.CharField(max_length=128) employees = models.ManyToManyField(Person, related_name='employers', through='Employment') def __str__(self): return self.name class Employment(models.Model): employer = models.ForeignKey(Company, models.CASCADE) employee = models.ForeignKey(Person, models.CASCADE) title = models.CharField(max_length=128) # Bug #22429 class School(models.Model): pass class Student(models.Model): school = models.ForeignKey(School, models.CASCADE) class Classroom(models.Model): school = models.ForeignKey(School, models.CASCADE) students = models.ManyToManyField(Student, related_name='classroom') class Ticket23605AParent(models.Model): pass class Ticket23605A(Ticket23605AParent): pass class Ticket23605B(models.Model): modela_fk = models.ForeignKey(Ticket23605A, models.CASCADE) modelc_fk = models.ForeignKey("Ticket23605C", models.CASCADE) field_b0 = models.IntegerField(null=True) field_b1 = models.BooleanField(default=False) class Ticket23605C(models.Model): field_c0 = models.FloatField() # db_table names have capital letters to ensure they are quoted in queries. class Individual(models.Model): alive = models.BooleanField() class Meta: db_table = 'Individual' class RelatedIndividual(models.Model): related = models.ForeignKey(Individual, models.CASCADE, related_name='related_individual') class Meta: db_table = 'RelatedIndividual' """Django Selenium test runner. Incorporate functional testing into Django's manage.py test subcommand using Selenium web testing tools.""" __author__ = 'Daniel Mizyrycki' __copyright__ = 'Copyright 2009, Daniel Mizyrycki' __license__ = 'BSD' __version__ = '0.1.0' __maintainer__ = __author__ __email__ = 'mzdaniel@gmail.com' __status__ = 'Development' __url__ = 'http://pypi.python.org/pypi/django-selenium-test-runner' __summary__ = __doc__ from django.conf import settings from django.core.management import setup_environ, import_module, call_command if not hasattr(settings, 'SETTINGS_MODULE'): settings.configure() else: PROJECT_PATH = setup_environ(import_module(settings.SETTINGS_MODULE), settings.SETTINGS_MODULE) import os, sys, re, threading, unittest, shutil from urlparse import urlparse from subprocess import Popen, PIPE from signal import SIGHUP from time import sleep from django.db import connection from django.db.models import get_app, get_apps from django.test.simple import run_tests as base_run_tests from django.core.handlers.wsgi import WSGIHandler from django.contrib import admin from wsgiserver import CherryPyWSGIServer, WSGIPathInfoDispatcher from mediahandler import MediaHandler SELENIUM_TESTS_PATH = 'tests/selenium' FIXTURES = ['tests/data.json'] DSTEST_PATH = os.path.dirname(__file__) TEST_DB_NAME = 'test_fixture_db' SELENIUM_RC_PATH = os.path.join(DSTEST_PATH, 'selenium-server.jar') CPSERVER_OPTIONS = {'host': 'localhost', 'port': 8000, 'threads': 10, 'request_queue_size': 15} # Overwrite default settings from settings.py if they are defined.
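# For example, a project's settings.py might define (hypothetical values): SELENIUM_TESTS_PATH = 'functional_tests/selenium', FIXTURES = ['functional_tests/data.json'], SELENIUM_PATH = '/opt/selenium' (the directory holding selenium-server.jar).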
if hasattr(settings, 'SELENIUM_TESTS_PATH'): SELENIUM_TESTS_PATH = settings.SELENIUM_TESTS_PATH if hasattr(settings, 'FIXTURES'): FIXTURES = settings.FIXTURES if hasattr(settings, 'SELENIUM_PATH'): SELENIUM_RC_PATH = os.path.join(settings.SELENIUM_PATH, 'selenium-server.jar') sys.path += [settings.SELENIUM_PATH] sys.path += [DSTEST_PATH] class SeleniumRCThread(threading.Thread): """Selenium RC control thread.""" def __init__(self, server_filepath): super(SeleniumRCThread, self).__init__() self.server_filepath = server_filepath self.process = None def run(self): """Launch Selenium server.""" self.process = Popen(('java -jar %s' % self.server_filepath).split(), shell=False, stdout=PIPE, stderr=PIPE) def stop(self): """Stop Selenium server.""" os.kill(self.process.pid, SIGHUP) class TestDB(object): """Encapsulate fixtured database handling for tests to be used by the Django web server. As the Django connection is global, this class will set up TEST_DB_NAME as the database in use.""" def __init__(self, db_name, fixtures, verbosity=0): """Initialize TestDB.""" self.db_name = db_name self.fixtures = fixtures self.verbosity = verbosity # Save the real database names for later connection restore. self.database_name = settings.DATABASE_NAME self.test_database_name = settings.TEST_DATABASE_NAME self.db_path = None self.db_backup_path = None def initialize_test_db(self): """Establish a connection to a fresh TEST_DB_NAME database with the test fixtures on it.""" # Create a test database and sync it with models.py # Handle a second test database for selenium use. Postgres uses # transactions which interfere with the Django server thread. settings.TEST_DATABASE_NAME = self.db_name connection.creation.create_test_db(verbosity=self.verbosity, autoclobber=True) # Hook for doing any extra initialization self.extra_init() # Load fixture data. call_command('loaddata', *self.fixtures, verbosity=self.verbosity) # Sync data and close connection connection.close() # If sqlite3 or Postgres is used, create a backup database to speed up # fixture reloading. if settings.DATABASE_ENGINE == 'postgresql_psycopg2': # connection.creation is used to overcome transaction management, # allowing us to execute DROP and CREATE db commands. cursor = connection.cursor() connection.creation.set_autocommit() cursor.execute("DROP DATABASE IF EXISTS %s_backup" % self.db_name) cursor.execute("CREATE DATABASE %s_backup WITH TEMPLATE %s" % ( self.db_name, self.db_name)) if settings.DATABASE_ENGINE == 'sqlite3': self.db_path = os.path.join(PROJECT_PATH, settings.DATABASE_NAME) self.db_backup_path = '%s_backup' % self.db_path if self.db_path[-3:] == '.db': self.db_backup_path = '%s_backup.db' % self.db_path[:-3] shutil.copyfile(self.db_path, self.db_backup_path) # Restore the database names as create_test_db changed them. settings.TEST_DATABASE_NAME = self.test_database_name settings.DATABASE_NAME = self.database_name def extra_init(self): """Hook for doing any extra initialization. After subclassing TestDB, and overriding this method, initialize_test_db will call it.""" pass def reload_db(self): """Reload fixtures into the test database. This is a database-dependent method; for now, only SQLite and Postgres are supported.""" if settings.DATABASE_ENGINE not in ['sqlite3', 'postgresql_psycopg2']: return None # Close connection to cleanly swap databases.
connection.close() if settings.DATABASE_ENGINE == 'sqlite3': shutil.copyfile(self.db_backup_path, self.db_path) if settings.DATABASE_ENGINE == 'postgresql_psycopg2': # Establish a temporary connection to the template1 database and # recreate TEST_DB_NAME. connection.settings_dict["DATABASE_NAME"] = 'template1' cursor = connection.cursor() connection.creation.set_autocommit() cursor.execute("DROP DATABASE IF EXISTS %s" % self.db_name) cursor.execute("CREATE DATABASE %s WITH TEMPLATE %s_backup" % ( self.db_name, self.db_name)) connection.close() # Change the connection to the new test database. settings.DATABASE_NAME = self.db_name connection.settings_dict["DATABASE_NAME"] = self.db_name # Get a cursor (even though we don't need one yet). This has # the side effect of initializing the test database. connection.cursor() return True def drop(self): """Drop the test database. This is a database-dependent method; for now, only SQLite and Postgres are supported.""" def drop_db(name): """TestDB.drop helper function""" try: connection.creation._destroy_test_db(name, verbosity=0) except: return None return True if settings.DATABASE_ENGINE not in ['sqlite3', 'postgresql_psycopg2']: return None connection.close() if settings.DATABASE_ENGINE == 'postgresql_psycopg2': connection.settings_dict["DATABASE_NAME"] = 'template1' drop_db('%s_backup' % self.db_name) drop_db(self.db_name) drop_db(self.test_database_name) # Restore the connection to the original database. settings.TEST_DATABASE_NAME = self.test_database_name settings.DATABASE_NAME = self.database_name connection.settings_dict["DATABASE_NAME"] = self.database_name connection.cursor() class DjangoThread(threading.Thread): """Django server control thread.""" def __init__(self, testdb): """Initialize CherryPy Django web server.""" super(DjangoThread, self).__init__() testdb.initialize_test_db() self.setDaemon(True) def run(self): """Launch CherryPy Django web server.""" options = CPSERVER_OPTIONS server = CherryPyWSGIServer( (options['host'], int(options['port'])), WSGIPathInfoDispatcher({ '/': WSGIHandler(), urlparse(settings.MEDIA_URL).path: MediaHandler( settings.MEDIA_ROOT), settings.ADMIN_MEDIA_PREFIX: MediaHandler( os.path.join(admin.__path__[0], 'media')) }), int(options['threads']), options['host'], request_queue_size=int(options['request_queue_size'])) try: server.start() except KeyboardInterrupt: server.stop() def get_selenium_tests(testdb, test_labels=None): """Import selenium tests stored under module_path/SELENIUM_TESTS_PATH.""" def load_tests(module_path): """Import selenium tests.""" def add_fixtures(ctest): """Monkeypatch selenium tests to add django fixtures.""" def test_setup(funct): """Test setUp decorator to add fixture reloading.""" def decorated_setup(): """Decorated test setup.""" testdb.reload_db() funct() return decorated_setup for test in ctest._tests: test.setUp = test_setup(test.setUp) # Check dependencies before loading test. tests = [] test_path = os.path.join(module_path, SELENIUM_TESTS_PATH) if not os.path.isdir(test_path): return tests sys.path += [test_path] # Monkeypatch selenium tests to reload fixtures into Django server db.
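# Only modules named test_*.py inside the app's SELENIUM_TESTS_PATH directory are picked up by the loop below.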
for filename in os.listdir(test_path): if not re.search(r'^test_.+\.py$', filename): continue test_module = __import__(filename[:-len('.py')]) # Add all unittests from module for test_name in test_module.__dict__: test_case = test_module.__dict__[test_name] if not (type(test_case) is type(unittest.TestCase) and \ issubclass(test_case, unittest.TestCase)): continue test = unittest.TestLoader().loadTestsFromTestCase(test_case) # Setup fixtures for the test. add_fixtures(test) tests.append(test) return tests tests = [] if test_labels: for label in test_labels: tests += load_tests(os.path.dirname(get_app(label).__file__)) else: for app in get_apps(): tests += load_tests(os.path.dirname(app.__file__)) return tests def dependencies_met(): """Check Selenium testing dependencies are met.""" # Check Java VM command line runner. try: Popen(['java'], shell=False, stderr=PIPE).communicate()[1] except: print 'Dependency unmet. Java virtual machine command line runner not ' \ 'found.' return False # Check selenium-server.jar is ready to run. output = Popen(('java -jar %s -unrecognized_argument' % SELENIUM_RC_PATH ).split(), shell=False, stderr=PIPE).communicate()[1] if not re.search('Usage: java -jar selenium-server.jar', output): print 'Dependency unmet. Selenium RC server (selenium-server.jar) not ' \ 'found.' return False # Check selenium RC python driver is available. try: import selenium except: print 'Dependency unmet. Selenium RC python driver (selenium.py) not ' \ 'found.' return False # Check CherryPy wsgi server is available. try: import wsgiserver except: print 'Dependency unmet. CherryPy wsgi server (wsgiserver.py) not found.' return False # Check fixture support is implemented for the database engine. if settings.DATABASE_ENGINE not in ['sqlite3', 'postgresql_psycopg2']: print 'Dependency unmet. Fixture support for database engine %s not ' \ 'implemented.' % settings.DATABASE_ENGINE return False return True def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=None): """Selenium Test runner.""" if not extra_tests: extra_tests = [] dependencies = dependencies_met() if dependencies and not extra_tests: # Obtain a database test handler. testdb = TestDB(TEST_DB_NAME, FIXTURES, verbosity=0) extra_tests = get_selenium_tests(testdb, test_labels) if dependencies and extra_tests: print 'Preparing to run unittests and selenium tests.' # Start selenium rc and Django servers. selenium_rc = SeleniumRCThread(SELENIUM_RC_PATH) selenium_rc.start() django_server = DjangoThread(testdb) django_server.start() # Wait a few seconds for the servers to initialize. sleep(5) else: extra_tests = [] print 'Running unittests but not selenium tests.' results = base_run_tests(test_labels, verbosity, interactive, extra_tests) if extra_tests: # Stop selenium server, and drop test database. selenium_rc.stop() testdb.drop() return results # # Module providing the `Pool` class for managing a process pool # # multiprocessing/pool.py # # Copyright (c) 2006-2008, R Oudkerk # Licensed to PSF under a Contributor Agreement. # __all__ = ['Pool', 'ThreadPool'] # # Imports # import threading import queue import itertools import collections import os import time import traceback # If threading is available then ThreadPool should be provided. Therefore # we avoid top-level imports which are liable to fail on some systems. from .
import get_context, TimeoutError # # Constants representing the state of a pool # RUN = 0 CLOSE = 1 TERMINATE = 2 # # Miscellaneous # job_counter = itertools.count() def mapstar(args): return list(map(*args)) def starmapstar(args): return list(itertools.starmap(args[0], args[1])) # # Hack to embed stringification of remote traceback in local traceback # class RemoteTraceback(Exception): def __init__(self, tb): self.tb = tb def __str__(self): return self.tb class ExceptionWithTraceback: def __init__(self, exc, tb): tb = traceback.format_exception(type(exc), exc, tb) tb = ''.join(tb) self.exc = exc self.tb = '\n"""\n%s"""' % tb def __reduce__(self): return rebuild_exc, (self.exc, self.tb) def rebuild_exc(exc, tb): exc.__cause__ = RemoteTraceback(tb) return exc # # Code run by worker processes # class MaybeEncodingError(Exception): """Wraps possible unpickleable errors, so they can be safely sent through the socket.""" def __init__(self, exc, value): self.exc = repr(exc) self.value = repr(value) super(MaybeEncodingError, self).__init__(self.exc, self.value) def __str__(self): return "Error sending result: '%s'. Reason: '%s'" % (self.value, self.exc) def __repr__(self): return "<MaybeEncodingError: %s>" % str(self) def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, wrap_exception=False): assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0) put = outqueue.put get = inqueue.get if hasattr(inqueue, '_writer'): inqueue._writer.close() outqueue._reader.close() if initializer is not None: initializer(*initargs) completed = 0 while maxtasks is None or (maxtasks and completed < maxtasks): try: task = get() except (EOFError, OSError): util.debug('worker got EOFError or OSError -- exiting') break if task is None: util.debug('worker got sentinel -- exiting') break job, i, func, args, kwds = task try: result = (True, func(*args, **kwds)) except Exception as e: if wrap_exception: e = ExceptionWithTraceback(e, e.__traceback__) result = (False, e) try: put((job, i, result)) except Exception as e: wrapped = MaybeEncodingError(e, result[1]) util.debug("Possible encoding error while sending result: %s" % ( wrapped)) put((job, i, (False, wrapped))) completed += 1 util.debug('worker exiting after %d tasks' % completed) # # Class representing a process pool # class Pool(object): ''' Class which supports an async version of applying functions to arguments.
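A brief usage sketch (`abs` is just a stand-in task): with Pool(processes=2) as pool: assert pool.map(abs, [-2, -1, 0]) == [2, 1, 0]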
''' _wrap_exception = True def Process(self, *args, **kwds): return self._ctx.Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=(), maxtasksperchild=None, context=None): self._ctx = context or get_context() self._setup_queues() self._taskqueue = queue.Queue() self._cache = {} self._state = RUN self._maxtasksperchild = maxtasksperchild self._initializer = initializer self._initargs = initargs if processes is None: processes = os.cpu_count() or 1 if processes < 1: raise ValueError("Number of processes must be at least 1") if initializer is not None and not callable(initializer): raise TypeError('initializer must be a callable') self._processes = processes self._pool = [] self._repopulate_pool() self._worker_handler = threading.Thread( target=Pool._handle_workers, args=(self, ) ) self._worker_handler.daemon = True self._worker_handler._state = RUN self._worker_handler.start() self._task_handler = threading.Thread( target=Pool._handle_tasks, args=(self._taskqueue, self._quick_put, self._outqueue, self._pool, self._cache) ) self._task_handler.daemon = True self._task_handler._state = RUN self._task_handler.start() self._result_handler = threading.Thread( target=Pool._handle_results, args=(self._outqueue, self._quick_get, self._cache) ) self._result_handler.daemon = True self._result_handler._state = RUN self._result_handler.start() self._terminate = util.Finalize( self, self._terminate_pool, args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, self._worker_handler, self._task_handler, self._result_handler, self._cache), exitpriority=15 ) def _join_exited_workers(self): """Cleanup after any worker processes which have exited due to reaching their specified lifetime. Returns True if any workers were cleaned up. """ cleaned = False for i in reversed(range(len(self._pool))): worker = self._pool[i] if worker.exitcode is not None: # worker exited util.debug('cleaning up worker %d' % i) worker.join() cleaned = True del self._pool[i] return cleaned def _repopulate_pool(self): """Bring the number of pool processes up to the specified number, for use after reaping workers which have exited. """ for i in range(self._processes - len(self._pool)): w = self.Process(target=worker, args=(self._inqueue, self._outqueue, self._initializer, self._initargs, self._maxtasksperchild, self._wrap_exception) ) self._pool.append(w) w.name = w.name.replace('Process', 'PoolWorker') w.daemon = True w.start() util.debug('added worker') def _maintain_pool(self): """Clean up any exited workers and start replacements for them. """ if self._join_exited_workers(): self._repopulate_pool() def _setup_queues(self): self._inqueue = self._ctx.SimpleQueue() self._outqueue = self._ctx.SimpleQueue() self._quick_put = self._inqueue._writer.send self._quick_get = self._outqueue._reader.recv def apply(self, func, args=(), kwds={}): ''' Equivalent of `func(*args, **kwds)`. ''' assert self._state == RUN return self.apply_async(func, args, kwds).get() def map(self, func, iterable, chunksize=None): ''' Apply `func` to each element in `iterable`, collecting the results in a list that is returned. ''' return self._map_async(func, iterable, mapstar, chunksize).get() def starmap(self, func, iterable, chunksize=None): ''' Like `map()` method but the elements of the `iterable` are expected to be iterables as well and will be unpacked as arguments. Hence `func` and (a, b) becomes func(a, b). 
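For example, starmap(pow, [(2, 5), (3, 2)]) computes pow(2, 5) and pow(3, 2) and returns [32, 9].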
''' return self._map_async(func, iterable, starmapstar, chunksize).get() def starmap_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `starmap()` method. ''' return self._map_async(func, iterable, starmapstar, chunksize, callback, error_callback) def imap(self, func, iterable, chunksize=1): ''' Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. ''' if self._state != RUN: raise ValueError("Pool not running") if chunksize == 1: result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def imap_unordered(self, func, iterable, chunksize=1): ''' Like `imap()` method but ordering of results is arbitrary. ''' if self._state != RUN: raise ValueError("Pool not running") if chunksize == 1: result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, func, (x,), {}) for i, x in enumerate(iterable)), result._set_length)) return result else: assert chunksize > 1 task_batches = Pool._get_tasks(func, iterable, chunksize) result = IMapUnorderedIterator(self._cache) self._taskqueue.put((((result._job, i, mapstar, (x,), {}) for i, x in enumerate(task_batches)), result._set_length)) return (item for chunk in result for item in chunk) def apply_async(self, func, args=(), kwds={}, callback=None, error_callback=None): ''' Asynchronous version of `apply()` method. ''' if self._state != RUN: raise ValueError("Pool not running") result = ApplyResult(self._cache, callback, error_callback) self._taskqueue.put(([(result._job, None, func, args, kwds)], None)) return result def map_async(self, func, iterable, chunksize=None, callback=None, error_callback=None): ''' Asynchronous version of `map()` method. ''' return self._map_async(func, iterable, mapstar, chunksize, callback, error_callback) def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, error_callback=None): ''' Helper function to implement map, starmap and their async counterparts. ''' if self._state != RUN: raise ValueError("Pool not running") if not hasattr(iterable, '__len__'): iterable = list(iterable) if chunksize is None: chunksize, extra = divmod(len(iterable), len(self._pool) * 4) if extra: chunksize += 1 if len(iterable) == 0: chunksize = 0 task_batches = Pool._get_tasks(func, iterable, chunksize) result = MapResult(self._cache, chunksize, len(iterable), callback, error_callback=error_callback) self._taskqueue.put((((result._job, i, mapper, (x,), {}) for i, x in enumerate(task_batches)), None)) return result @staticmethod def _handle_workers(pool): thread = threading.current_thread() # Keep maintaining workers until the cache gets drained, unless the pool # is terminated. 
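# (The cache maps job ids to pending result objects, so a non-empty cache means submitted work has not finished yet; close() lets it drain, terminate() does not.)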
while thread._state == RUN or (pool._cache and thread._state != TERMINATE): pool._maintain_pool() time.sleep(0.1) # send sentinel to stop workers pool._taskqueue.put(None) util.debug('worker handler exiting') @staticmethod def _handle_tasks(taskqueue, put, outqueue, pool, cache): thread = threading.current_thread() for taskseq, set_length in iter(taskqueue.get, None): i = -1 for i, task in enumerate(taskseq): if thread._state: util.debug('task handler found thread._state != RUN') break try: put(task) except Exception as e: job, ind = task[:2] try: cache[job]._set(ind, (False, e)) except KeyError: pass else: if set_length: util.debug('doing set_length()') set_length(i+1) continue break else: util.debug('task handler got sentinel') try: # tell result handler to finish when cache is empty util.debug('task handler sending sentinel to result handler') outqueue.put(None) # tell workers there is no more work util.debug('task handler sending sentinel to workers') for p in pool: put(None) except OSError: util.debug('task handler got OSError when sending sentinels') util.debug('task handler exiting') @staticmethod def _handle_results(outqueue, get, cache): thread = threading.current_thread() while 1: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if thread._state: assert thread._state == TERMINATE util.debug('result handler found thread._state=TERMINATE') break if task is None: util.debug('result handler got sentinel') break job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass while cache and thread._state != TERMINATE: try: task = get() except (OSError, EOFError): util.debug('result handler got EOFError/OSError -- exiting') return if task is None: util.debug('result handler ignoring extra sentinel') continue job, i, obj = task try: cache[job]._set(i, obj) except KeyError: pass if hasattr(outqueue, '_reader'): util.debug('ensuring that outqueue is not full') # If we don't make room available in outqueue then # attempts to add the sentinel (None) to outqueue may # block. There is guaranteed to be no more than 2 sentinels. 
try: for i in range(10): if not outqueue._reader.poll(): break get() except (OSError, EOFError): pass util.debug('result handler exiting: len(cache)=%s, thread._state=%s', len(cache), thread._state) @staticmethod def _get_tasks(func, it, size): it = iter(it) while 1: x = tuple(itertools.islice(it, size)) if not x: return yield (func, x) def __reduce__(self): raise NotImplementedError( 'pool objects cannot be passed between processes or pickled' ) def close(self): util.debug('closing pool') if self._state == RUN: self._state = CLOSE self._worker_handler._state = CLOSE def terminate(self): util.debug('terminating pool') self._state = TERMINATE self._worker_handler._state = TERMINATE self._terminate() def join(self): util.debug('joining pool') assert self._state in (CLOSE, TERMINATE) self._worker_handler.join() self._task_handler.join() self._result_handler.join() for p in self._pool: p.join() @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # task_handler may be blocked trying to put items on inqueue util.debug('removing tasks from inqueue until task handler finished') inqueue._rlock.acquire() while task_handler.is_alive() and inqueue._reader.poll(): inqueue._reader.recv() time.sleep(0) @classmethod def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, worker_handler, task_handler, result_handler, cache): # this is guaranteed to only be called once util.debug('finalizing pool') worker_handler._state = TERMINATE task_handler._state = TERMINATE util.debug('helping task handler/workers to finish') cls._help_stuff_finish(inqueue, task_handler, len(pool)) assert result_handler.is_alive() or len(cache) == 0 result_handler._state = TERMINATE outqueue.put(None) # sentinel # We must wait for the worker handler to exit before terminating # workers because we don't want workers to be restarted behind our back. util.debug('joining worker handler') if threading.current_thread() is not worker_handler: worker_handler.join() # Terminate workers which haven't already finished. 
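# (ThreadPool workers are thread-backed dummy processes without a terminate() method, so the hasattr check below makes this a no-op for thread pools.)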
if pool and hasattr(pool[0], 'terminate'): util.debug('terminating workers') for p in pool: if p.exitcode is None: p.terminate() util.debug('joining task handler') if threading.current_thread() is not task_handler: task_handler.join() util.debug('joining result handler') if threading.current_thread() is not result_handler: result_handler.join() if pool and hasattr(pool[0], 'terminate'): util.debug('joining pool workers') for p in pool: if p.is_alive(): # worker has not yet exited util.debug('cleaning up worker %d' % p.pid) p.join() def __enter__(self): return self def __exit__(self, exc_type, exc_val, exc_tb): self.terminate() # # Class whose instances are returned by `Pool.apply_async()` # class ApplyResult(object): def __init__(self, cache, callback, error_callback): self._event = threading.Event() self._job = next(job_counter) self._cache = cache self._callback = callback self._error_callback = error_callback cache[self._job] = self def ready(self): return self._event.is_set() def successful(self): assert self.ready() return self._success def wait(self, timeout=None): self._event.wait(timeout) def get(self, timeout=None): self.wait(timeout) if not self.ready(): raise TimeoutError if self._success: return self._value else: raise self._value def _set(self, i, obj): self._success, self._value = obj if self._callback and self._success: self._callback(self._value) if self._error_callback and not self._success: self._error_callback(self._value) self._event.set() del self._cache[self._job] AsyncResult = ApplyResult # create alias -- see #17805 # # Class whose instances are returned by `Pool.map_async()` # class MapResult(ApplyResult): def __init__(self, cache, chunksize, length, callback, error_callback): ApplyResult.__init__(self, cache, callback, error_callback=error_callback) self._success = True self._value = [None] * length self._chunksize = chunksize if chunksize <= 0: self._number_left = 0 self._event.set() del cache[self._job] else: self._number_left = length//chunksize + bool(length % chunksize) def _set(self, i, success_result): success, result = success_result if success: self._value[i*self._chunksize:(i+1)*self._chunksize] = result self._number_left -= 1 if self._number_left == 0: if self._callback: self._callback(self._value) del self._cache[self._job] self._event.set() else: self._success = False self._value = result if self._error_callback: self._error_callback(self._value) del self._cache[self._job] self._event.set() # # Class whose instances are returned by `Pool.imap()` # class IMapIterator(object): def __init__(self, cache): self._cond = threading.Condition(threading.Lock()) self._job = next(job_counter) self._cache = cache self._items = collections.deque() self._index = 0 self._length = None self._unsorted = {} cache[self._job] = self def __iter__(self): return self def next(self, timeout=None): self._cond.acquire() try: try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration self._cond.wait(timeout) try: item = self._items.popleft() except IndexError: if self._index == self._length: raise StopIteration raise TimeoutError finally: self._cond.release() success, value = item if success: return value raise value __next__ = next # XXX def _set(self, i, obj): self._cond.acquire() try: if self._index == i: self._items.append(obj) self._index += 1 while self._index in self._unsorted: obj = self._unsorted.pop(self._index) self._items.append(obj) self._index += 1 self._cond.notify() else: self._unsorted[i] = obj if self._index == 
self._length: del self._cache[self._job] finally: self._cond.release() def _set_length(self, length): self._cond.acquire() try: self._length = length if self._index == self._length: self._cond.notify() del self._cache[self._job] finally: self._cond.release() # # Class whose instances are returned by `Pool.imap_unordered()` # class IMapUnorderedIterator(IMapIterator): def _set(self, i, obj): self._cond.acquire() try: self._items.append(obj) self._index += 1 self._cond.notify() if self._index == self._length: del self._cache[self._job] finally: self._cond.release() # # # class ThreadPool(Pool): _wrap_exception = False @staticmethod def Process(*args, **kwds): from .dummy import Process return Process(*args, **kwds) def __init__(self, processes=None, initializer=None, initargs=()): Pool.__init__(self, processes, initializer, initargs) def _setup_queues(self): self._inqueue = queue.Queue() self._outqueue = queue.Queue() self._quick_put = self._inqueue.put self._quick_get = self._outqueue.get @staticmethod def _help_stuff_finish(inqueue, task_handler, size): # put sentinels at head of inqueue to make workers finish inqueue.not_empty.acquire() try: inqueue.queue.clear() inqueue.queue.extend([None] * size) inqueue.not_empty.notify_all() finally: inqueue.not_empty.release() # Copyright 2016 Sungard Availability Services # Copyright 2016 Red Hat # Copyright 2012 eNovance # Copyright 2013 IBM Corp # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
from oslo_config import cfg from oslo_log import log from oslo_utils import timeutils from ceilometer.agent import plugin_base from ceilometer.i18n import _LW from ceilometer import neutron_client from ceilometer import sample LOG = log.getLogger(__name__) cfg.CONF.import_group('service_types', 'ceilometer.neutron_client') class FloatingIPPollster(plugin_base.PollsterBase): STATUS = { 'inactive': 0, 'active': 1, 'pending_create': 2, } def __init__(self): self.neutron_cli = neutron_client.Client() @property def default_discovery(self): return 'endpoint:%s' % cfg.CONF.service_types.neutron @staticmethod def _form_metadata_for_fip(fip): """Return a metadata dictionary for the fip usage data.""" metadata = { 'router_id': fip.get("router_id"), 'status': fip.get("status"), 'floating_network_id': fip.get("floating_network_id"), 'fixed_ip_address': fip.get("fixed_ip_address"), 'port_id': fip.get("port_id"), 'floating_ip_address': fip.get("floating_ip_address") } return metadata def get_samples(self, manager, cache, resources): for fip in self.neutron_cli.fip_get_all(): status = self.STATUS.get(fip['status'].lower()) if status is None: LOG.warning(_LW("Invalid status, skipping IP address %s") % fip['floating_ip_address']) continue res_metadata = self._form_metadata_for_fip(fip) yield sample.Sample( name='ip.floating', type=sample.TYPE_GAUGE, unit='ip', volume=status, user_id=fip.get('user_id'), project_id=fip['tenant_id'], resource_id=fip['id'], timestamp=timeutils.utcnow().isoformat(), resource_metadata=res_metadata ) # coding: utf-8 # # Copyright 2014 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Domain object for statistics models.""" __author__ = 'Sean Lip' import copy import operator import re from core.platform import models (stats_models,) = models.Registry.import_models([models.NAMES.statistics]) class StateCounter(object): """Domain object that keeps counts associated with states. All methods and properties in this file should be independent of the specific storage model used. 
""" def __init__(self, first_entry_count, subsequent_entries_count, resolved_answer_count, active_answer_count): self.first_entry_count = first_entry_count self.subsequent_entries_count = subsequent_entries_count self.resolved_answer_count = resolved_answer_count self.active_answer_count = active_answer_count @property def total_entry_count(self): """Total number of entries to the state.""" return self.first_entry_count + self.subsequent_entries_count @property def no_answer_count(self): """Number of times a reader left without entering an answer.""" return (self.first_entry_count + self.subsequent_entries_count - self.resolved_answer_count - self.active_answer_count) @classmethod def get(cls, exploration_id, state_name): state_counter_model = stats_models.StateCounterModel.get_or_create( exploration_id, state_name) return cls( state_counter_model.first_entry_count, state_counter_model.subsequent_entries_count, state_counter_model.resolved_answer_count, state_counter_model.active_answer_count ) class StateRuleAnswerLog(object): """Domain object that stores answers which match different state rules. All methods and properties in this file should be independent of the specific storage model used. """ def __init__(self, answers): # This dict represents a log of answers that hit this rule and that # have not been resolved. The keys of this dict are the answers encoded # as HTML strings, and the values are integer counts representing how # many times the answer has been entered. self.answers = copy.deepcopy(answers) @property def total_answer_count(self): """Total count of answers for this rule that have not been resolved.""" # TODO(sll): Cache this computed property. total_count = 0 for answer, count in self.answers.iteritems(): total_count += count return total_count @classmethod def get_multi(cls, exploration_id, rule_data): """Gets domain objects corresponding to the given rule data. Args: exploration_id: the exploration id rule_data: a list of dicts, each with the following keys: (state_name, handler_name, rule_str). """ # TODO(sll): Should each rule_str be unicode instead? answer_log_models = ( stats_models.StateRuleAnswerLogModel.get_or_create_multi( exploration_id, rule_data)) return [cls(answer_log_model.answers) for answer_log_model in answer_log_models] @classmethod def get(cls, exploration_id, state_name, handler_name, rule_str): # TODO(sll): Deprecate this method. return cls.get_multi(exploration_id, [{ 'state_name': state_name, 'handler_name': handler_name, 'rule_str': rule_str }])[0] def get_top_answers(self, N): """Returns the top N answers. Args: N: the maximum number of answers to return. Returns: A list of (answer, count) tuples for the N answers with the highest counts. """ return sorted( self.answers.iteritems(), key=operator.itemgetter(1), reverse=True)[:N] """ pgoapi - Pokemon Go API Copyright (c) 2016 tjado Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Author: tjado
"""

import re
import time
import struct
import ctypes
import xxhash
import logging

from json import JSONEncoder
from binascii import unhexlify

# other stuff
from google.protobuf.internal import encoder
from geopy.geocoders import GoogleV3
from s2sphere import LatLng, Angle, Cap, RegionCoverer, math

log = logging.getLogger(__name__)

EARTH_RADIUS = 6371000  # mean Earth radius in metres (assumed value; original constant lost)


def f2i(float):
    # unsigned 64-bit integer holding the IEEE-754 bit pattern of the double
    return struct.unpack('<Q', struct.pack('<d', float))[0]


def f2h(float):
    # hex string of the double's bit pattern; used by d2h() below
    return hex(struct.unpack('<Q', struct.pack('<d', float))[0])


def get_cell_ids(lat, long, radius=1000):
    if radius > 1500:
        radius = 1500  # radius = 1500 is max allowed by the server
    region = Cap.from_axis_angle(LatLng.from_degrees(lat, long).to_point(),
                                 Angle.from_degrees(360 * radius / (2 * math.pi * EARTH_RADIUS)))
    coverer = RegionCoverer()
    coverer.min_level = 15
    coverer.max_level = 15
    cells = coverer.get_covering(region)
    cells = cells[:100]  # len(cells) = 100 is max allowed by the server
    return sorted([x.id() for x in cells])


def get_time(ms=False):
    if ms:
        return int(round(time.time() * 1000))
    else:
        return int(round(time.time()))


def get_format_time_diff(low, high, ms=True):
    diff = (high - low)
    if ms:
        m, s = divmod(diff / 1000, 60)
    else:
        m, s = divmod(diff, 60)
    h, m = divmod(m, 60)
    return (h, m, s)


def parse_api_endpoint(api_url):
    if not api_url.startswith("https"):
        api_url = 'https://{}/rpc'.format(api_url)
    return api_url


class Rand48(object):
    def __init__(self, seed):
        self.n = seed

    def seed(self, seed):
        self.n = seed

    def srand(self, seed):
        self.n = (seed << 16) + 0x330e

    def next(self):
        self.n = (25214903917 * self.n + 11) & (2**48 - 1)
        return self.n

    def drand(self):
        return self.next() / float(2**48)  # float() guards against Python 2 integer division

    def lrand(self):
        return self.next() >> 17

    def mrand(self):
        n = self.next() >> 16
        if n & (1 << 31):
            n -= 1 << 32
        return n


def long_to_bytes(val, endianness='big'):
    """
    Use :ref:`string formatting` and :func:`~binascii.unhexlify` to convert
    ``val``, a :func:`long`, to a byte :func:`str`.

    :param long val: The value to pack
    :param str endianness: The endianness of the result. ``'big'`` for
        big-endian, ``'little'`` for little-endian. If you want byte- and
        word-ordering to differ, you're on your own.

    Using :ref:`string formatting` lets us use Python's C innards.
""" # one (1) hex digit per four (4) bits width = val.bit_length() # unhexlify wants an even multiple of eight (8) bits, but we don't # want more digits than we need (hence the ternary-ish 'or') width += 8 - ((width % 8) or 8) # format width specifier: four (4) bits per hex digit fmt = '%%0%dx' % (width // 4) # prepend zero (0) to the width, to zero-pad the output s = unhexlify(fmt % val) if endianness == 'little': # see http://stackoverflow.com/a/931095/309233 s = s[::-1] return s def generateLocation1(authticket, lat, lng, alt): firstHash = xxhash.xxh32(authticket, seed=0x1B845238).intdigest() locationBytes = d2h(lat) + d2h(lng) + d2h(alt) if not alt: alt = "\x00\x00\x00\x00\x00\x00\x00\x00" return xxhash.xxh32(locationBytes, seed=firstHash).intdigest() def generateLocation2(lat, lng, alt): locationBytes = d2h(lat) + d2h(lng) + d2h(alt) if not alt: alt = "\x00\x00\x00\x00\x00\x00\x00\x00" return xxhash.xxh32(locationBytes, seed=0x1B845238).intdigest() # Hash of location using static seed 0x1B845238 def generateRequestHash(authticket, request): firstHash = xxhash.xxh64(authticket, seed=0x1B845238).intdigest() return xxhash.xxh64(request, seed=firstHash).intdigest() def d2h(f): hex_str = f2h(f)[2:].replace('L', '') hex_str = ("0" * (len(hex_str) % 2)) + hex_str return unhexlify(hex_str) # coding: utf-8 from __future__ import unicode_literals from .common import InfoExtractor from ..compat import ( compat_parse_qs, compat_urllib_parse_unquote, compat_urllib_parse_urlparse, ) from ..utils import ( ExtractorError, unified_strdate, int_or_none, qualities, unescapeHTML, ) class OdnoklassnikiIE(InfoExtractor): _VALID_URL = r'https?://(?:(?:www|m|mobile)\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P[\d-]+)' _TESTS = [{ # metadata in JSON 'url': 'http://ok.ru/video/20079905452', 'md5': '6ba728d85d60aa2e6dd37c9e70fdc6bc', 'info_dict': { 'id': '20079905452', 'ext': 'mp4', 'title': 'Культура меняет нас (прекрасный ролик!))', 'duration': 100, 'upload_date': '20141207', 'uploader_id': '330537914540', 'uploader': 'Виталий Добровольский', 'like_count': int, 'age_limit': 0, }, 'skip': 'Video has been blocked', }, { # metadataUrl 'url': 'http://ok.ru/video/63567059965189-0?fromTime=5', 'md5': '9676cf86eff5391d35dea675d224e131', 'info_dict': { 'id': '63567059965189-0', 'ext': 'mp4', 'title': 'Девушка без комплексов ...', 'duration': 191, 'upload_date': '20150518', 'uploader_id': '534380003155', 'uploader': '☭ Андрей Мещанинов ☭', 'like_count': int, 'age_limit': 0, 'start_time': 5, }, }, { # YouTube embed (metadataUrl, provider == USER_YOUTUBE) 'url': 'http://ok.ru/video/64211978996595-1', 'md5': '5d7475d428845cd2e13bae6f1a992278', 'info_dict': { 'id': '64211978996595-1', 'ext': 'mp4', 'title': 'Космическая среда от 26 августа 2015', 'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0', 'duration': 440, 'upload_date': '20150826', 'uploader_id': '750099571', 'uploader': 'Алина П', 'age_limit': 0, }, }, { # YouTube embed (metadata, provider == USER_YOUTUBE, no metadata.movie.title field) 'url': 'http://ok.ru/video/62036049272859-0', 'info_dict': { 'id': '62036049272859-0', 'ext': 'mp4', 'title': 'МУЗЫКА ДОЖДЯ .', 'description': 'md5:6f1867132bd96e33bf53eda1091e8ed0', 'upload_date': '20120106', 'uploader_id': '473534735899', 'uploader': 'МARINA D', 'age_limit': 0, }, 'params': { 'skip_download': True, }, }, { 'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452', 'only_matching': True, }, { 'url': 'http://www.ok.ru/video/20648036891', 'only_matching': True, }, { 'url': 
# coding: utf-8
from __future__ import unicode_literals

from .common import InfoExtractor
from ..compat import (
    compat_parse_qs,
    compat_urllib_parse_unquote,
    compat_urllib_parse_urlparse,
)
from ..utils import (
    ExtractorError,
    unified_strdate,
    int_or_none,
    qualities,
    unescapeHTML,
)


class OdnoklassnikiIE(InfoExtractor):
    _VALID_URL = r'https?://(?:(?:www|m|mobile)\.)?(?:odnoklassniki|ok)\.ru/(?:video(?:embed)?|web-api/video/moviePlayer)/(?P<id>[\d-]+)'
    _TESTS = [{
        # metadata in JSON
        'url': 'http://ok.ru/video/20079905452',
        'md5': '6ba728d85d60aa2e6dd37c9e70fdc6bc',
        'info_dict': {
            'id': '20079905452',
            'ext': 'mp4',
            'title': 'Культура меняет нас (прекрасный ролик!))',
            'duration': 100,
            'upload_date': '20141207',
            'uploader_id': '330537914540',
            'uploader': 'Виталий Добровольский',
            'like_count': int,
            'age_limit': 0,
        },
        'skip': 'Video has been blocked',
    }, {
        # metadataUrl
        'url': 'http://ok.ru/video/63567059965189-0?fromTime=5',
        'md5': '9676cf86eff5391d35dea675d224e131',
        'info_dict': {
            'id': '63567059965189-0',
            'ext': 'mp4',
            'title': 'Девушка без комплексов ...',
            'duration': 191,
            'upload_date': '20150518',
            'uploader_id': '534380003155',
            'uploader': '☭ Андрей Мещанинов ☭',
            'like_count': int,
            'age_limit': 0,
            'start_time': 5,
        },
    }, {
        # YouTube embed (metadataUrl, provider == USER_YOUTUBE)
        'url': 'http://ok.ru/video/64211978996595-1',
        'md5': '5d7475d428845cd2e13bae6f1a992278',
        'info_dict': {
            'id': '64211978996595-1',
            'ext': 'mp4',
            'title': 'Космическая среда от 26 августа 2015',
            'description': 'md5:848eb8b85e5e3471a3a803dae1343ed0',
            'duration': 440,
            'upload_date': '20150826',
            'uploader_id': '750099571',
            'uploader': 'Алина П',
            'age_limit': 0,
        },
    }, {
        # YouTube embed (metadata, provider == USER_YOUTUBE, no metadata.movie.title field)
        'url': 'http://ok.ru/video/62036049272859-0',
        'info_dict': {
            'id': '62036049272859-0',
            'ext': 'mp4',
            'title': 'МУЗЫКА ДОЖДЯ .',
            'description': 'md5:6f1867132bd96e33bf53eda1091e8ed0',
            'upload_date': '20120106',
            'uploader_id': '473534735899',
            'uploader': 'МARINA D',
            'age_limit': 0,
        },
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'http://ok.ru/web-api/video/moviePlayer/20079905452',
        'only_matching': True,
    }, {
        'url': 'http://www.ok.ru/video/20648036891',
        'only_matching': True,
    }, {
        'url': 'http://www.ok.ru/videoembed/20648036891',
        'only_matching': True,
    }, {
        'url': 'http://m.ok.ru/video/20079905452',
        'only_matching': True,
    }, {
        'url': 'http://mobile.ok.ru/video/20079905452',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        start_time = int_or_none(compat_parse_qs(
            compat_urllib_parse_urlparse(url).query).get('fromTime', [None])[0])

        video_id = self._match_id(url)

        webpage = self._download_webpage(
            'http://ok.ru/video/%s' % video_id, video_id)

        error = self._search_regex(
            r'[^>]+class="vp_video_stub_txt"[^>]*>([^<]+)<',
            webpage, 'error', default=None)
        if error:
            raise ExtractorError(error, expected=True)

        player = self._parse_json(
            unescapeHTML(self._search_regex(
                r'data-options=(?P<quote>["\'])(?P<player>{.+?%s.+?})(?P=quote)' % video_id,
                webpage, 'player', group='player')),
            video_id)

        flashvars = player['flashvars']

        metadata = flashvars.get('metadata')
        if metadata:
            metadata = self._parse_json(metadata, video_id)
        else:
            metadata = self._download_json(
                compat_urllib_parse_unquote(flashvars['metadataUrl']),
                video_id, 'Downloading metadata JSON')

        movie = metadata['movie']

        # Some embedded videos may not contain title in movie dict (e.g.
        # http://ok.ru/video/62036049272859-0) thus we allow missing title
        # here and it's going to be extracted later by an extractor that
        # will process the actual embed.
        provider = metadata.get('provider')
        title = movie['title'] if provider == 'UPLOADED_ODKL' else movie.get('title')

        thumbnail = movie.get('poster')
        duration = int_or_none(movie.get('duration'))

        author = metadata.get('author', {})
        uploader_id = author.get('id')
        uploader = author.get('name')

        upload_date = unified_strdate(self._html_search_meta(
            'ya:ovs:upload_date', webpage, 'upload date', default=None))

        age_limit = None
        adult = self._html_search_meta(
            'ya:ovs:adult', webpage, 'age limit', default=None)
        if adult:
            age_limit = 18 if adult == 'true' else 0

        like_count = int_or_none(metadata.get('likeCount'))

        info = {
            'id': video_id,
            'title': title,
            'thumbnail': thumbnail,
            'duration': duration,
            'upload_date': upload_date,
            'uploader': uploader,
            'uploader_id': uploader_id,
            'like_count': like_count,
            'age_limit': age_limit,
            'start_time': start_time,
        }

        if provider == 'USER_YOUTUBE':
            info.update({
                '_type': 'url_transparent',
                'url': movie['contentId'],
            })
            return info

        quality = qualities(('mobile', 'lowest', 'low', 'sd', 'hd'))

        formats = [{
            'url': f['url'],
            'ext': 'mp4',
            'format_id': f['name'],
            'quality': quality(f['name']),
        } for f in metadata['videos']]
        self._sort_formats(formats)

        info['formats'] = formats
        return info
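# --- Illustrative addendum (not part of youtube-dl): the (?P<id>...) group in
# _VALID_URL above is what _match_id() extracts. A simplified pattern, assumed
# equivalent for plain video URLs, shows the capture:
import re as _re
_m = _re.match(r'https?://(?:www\.)?ok\.ru/video(?:embed)?/(?P<id>[\d-]+)',
               'http://www.ok.ru/videoembed/20648036891')
assert _m is not None and _m.group('id') == '20648036891'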
# -*- encoding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (c) 2011-2012 Daniel (Avanzosc)
#    28/03/2012
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from osv import fields, osv
from tools.translate import _
import wizard
import pooler
import Image


class wizard_tire_scratch(wizard.interface):

    form1 = '''<?xml version="1.0"?>
<!-- view reconstructed: original form XML was stripped; minimal layout assumed -->
<form string="Scratch Tire">
    <field name="tire"/>
    <field name="origin"/>
    <field name="destination"/>
    <field name="odometer"/>
</form>
'''

    form1_fields = {
        'tire': {'string': 'Tire', 'type': 'many2one', 'relation': 'stock.production.lot', 'required': True, 'readonly': True},
        'origin': {'string': 'Origin', 'type': 'many2one', 'relation': 'stock.location', 'required': True, 'readonly': True},
        'destination': {'string': 'Destination', 'type': 'many2one', 'relation': 'stock.location', 'required': True, 'readonly': True},
        'odometer': {'string': 'Odometer', 'type': 'integer'},
    }

    form2 = '''<?xml version="1.0"?>
<!-- confirmation view reconstructed: original form XML was stripped; minimal placeholder assumed -->
<form string="Tire scratched">
    <label string="The tire has been moved to the scratch location."/>
</form>
'''
    form2_fields = {}

    def tire_init(self, cr, uid, data, context):
        move_data = {}
        pool = pooler.get_pool(cr.dbname)
        tire_obj = pool.get('stock.production.lot')
        move_obj = pool.get('stock.move')
        loc_obj = pool.get('stock.location')
        company_obj = pool.get('res.company')
        data_obj = pool.get('tire.stock.lot')
        tire_data_obj = data_obj
        tire = tire_obj.browse(cr, uid, data['id'])
        company = tire.company_id
        move_list = move_obj.search(cr, uid, [('prodlot_id', '=', tire.id)])
        locat_default = company.tire_stock
        destini = company.scratch.id
        if move_list == []:
            origin = locat_default.id
        else:
            loc_id = max(move_list)
            move = move_obj.browse(cr, uid, loc_id)
            origin = move.location_dest_id.id
        move_data = {'tire': tire.id, 'origin': origin, 'destination': destini}
        return move_data

    def tire_scratch(self, cr, uid, data, context):
        pool = pooler.get_pool(cr.dbname)
        tire_obj = pool.get('stock.production.lot')
        move_obj = pool.get('stock.move')
        vehic_obj = pool.get('fleet.vehicles')
        loc_obj = pool.get('stock.location')
        company_obj = pool.get('res.company')
        tire_data_obj = pool.get('tire.stock.lot')
        tire = tire_obj.browse(cr, uid, data['form']['tire'])
        company = tire.company_id
        move_list = move_obj.search(cr, uid, [('prodlot_id', '=', tire.id)])
        destination = loc_obj.browse(cr, uid, data['form']['destination'])
        destination_name = destination.name
        origin = loc_obj.browse(cr, uid, data['form']['origin'])
        origin_name = origin.name
        # Check whether the origin is a vehicle
        if origin.location_id:
            loc_parent_ori = origin.location_id.id
            if loc_parent_ori:
                vehic_list = vehic_obj.search(cr, uid, [('buslocat', '=', loc_parent_ori)])
            else:
                vehic_list = []
            if vehic_list == []:
                ori_vehicle = False
                res = 'error'
            else:
                vehicle = vehic_obj.browse(cr, uid, vehic_list[0])
                ori_vehicle = True
                res = 'moved'
        else:
            ori_vehicle = False
            res = 'moved'
        # End of origin check
        if ori_vehicle:
            # Origin = Vehicle: clear the tire slot the lot is being removed from
            update = {}  # stays empty if no position suffix matches below
            if origin_name.endswith("-1"):
                update = {'f_l_tire': False}
            elif origin_name.endswith("-2"):
                update = {'f_r_tire': False}
            if vehicle.tires == 6:
                if origin_name.endswith("-3"):
                    update = {'r_l_tire1': False}
                elif origin_name.endswith("-4"):
                    update = {'r_l_tire2': False}
                elif origin_name.endswith("-5"):
                    update = {'r_r_tire2': False}
                elif origin_name.endswith("-6"):
                    update = {'r_r_tire1': False}
            elif vehicle.tires > 6:
                if origin_name.endswith("-3"):
                    update = {'m_l_tire1': False}
                elif origin_name.endswith("-4"):
                    update = {'m_l_tire2': False}
                elif origin_name.endswith("-5"):
                    update = {'m_r_tire2': False}
                elif origin_name.endswith("-6"):
                    update = {'m_r_tire1': False}
                elif origin_name.endswith("-7"):
                    update = {'r_l_tire1': False}
                elif origin_name.endswith("-8"):
                    update = {'r_r_tire1': False}
            vehic_obj.write(cr, uid, vehicle.id, update)
        # Move data
        product_id = tire.product_id
        # Update the tire odometer
        odometer = data['form']['odometer']
        if move_list == []:
            odometer_text = str(data['form']['odometer'])
            tire_odometer = 1
            if odometer_text == '0':
                odometer = 1
            tire_val = {'tire_km': tire_odometer, 'odometers': odometer_text}
        else:
            if ori_vehicle:
                loc_id = max(move_list)
                move = move_obj.browse(cr, uid, loc_id)
                result = int(odometer) - move.odometer
                tire_odometer = tire.tire_km + result
                if tire.odometers:
                    odometer_text = tire.odometers + "\n" + str(data['form']['odometer'])
                else:
                    odometer_text = str(data['form']['odometer'])
                tire_val = {'tire_km': tire_odometer, 'odometers': odometer_text}
            else:
                if tire.odometers:
                    odometer_text = tire.odometers + "\n" + str(data['form']['odometer'])
                else:
                    odometer_text = str(data['form']['odometer'])
                tire_val = {'odometers': odometer_text}
        tire_obj.write(cr, uid, tire.id, tire_val)
        # End of tire odometer update
        # Tire data
        tire_data_list = tire_data_obj.search(cr, uid, [('lot_id', '=', tire.id)])
        if tire_data_list == []:
            tire_data_val = {
                'name': origin.name + ' | ' + tire.name + ' => ' + destination.name,
                'lot_id': tire.id,
                'origin': origin.id,
                'destination': destination.id,
                # 'data': time.strftime('%Y-%m-%d %H:%M:%S'),
                'odomold': 0,
                'odomnew': 0,
                'tire_km': 0,
                'tire_km_total': tire.tire_km,
            }
        else:
            tire_data_id = max(tire_data_list)
            tire_data = tire_data_obj.browse(cr, uid, tire_data_id)
            tire_data_val = {
                'name': origin.name + ' | ' + tire.name + ' => ' + destination.name,
                'lot_id': tire.id,
                'origin': origin.id,
                'destination': destination.id,
                # 'data': time.strftime('%Y-%m-%d %H:%M:%S'),
            }
            if ori_vehicle:
                # Update odometer from vehicle
                tire_data_val['odomold'] = tire_data.odomnew
                tire_data_val['odomnew'] = odometer
                tire_data_val['tire_km'] = odometer - tire_data.odomnew
                tire_data_val['tire_km_total'] = tire_data.tire_km_total + odometer - tire_data.odomnew
            else:
                tire_data_val['odomold'] = tire_data.odomnew
                tire_data_val['odomnew'] = odometer
                tire_data_val['tire_km'] = 0
                tire_data_val['tire_km_total'] = tire.tire_km
        # End of tire data
        # Move data
        move_data = {
            'product_id': tire.product_id.id,
            'name': origin.name + ' | ' + tire.name + ' => ' + destination.name,
            'location_id': origin.id,
            'product_uom': tire.product_id.product_tmpl_id.uom_id.id,
            'prodlot_id': tire.id,
            'location_dest_id': destination.id,
            'odometer': odometer,
        }
        # Create the stock move
        move_id = move_obj.create(cr, uid, move_data)
        # End of move data
        # Update the tire
        tire_obj.write(cr, uid, tire.id, {'tire_km': tire_data_val['tire_km_total'], 'odometers': odometer_text})
        # Create the tire data record
        move_data_reg = move_obj.browse(cr, uid, move_id)
        tire_data_val['data'] = move_data_reg.date
        data_id = tire_data_obj.create(cr, uid, tire_data_val)
        # End of tire data
        res = 'moved'
        return res

    states = {
        'init': {
            'actions': [tire_init],
            'result': {'type': 'form', 'arch': form1, 'fields': form1_fields,
                       'state': [('end', 'Cancel', 'gtk-cancel'), ('waste', 'Accept', 'gtk-ok')]}
        },
        'waste': {
            'actions': [],
            'result': {'type': 'choice', 'next_state': tire_scratch}
        },
        'moved': {
            'actions': [],
            'result': {'type': 'form', 'arch': form2, 'fields': form2_fields,
                       'state': [('end', 'Accept', 'gtk-ok')]}
        }
    }


wizard_tire_scratch('tire.scratch')
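# --- Illustrative addendum (not OpenERP code): a minimal sketch of how the
# `states` machine above is driven. A 'form' result renders and waits for a
# button, while a 'choice' result calls its next_state function (tire_scratch)
# and jumps to whatever state name it returns ('moved' here).
def _walk_wizard_states(states, start='init'):
    visited = []
    state = start
    while state in states:
        visited.append(state)
        result = states[state]['result']
        if result['type'] == 'choice':
            state = 'moved'  # assumed outcome of the choice function above
        else:
            break  # a real client renders result['arch'] and waits for input
    return visited  # e.g. ['init'] until the user presses Accept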
'''
An elasticsearch query pass-through.
Has auth control, so it is better than exposing your ES index directly.
'''

import json, urllib2

from flask import Blueprint, request, abort, make_response
from flask.ext.login import current_user

import portality.models as models
from portality.core import app
import portality.util as util


blueprint = Blueprint('query', __name__)


# pass queries direct to index; POST only for receipt of complex query objects
@blueprint.route('/', methods=['GET', 'POST'])
@blueprint.route('/<path:path>', methods=['GET', 'POST'])
@util.jsonp
def query(path='Record'):
    pathparts = path.strip('/').split('/')
    subpath = pathparts[0]
    if subpath.lower() in app.config.get('NO_QUERY', []):
        abort(401)
    try:
        klass = getattr(models, subpath[0].capitalize() + subpath[1:])
    except:
        abort(404)
    if len(pathparts) > 1 and pathparts[1] == '_mapping':
        resp = make_response(json.dumps(klass().query(endpoint='_mapping')))
    elif len(pathparts) == 2 and pathparts[1] not in ['_mapping', '_search']:
        if request.method == 'POST':
            abort(401)
        else:
            rec = klass().pull(pathparts[1])
            if rec:
                if not current_user.is_anonymous() or (app.config.get('PUBLIC_ACCESSIBLE_JSON', True) and rec.data.get('visible', True) and rec.data.get('accessible', True)):
                    resp = make_response(rec.json)
                else:
                    abort(401)
            else:
                abort(404)
    else:
        if request.method == "POST":
            if request.json:
                qs = request.json
            else:
                qs = dict(request.form).keys()[-1]
        elif 'q' in request.values:
            qs = {'query': {'query_string': {'query': request.values['q']}}}
        elif 'source' in request.values:
            qs = json.loads(urllib2.unquote(request.values['source']))
        else:
            qs = {'query': {'match_all': {}}}
        for item in request.values:
            if item not in ['q', 'source', 'callback', '_'] and isinstance(qs, dict):
                qs[item] = request.values[item]
        if 'sort' not in qs and app.config.get('DEFAULT_SORT', False):
            if path.lower() in app.config['DEFAULT_SORT'].keys():
                qs['sort'] = app.config['DEFAULT_SORT'][path.lower()]
        if current_user.is_anonymous() and app.config.get('ANONYMOUS_SEARCH_TERMS', False):
            if path.lower() in app.config['ANONYMOUS_SEARCH_TERMS'].keys():
                if 'bool' not in qs['query']:
                    pq = qs['query']
                    qs['query'] = {'bool': {'must': [pq]}}
                if 'must' not in qs['query']['bool']:
                    qs['query']['bool']['must'] = []
                qs['query']['bool']['must'] = qs['query']['bool']['must'] + app.config['ANONYMOUS_SEARCH_TERMS'][path.lower()]
        resp = make_response(json.dumps(klass().query(q=qs)))
    resp.mimetype = "application/json"
    return resp


from __future__ import absolute_import, division, print_function

import os
import platform
import socket
import sys
import textwrap

from tornado.testing import bind_unused_port

# Encapsulate the choice of unittest or unittest2 here.
# To be used as 'from tornado.test.util import unittest'.
if sys.version_info < (2, 7):
    # In py26, we must always use unittest2.
    import unittest2 as unittest  # type: ignore
else:
    # Otherwise, use whichever version of unittest was imported in
    # tornado.testing.
    from tornado.testing import unittest

skipIfNonUnix = unittest.skipIf(os.name != 'posix' or sys.platform == 'cygwin',
                                "non-unix platform")

# travis-ci.org runs our tests in an overworked virtual machine, which makes
# timing-related tests unreliable.
skipOnTravis = unittest.skipIf('TRAVIS' in os.environ,
                               'timing tests unreliable on travis')

skipOnAppEngine = unittest.skipIf('APPENGINE_RUNTIME' in os.environ,
                                  'not available on Google App Engine')

# Set the environment variable NO_NETWORK=1 to disable any tests that
# depend on an external network.
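# Illustrative addendum (not part of tornado's test util): skip decorators like
# the ones above, and skipIfNoNetwork defined next, are stacked directly onto
# individual tests, e.g.:
#
#     class ExternalServiceTest(unittest.TestCase):
#         @skipOnTravis
#         @skipIfNoNetwork
#         def test_fetch_remote(self):
#             ...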
skipIfNoNetwork = unittest.skipIf('NO_NETWORK' in os.environ, 'network access disabled') skipIfNoIPv6 = unittest.skipIf(not socket.has_ipv6, 'ipv6 support not present') skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 (yield from) not available') skipBefore35 = unittest.skipIf(sys.version_info < (3, 5), 'PEP 492 (async/await) not available') skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython', 'Not CPython implementation') def refusing_port(): """Returns a local port number that will refuse all connections. Return value is (cleanup_func, port); the cleanup function must be called to free the port to be reused. """ # On travis-ci, port numbers are reassigned frequently. To avoid # collisions with other tests, we use an open client-side socket's # ephemeral port number to ensure that nothing can listen on that # port. server_socket, port = bind_unused_port() server_socket.setblocking(1) client_socket = socket.socket() client_socket.connect(("127.0.0.1", port)) conn, client_addr = server_socket.accept() conn.close() server_socket.close() return (client_socket.close, client_addr[1]) def exec_test(caller_globals, caller_locals, s): """Execute ``s`` in a given context and return the result namespace. Used to define functions for tests in particular python versions that would be syntax errors in older versions. """ # Flatten the real global and local namespace into our fake # globals: it's all global from the perspective of code defined # in s. global_namespace = dict(caller_globals, **caller_locals) # type: ignore local_namespace = {} exec(textwrap.dedent(s), global_namespace, local_namespace) return local_namespace def is_coverage_running(): """Return whether coverage is currently running. """ if 'coverage' not in sys.modules: return False tracer = sys.gettrace() if tracer is None: return False try: mod = tracer.__module__ except AttributeError: try: mod = tracer.__class__.__module__ except AttributeError: return False return mod.startswith('coverage') from py2js import convert_py2js import inspect class JavaScript(object): """ Decorator that you can use to convert methods to JavaScript. 
    For example this code::

        @JavaScript
        class TestClass(object):
            def __init__(self):
                alert('TestClass created')
                self.reset()

            def reset(self):
                self.value = 0

            def inc(self):
                alert(self.value)
                self.value += 1

        print str(TestClass)

    prints::

        function TestClass() {
            return new _TestClass();
        }

        function _TestClass() {
            this.__init__();
        }

        _TestClass.__name__ = 'TestClass'
        _TestClass.prototype.__class__ = _TestClass
        _TestClass.prototype.__init__ = function() {
            alert("TestClass created");
            this.reset();
        }

        _TestClass.prototype.reset = function() {
            this.value = 0;
        }

        _TestClass.prototype.inc = function() {
            alert(this.value);
            this.value += 1;
        }

    Alternatively, an equivalent way is to use JavaScript() as a function::

        class TestClass(object):
            def __init__(self):
                alert('TestClass created')
                self.reset()

            def reset(self):
                self.value = 0

            def inc(self):
                alert(self.value)
                self.value += 1

        print str(JavaScript(TestClass))

    If you want to call the original function/class as Python, use the
    following syntax::

        js = JavaScript(TestClass)
        test_class = js()   # Python instance of TestClass() will be created
        js_class = str(js)  # A string with the JS code
    """

    def __init__(self, obj):
        self._obj = obj
        obj_source = inspect.getsource(obj)
        self._js = convert_py2js(obj_source)

    def __str__(self):
        return self._js

    def __call__(self, *args, **kwargs):
        return self._obj(*args, **kwargs)


# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models

from churchsource.configuration.models import Setting


class Migration(DataMigration):
    def forwards(self, orm):
        s = Setting(skey='FACE_SEARCH_ENABLE',
                    name='When True, enables check-in by facial recognition',
                    stype='tf', value='True')
        s.save()

    def backwards(self, orm):
        "Write your backwards methods here."

    models = {
        'configuration.setting': {
            'Meta': {'ordering': "('skey',)", 'object_name': 'Setting'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'skey': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '75', 'db_index': 'True'}),
            'stype': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '150', 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['configuration']
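# --- Illustrative addendum (not part of the original migration): a symmetric
# backwards() would delete the setting created in forwards(); the manager call
# below is an assumption about the Setting model's default manager.
#
#     def backwards(self, orm):
#         Setting.objects.filter(skey='FACE_SEARCH_ENABLE').delete()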
##
# Copyright 2009-2015 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild.  If not, see <http://www.gnu.org/licenses/>.
##
"""
Easyconfig module that provides functionality for tweaking existing easyconfig (.eb) files.

@author: Stijn De Weirdt (Ghent University)
@author: Dries Verdegem (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Pieter De Baets (Ghent University)
@author: Jens Timmerman (Ghent University)
@author: Toon Willems (Ghent University)
@author: Fotis Georgatos (Uni.Lu, NTUA)
"""

import copy
import glob
import os
import re
import tempfile
from distutils.version import LooseVersion
from vsc.utils import fancylogger
from vsc.utils.missing import nub

from easybuild.framework.easyconfig.default import get_easyconfig_parameter_default
from easybuild.framework.easyconfig.easyconfig import EasyConfig, create_paths, process_easyconfig
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.filetools import read_file, write_file
from easybuild.tools.module_naming_scheme.utilities import det_full_ec_version
from easybuild.tools.robot import resolve_dependencies
from easybuild.tools.toolchain import DUMMY_TOOLCHAIN_NAME
from easybuild.tools.utilities import quote_str

_log = fancylogger.getLogger('easyconfig.tweak', fname=False)

EASYCONFIG_TEMPLATE = "TEMPLATE"


def ec_filename_for(path):
    """
    Return a suiting file name for the easyconfig file at <path>, as determined by its contents.
    """
    ec = EasyConfig(path, validate=False)
    fn = "%s-%s.eb" % (ec['name'], det_full_ec_version(ec))
    return fn
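# --- Illustrative addendum (not part of EasyBuild): the naming convention
# ec_filename_for() produces, sketched with assumed example values instead of
# parsing a real easyconfig file.
def _demo_ec_filename():
    name, full_ver = 'HPL', '2.1-foss-2015a'  # assumed det_full_ec_version() output
    return "%s-%s.eb" % (name, full_ver)      # -> 'HPL-2.1-foss-2015a.eb'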
def tweak(easyconfigs, build_specs, targetdir=None):
    """Tweak list of easyconfigs according to provided build specifications."""

    # make sure easyconfigs all feature the same toolchain (otherwise we *will* run into trouble)
    toolchains = nub(['%(name)s/%(version)s' % ec['ec']['toolchain'] for ec in easyconfigs])
    if len(toolchains) > 1:
        raise EasyBuildError("Multiple toolchains featured in easyconfigs, --try-X not supported in that case: %s",
                             toolchains)

    if 'name' in build_specs or 'version' in build_specs:
        # no recursion if software name/version build specification are included
        # in that case, do not construct full dependency graph
        orig_ecs = easyconfigs
        _log.debug("Software name/version found, so not applying build specifications recursively: %s" % build_specs)
    else:
        # build specifications should be applied to the whole dependency graph
        # obtain full dependency graph for specified easyconfigs
        # easyconfigs will be ordered 'top-to-bottom': toolchain dependencies and toolchain first
        _log.debug("Applying build specifications recursively (no software name/version found): %s" % build_specs)
        orig_ecs = resolve_dependencies(easyconfigs, retain_all_deps=True)

    # keep track of originally listed easyconfigs (via their path)
    listed_ec_paths = [ec['spec'] for ec in easyconfigs]

    # determine toolchain based on last easyconfigs
    toolchain = orig_ecs[-1]['ec']['toolchain']
    _log.debug("Filtering using toolchain %s" % toolchain)

    # filter easyconfigs unless a dummy toolchain is used: drop toolchain and toolchain dependencies
    if toolchain['name'] != DUMMY_TOOLCHAIN_NAME:
        while orig_ecs[0]['ec']['toolchain'] != toolchain:
            orig_ecs = orig_ecs[1:]

    # generate tweaked easyconfigs, and continue with those instead
    tweaked_easyconfigs = []
    for orig_ec in orig_ecs:
        new_ec_file = tweak_one(orig_ec['spec'], None, build_specs, targetdir=targetdir)
        # only return tweaked easyconfigs for easyconfigs which were listed originally;
        # easyconfig files for dependencies are also generated but not included, and will be
        # resolved via --robot
        if orig_ec['spec'] in listed_ec_paths:
            new_ecs = process_easyconfig(new_ec_file, build_specs=build_specs)
            tweaked_easyconfigs.extend(new_ecs)

    return tweaked_easyconfigs
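# --- Illustrative addendum (not part of EasyBuild): the shape of the
# build_specs dict that tweak() consumes, as --try-* style options would
# produce; the toolchain name/version below are made up.
_example_build_specs = {
    'toolchain_name': 'intel',     # e.g. from --try-toolchain-name
    'toolchain_version': '2015a',  # e.g. from --try-toolchain-version
}
# tweaked = tweak(easyconfigs, _example_build_specs, targetdir='/tmp/tweaked')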
def tweak_one(src_fn, target_fn, tweaks, targetdir=None):
    """
    Tweak an easyconfig file with the given list of tweaks, using replacement via regular expressions.
    Note: this will only work for 'well-written' easyconfig files, i.e. ones that e.g. set the version
    once and then use the 'version' variable to construct the list of sources, and possibly other
    parameters that depend on the version (e.g. list of patch files, dependencies, version suffix, ...)

    The tweaks should be specified in a dictionary, with parameter names as keys that map to the
    values to be set.

    Reads easyconfig file at path <src_fn>, and writes the tweaked easyconfig file to <target_fn>.
    If no target filename is provided, a target filepath is generated based on the contents of
    the tweaked easyconfig file.
    """
    # read easyconfig file
    ectxt = read_file(src_fn)
    _log.debug("Contents of original easyconfig file, prior to tweaking:\n%s" % ectxt)

    # determine new toolchain if it's being changed
    keys = tweaks.keys()
    if 'toolchain_name' in keys or 'toolchain_version' in keys:
        # note: this assumes that the toolchain spec is single-line
        tc_regexp = re.compile(r"^\s*toolchain\s*=\s*(.*)$", re.M)
        res = tc_regexp.search(ectxt)
        if not res:
            raise EasyBuildError("No toolchain found in easyconfig file %s: %s", src_fn, ectxt)

        toolchain = eval(res.group(1))
        for key in ['name', 'version']:
            tc_key = "toolchain_%s" % key
            if tc_key in keys:
                toolchain.update({key: tweaks[tc_key]})
                tweaks.pop(tc_key)

        class TcDict(dict):
            """A special dict class that represents trivial toolchains properly."""
            def __repr__(self):
                return "{'name': '%(name)s', 'version': '%(version)s'}" % self

        tweaks.update({'toolchain': TcDict({'name': toolchain['name'], 'version': toolchain['version']})})
        _log.debug("New toolchain constructed: %s" % tweaks['toolchain'])

    additions = []

    # automagically clear out list of checksums if software version is being tweaked
    if 'version' in tweaks and 'checksums' not in tweaks:
        tweaks['checksums'] = []
        _log.warning("Tweaking version: checksums cleared, verification disabled.")

    # we need to treat list values separately, i.e. we prepend or append to the current value (if any)
    for (key, val) in tweaks.items():
        if isinstance(val, list):
            regexp = re.compile(r"^(?P<key>\s*%s)\s*=\s*(?P<val>\[(.|\n)*\])\s*$" % key, re.M)
            res = regexp.search(ectxt)
            if res:
                fval = [x for x in val if x != '']  # filter out empty strings
                # determine to prepend/append or overwrite by checking first/last list item:
                # - input ending with comma (empty tail list element) => prepend
                # - input starting with comma (empty head list element) => append
                # - no empty head/tail list element => overwrite
                if not val:
                    newval = '[]'
                    _log.debug("Clearing %s to empty list (was: %s)" % (key, res.group('val')))
                elif val[0] == '':
                    newval = "%s + %s" % (res.group('val'), fval)
                    _log.debug("Appending %s to %s" % (fval, key))
                elif val[-1] == '':
                    newval = "%s + %s" % (fval, res.group('val'))
                    _log.debug("Prepending %s to %s" % (fval, key))
                else:
                    newval = "%s" % fval
                    _log.debug("Overwriting %s with %s" % (key, fval))
                ectxt = regexp.sub("%s = %s" % (res.group('key'), newval), ectxt)
                _log.info("Tweaked %s list to '%s'" % (key, newval))
            elif get_easyconfig_parameter_default(key) != val:
                additions.append("%s = %s" % (key, val))

            tweaks.pop(key)

    # add parameters or replace existing ones
    for (key, val) in tweaks.items():
        regexp = re.compile(r"^(?P<key>\s*%s)\s*=\s*(?P<val>.*)$" % key, re.M)
        _log.debug("Regexp pattern for replacing '%s': %s" % (key, regexp.pattern))
        res = regexp.search(ectxt)
        if res:
            # only tweak if the value is different
            diff = True
            try:
                _log.debug("eval(%s): %s" % (res.group('val'), eval(res.group('val'))))
                diff = eval(res.group('val')) != val
            except (NameError, SyntaxError):
                # if eval fails, just fall back to string comparison
                _log.debug("eval failed for \"%s\", falling back to string comparison against \"%s\"...",
                           res.group('val'), val)
                diff = res.group('val') != val

            if diff:
                ectxt = regexp.sub("%s = %s" % (res.group('key'), quote_str(val)), ectxt)
                _log.info("Tweaked '%s' to '%s'" % (key, quote_str(val)))
        elif get_easyconfig_parameter_default(key) != val:
            additions.append("%s = %s" % (key, quote_str(val)))

    if additions:
        _log.info("Adding additional parameters to tweaked easyconfig file: %s" % additions)
        ectxt = '\n'.join([ectxt] + additions)

    _log.debug("Contents of tweaked easyconfig file:\n%s" % ectxt)

    # come up with suiting file name for tweaked easyconfig file if none was specified
    if target_fn is None:
        fn = None
        try:
            # obtain temporary filename
            fd, tmpfn = tempfile.mkstemp()
            os.close(fd)

            # write easyconfig to temporary file
            write_file(tmpfn, ectxt)

            # determine suiting filename
            fn = ec_filename_for(tmpfn)

            # get rid of temporary file
            os.remove(tmpfn)
        except OSError, err:
            raise EasyBuildError("Failed to determine suiting filename for tweaked easyconfig file: %s", err)

        if targetdir is None:
            targetdir = tempfile.gettempdir()
        target_fn = os.path.join(targetdir, fn)
        _log.debug("Generated file name for tweaked easyconfig file: %s" % target_fn)

    # write out tweaked easyconfig file
    write_file(target_fn, ectxt)
    _log.info("Tweaked easyconfig file written to %s" % target_fn)

    return target_fn
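# --- Illustrative addendum (not part of EasyBuild): an example tweaks dict for
# tweak_one(). Per the list handling above, an empty last element means
# "prepend to the existing list", an empty first element means "append", and
# toolchain_name/toolchain_version are folded into a single 'toolchain' line.
_example_tweaks = {
    'version': '1.2.3',
    'toolchain_name': 'GCC',
    'toolchain_version': '4.8.2',
    'patches': ['extra-fix.patch', ''],  # trailing '' => prepend
}
# tweak_one('software-1.2.2.eb', None, _example_tweaks)  # hypothetical call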
def pick_version(req_ver, avail_vers):
    """Pick version based on an optionally desired version and available versions.

    If a desired version is specified, the most recent version that is less recent than or
    equal to the desired version will be picked; else, the most recent version will be picked.

    This function returns both the version to be used (equal to the required version, if one
    was specified) and the version that was actually picked as the closest match.

    @param req_ver: required version
    @param avail_vers: list of available versions
    """
    if not avail_vers:
        raise EasyBuildError("Empty list of available versions passed.")

    selected_ver = None
    if req_ver:
        # if a desired version is specified,
        # retain the most recent version that's less recent than or equal to the desired version
        ver = req_ver
        if len(avail_vers) == 1:
            selected_ver = avail_vers[0]
        else:
            retained_vers = [v for v in avail_vers if v <= LooseVersion(ver)]
            if retained_vers:
                selected_ver = retained_vers[-1]
            else:
                # if no versions are available that are less recent, take the least recent version
                selected_ver = sorted([LooseVersion(v) for v in avail_vers])[0]
    else:
        # if no desired version is specified, just use last version
        ver = avail_vers[-1]
        selected_ver = ver

    return (ver, selected_ver)
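# --- Illustrative addendum (not part of EasyBuild), assuming the Python 2
# distutils string/LooseVersion comparison semantics relied on above:
#
#     pick_version('1.4.2', ['1.2', '1.4', '1.5'])  ->  ('1.4.2', '1.4')
#     pick_version(None,    ['1.2', '1.4', '1.5'])  ->  ('1.5', '1.5')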
" "I'm all out of ideas.", name) ecs_and_files = [(EasyConfig(f, validate=False), f) for f in ec_files] # TOOLCHAIN NAME # we can't rely on set, because we also need to be able to obtain a list of unique lists def unique(l): """Retain unique elements in a sorted list.""" l = sorted(l) if len(l) > 1: l2 = [l[0]] for x in l: if not x == l2[-1]: l2.append(x) return l2 else: return l # determine list of unique toolchain names tcnames = unique([x[0]['toolchain']['name'] for x in ecs_and_files]) _log.debug("Found %d unique toolchain names: %s" % (len(tcnames), tcnames)) # if a toolchain was selected, and we have no easyconfig files for it, try and use a template if specs.get('toolchain_name') and not specs['toolchain_name'] in tcnames: if EASYCONFIG_TEMPLATE in tcnames: _log.info("No easyconfig file for specified toolchain, but template is available.") else: raise EasyBuildError("No easyconfig file for %s with toolchain %s, and no template available.", name, specs['toolchain_name']) tcname = specs.pop('toolchain_name', None) handled_params.append('toolchain_name') # trim down list according to selected toolchain if tcname in tcnames: # known toolchain, so only retain those selected_tcname = tcname else: if len(tcnames) == 1 and not tcnames[0] == EASYCONFIG_TEMPLATE: # only one (non-template) toolchain availble, so use that tcname = tcnames[0] selected_tcname = tcname elif len(tcnames) == 1 and tcnames[0] == EASYCONFIG_TEMPLATE: selected_tcname = tcnames[0] else: # fall-back: use template toolchain if a toolchain name was specified if tcname: selected_tcname = EASYCONFIG_TEMPLATE else: # if multiple toolchains are available, and none is specified, we quit # we can't just pick one, how would we prefer one over the other? raise EasyBuildError("No toolchain name specified, and more than one available: %s.", tcnames) _log.debug("Filtering easyconfigs based on toolchain name '%s'..." % selected_tcname) ecs_and_files = [x for x in ecs_and_files if x[0]['toolchain']['name'] == selected_tcname] _log.debug("Filtered easyconfigs: %s" % [x[1] for x in ecs_and_files]) # TOOLCHAIN VERSION tcvers = unique([x[0]['toolchain']['version'] for x in ecs_and_files]) _log.debug("Found %d unique toolchain versions: %s" % (len(tcvers), tcvers)) tcver = specs.pop('toolchain_version', None) handled_params.append('toolchain_version') (tcver, selected_tcver) = pick_version(tcver, tcvers) _log.debug("Filtering easyconfigs based on toolchain version '%s'..." % selected_tcver) ecs_and_files = [x for x in ecs_and_files if x[0]['toolchain']['version'] == selected_tcver] _log.debug("Filtered easyconfigs: %s" % [x[1] for x in ecs_and_files]) # add full toolchain specification to specs if tcname and tcver: specs.update({'toolchain': {'name': tcname, 'version': tcver}}) handled_params.append('toolchain') else: if tcname: specs.update({'toolchain_name': tcname}) if tcver: specs.update({'toolchain_version': tcver}) # SOFTWARE VERSION vers = unique([x[0]['version'] for x in ecs_and_files]) _log.debug("Found %d unique software versions: %s" % (len(vers), vers)) ver = specs.pop('version', None) handled_params.append('version') (ver, selected_ver) = pick_version(ver, vers) if ver: specs.update({'version': ver}) _log.debug("Filtering easyconfigs based on software version '%s'..." 
% selected_ver) ecs_and_files = [x for x in ecs_and_files if x[0]['version'] == selected_ver] _log.debug("Filtered easyconfigs: %s" % [x[1] for x in ecs_and_files]) # go through parameters specified via --amend # always include versionprefix/suffix, because we might need it to generate a file name verpref = None versuff = None other_params = {'versionprefix': None, 'versionsuffix': None} for (param, val) in specs.items(): if not param in handled_params: other_params.update({param: val}) _log.debug("Filtering based on other parameters (specified via --amend): %s" % other_params) for (param, val) in other_params.items(): if param in ecs_and_files[0][0]._config: vals = unique([x[0][param] for x in ecs_and_files]) else: vals = [] filter_ecs = False # try and select a value from the available ones, or fail if we can't if val in vals: # if the specified value is available, use it selected_val = val _log.debug("Specified %s is available, so using it: %s" % (param, selected_val)) filter_ecs = True elif val: # if a value is specified, use that, even if it's not available yet selected_val = val # promote value to list if deemed appropriate if vals and type(vals[0]) == list and not type(val) == list: _log.debug("Promoting type of %s value to list, since original value was." % param) specs[param] = [val] _log.debug("%s is specified, so using it (even though it's not available yet): %s" % (param, selected_val)) elif len(vals) == 1: # if only one value is available, use that selected_val = vals[0] _log.debug("Only one %s available ('%s'), so picking that" % (param, selected_val)) filter_ecs = True else: # otherwise, we fail, because we don't know how to pick between different fixes raise EasyBuildError("No %s specified, and can't pick from available ones: %s", param, vals) if filter_ecs: _log.debug("Filtering easyconfigs based on %s '%s'..." % (param, selected_val)) ecs_and_files = [x for x in ecs_and_files if x[0][param] == selected_val] _log.debug("Filtered easyconfigs: %s" % [x[1] for x in ecs_and_files]) # keep track of versionprefix/suffix if param == "versionprefix": verpref = selected_val elif param == "versionsuffix": versuff = selected_val cnt = len(ecs_and_files) if not cnt == 1: fs = [x[1] for x in ecs_and_files] raise EasyBuildError("Failed to select a single easyconfig from available ones, %s left: %s", cnt, fs) else: (selected_ec, selected_ec_file) = ecs_and_files[0] # check whether selected easyconfig matches requirements match = True for (key, val) in specs.items(): if key in selected_ec._config: # values must be equal to have a full match if selected_ec[key] != val: match = False else: # if we encounter a key that is not set in the selected easyconfig, we don't have a full match match = False # if it matches, no need to tweak if match: _log.info("Perfect match found: %s" % selected_ec_file) return (False, selected_ec_file) # GENERATE # if no file path was specified, generate a file name if fp is None: cfg = { 'version': ver, 'toolchain': {'name': tcname, 'version': tcver}, 'versionprefix': verpref, 'versionsuffix': versuff, } installver = det_full_ec_version(cfg) fp = "%s-%s.eb" % (name, installver) # generate tweaked easyconfig file tweak_one(selected_ec_file, fp, specs) _log.info("Generated easyconfig file %s, and using it to build the requested software." % fp) return (True, fp) def obtain_ec_for(specs, paths, fp=None): """ Obtain an easyconfig file to the given specifications. 
    Either select between available ones, or use the best suited available one
    to generate a new easyconfig file.

    @param specs: dictionary specifying the software (e.g. name, version, toolchain) to find or
                  generate an easyconfig for; must at least include 'name'
    @param paths: a list of paths where easyconfig files can be found
    @param fp: the desired file name
    """
    # ensure that at least name is specified
    if not specs.get('name'):
        raise EasyBuildError("Supplied 'specs' dictionary doesn't even contain a name of a software package?")

    # collect paths to search in
    if not paths:
        raise EasyBuildError("No paths to look for easyconfig files, specify a path with --robot.")

    # select best easyconfig, or try to generate one that fits the requirements
    res = select_or_generate_ec(fp, paths, specs)

    if res:
        return res
    else:
        raise EasyBuildError("No easyconfig found for requested software, and also failed to generate one.")


data_sets = {
    '3k_Disordered': ('3k_run10_10us.35fs-DPPC.10-DOPC.70-CHOL.20.dir', 'ece75b704ec63ac9c39afd74b63497dc'),
    '3k_Ordered': ('3k_run32_10us.35fs-DPPC.50-DOPC.10-CHOL.40.dir', '211e1bcf46a3f19a978e4af63f067ce0'),
    '3k_Ordered_and_gel': ('3k_run43_10us.35fs-DPPC.70-DOPC.10-CHOL.20.dir', '87032ff78e4d01739aef5c6c0f5e4f04'),
    '6k_Disordered': ('6k_run10_25us.35fs-DPPC.10-DOPC.70-CHOL.20.dir', '13404cb8225819577e4821a976e9203b'),
    '6k_Ordered': ('6k_run32_25us.35fs-DPPC.50-DOPC.10-CHOL.40.dir', '95ef068b8deb69302c97f104b631d108'),
    '6k_Ordered_and_gel': ('6k_run43_25us.35fs-DPPC.70-DOPC.10-CHOL.20.dir', '3353e86d1cc2670820678c4c0c356206')
}

from collections import OrderedDict


def gen_data_set_dict():
    # Generating names for the data set
    names = {'x': 0, 'y': 1, 'z': 2, 'CHOL': 3, 'DPPC': 4, 'DIPC': 5, 'Head': 6, 'Tail': 7}
    for i in range(12):
        temp = 'BL' + str(i + 1)
        names.update({temp: i + 8})
    # dictionary sorted by value
    fields = OrderedDict(sorted(names.items(), key=lambda t: t[1]))
    return fields
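# --- Illustrative addendum: the mapping gen_data_set_dict() returns is column
# name -> column index, covering x/y/z, the lipid flags, and the twelve
# BL1..BL12 columns at indices 8..19, e.g.:
#
#     fields = gen_data_set_dict()
#     fields['x'] == 0; fields['Tail'] == 7; fields['BL12'] == 19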
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

from openerp.osv import fields, osv
from openerp.tools.translate import _


class account_fiscalyear_close_state(osv.osv_memory):
    """
    Closes Account Fiscalyear
    """
    _name = "account.fiscalyear.close.state"
    _description = "Fiscalyear Close state"
    _columns = {
        'fy_id': fields.many2one('account.fiscalyear',
                                 'Fiscal Year to Close', required=True,
                                 help="Select a fiscal year to close"),
    }

    def data_save(self, cr, uid, ids, context=None):
        """
        This function closes the account fiscal year.
        @param cr: the current row, from the database cursor,
        @param uid: the current user’s ID for security checks,
        @param ids: List of Account fiscalyear close state’s IDs
        """
        journal_period_obj = self.pool.get('account.journal.period')
        period_obj = self.pool.get('account.period')
        fiscalyear_obj = self.pool.get('account.fiscalyear')
        account_move_obj = self.pool.get('account.move')

        for data in self.read(cr, uid, ids, context=context):
            fy_id = data['fy_id'][0]

            account_move_ids = account_move_obj.search(cr, uid, [('period_id.fiscalyear_id', '=', fy_id),
                                                                 ('state', '=', "draft")], context=context)
            if account_move_ids:
                raise osv.except_osv(_('Invalid Action!'),
                                     _('In order to close a fiscalyear, you must first post related journal entries.'))

            cr.execute('UPDATE account_journal_period '
                       'SET state = %s '
                       'WHERE period_id IN (SELECT id FROM account_period '
                       'WHERE fiscalyear_id = %s)', ('done', fy_id))
            cr.execute('UPDATE account_period SET state = %s '
                       'WHERE fiscalyear_id = %s', ('done', fy_id))
            cr.execute('UPDATE account_fiscalyear '
                       'SET state = %s WHERE id = %s', ('done', fy_id))
        self.invalidate_cache(cr, uid, context=context)

        return {'type': 'ir.actions.act_window_close'}

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from marshmallow import fields from polyaxon.schemas.base import BaseCamelSchema, BaseConfig class AuthSchema(BaseCamelSchema): enabled = fields.Bool(allow_none=True) external = fields.Str(allow_none=True) use_resolver = fields.Bool(allow_none=True) @staticmethod def schema_config(): return AuthConfig class AuthConfig(BaseConfig): SCHEMA = AuthSchema REDUCED_ATTRIBUTES = ["enabled", "external", "useResolver"] def __init__(self, enabled=None, external=None, use_resolver=None): self.enabled = enabled self.external = external self.use_resolver = use_resolver import os from .exceptions import * GLOBAL_OPTS = set([ 'help_is_utf8', 'mid', 'marking', 'asis', 'force', 'nolocks', 'nomodcheck', 'nocurdir', 'nobackups', 'nodeadjoe', 'break_hardlinks', 'break_links', 'lightoff', 'exask', 'beep', 'nosta', 'keepup', 'pg', 'undo_keep', 'csmode', 'backpath', 'floatmouse', 'rtbutton', 'nonotice', 'noexmsg', 'noxon', 'orphan', 'dopadding', 'lines', 'baud', 'columns', 'help', 'skiptop', 'notite', 'nolinefeeds', 'usetabs', 'assume_color', 'assume_256color', 'guess_non_utf8', 'guess_utf8', 'guess_utf16', 'guess_crlf', 'guess_indent', 'menu_above', 'transpose', 'menu_explorer', 'menu_jump', 'notagsmenu', 'icase', 'wrap', 'autoswap', 'joe_state', 'mouse', 'joexterm', 'brpaste', 'pastehack', 'square', 'text_color', 'status_color', 'help_color', 'menu_color', 'prompt_color', 'msg_color', 'restore', 'search_prompting', 'regex', 'lmsg', 'rmsg', 'smsg', 'zmsg', 'xmsg', 'highlight', 'istep', 'wordwrap', 'autoindent' ]) FILE_OPTS = set([ 'cpara', 'cnotpara', 'encoding', 'syntax', 'hex', 'highlight', 'smarthome', 'indentfirst', 'smartbacks', 'tab', 'indentc', 'spaces', 'istep', 'purify', 'crlf', 'wordwrap', 'nobackup', 'autoindent', 'overwrite', 'picture', 'lmargin', 'rmargin', 'flowed', 'french', 'linums', 'rdonly', 'keymap', 'lmsg', 'rmsg', 'mfirst', 'mnew', 'mold', 'msnew', 'msold', 'highlighter_context', 'single_quoted', 'no_double_quoted', 'c_comment', 'cpp_comment', 'pound_comment', 'vhdl_comment', 'semi_comment', 'tex_comment', 'text_delimiters', ]) OPTS_WITH_ARGS = set([ # Global 'undo_keep', 'backpath', 'lines', 'baud', 'columns', 'skiptop', 'text_color', 'status_color', 'help_color', 'menu_color', 'prompt_color', 'msg_color', 'lmsg', 'rmsg', 'smsg', 'zmsg', # File 'cpara', 'cnotpara', 'encoding', 'syntax', 'tab', 'indentc', 'istep', 'lmargin', 'rmargin', 'keymap', 'mfirst', 'mnew', 'mold', 'msnew', 'msold', 'text_delimiters' ]) class RCFile(object): def __init__(self): self.globalopts = Options(GLOBAL_OPTS) self.fileopts = [] self.help = [] self.menus = [] self.macros = [] self.bindings = [] def serialize(self): result = [] result.extend(self.globalopts.serialize()) for section in (self.fileopts, self.help, self.menus, self.macros, self.bindings): for item in section: result.extend(item.serialize()) return b'\n'.join((item.encode('utf-8') if isinstance(item, str) else item) for item in result) def clone(self): other = RCFile() other.globalopts = self.globalopts.clone() other.fileopts = [fopt.clone() for fopt in self.fileopts] other.help = [help.clone() for help in self.help] other.menus = [menu.clone() for menu in self.menus] other.macros = [macro.clone() for macro in self.macros] other.bindings = [binding.clone() for binding in self.bindings] return other class Options(object): def __init__(self, properties): self._properties = properties self._values = {} def __getattr__(self, name): return self.getValue(name) def __setattr__(self, name, value): if name.startswith('_'): object.__setattr__(self, name, value) 
else: self.setValue(name, value) def getValue(self, name): if name not in self._properties: raise InvalidProperty(name) if name not in self._values: return None else: return self._values[name] def setValue(self, name, value): if name not in self._properties: raise InvalidProperty(name) else: if (name in OPTS_WITH_ARGS) == isinstance(value, bool): raise InvalidPropertyValue(name) self._values[name] = value def serialize(self): result = [] for k, v in self._values.items(): if v is True: result.append('-' + k) elif v is False: result.append('--' + k) elif v is not None: result.append('-%s %s' % (k, v)) return result def clone(self): other = Options(self._properties) other._values.update(self._values) return other class FileOptions(object): def __init__(self): self.name = '' self.extensions = [] self.patterns = [] self.options = Options(FILE_OPTS) def serialize(self): result = [] result.append('[%s]' % self.name) result.extend(self.extensions) result.extend('+' + pat for pat in self.patterns) result.extend(self.options.serialize()) return result def clone(self): other = FileOptions() other.name = self.name other.extensions = self.extensions[:] other.patterns = self.patterns[:] other.options = self.options.clone() return other class Menu(object): def __init__(self): self.name = '' self.back = '' self.items = [] def serialize(self): result = [':defmenu %s %s' % (self.name, self.back)] result.extend(item.serialize() for item in self.items) return result def clone(self): other = Menu() other.name = self.name other.back = self.back other.items = [item.clone() for item in self.items] return other class MenuItem(object): def __init__(self): self.macro = '' self.label = '' def serialize(self): return '%s\t%s' % (self.macro, self.label) def clone(self): other = MenuItem() other.macro = self.macro other.label = self.label return other class HelpScreen(object): def __init__(self): self.name = '' self.content = [] def serialize(self): return ['{' + self.name] + self.content + ['}'] def clone(self): other = HelpScreen() other.name = self.name other.content = self.content[:] return other class KeyBindingCollection(object): def __init__(self): self.name = '' self.inherits = None self.bindings = [] def serialize(self): if not self.name: # Uninitialized return [] result = [':' + self.name] if self.inherits is not None: result.append(':inherit ' + self.inherits) result.extend([f.serialize() for f in self.bindings]) return result def clone(self): other = KeyBindingCollection() other.name = self.name other.inherits = self.inherits other.bindings = [b.clone() for b in self.bindings] return other class Binding(object): def __init__(self): self.macro = None self.keys = [] def serialize(self): return self.macro + ' ' + ' '.join(self.keys) def clone(self): other = Binding() other.macro = self.macro other.keys = self.keys[:] return other class MacroDefinition(object): def __init__(self): self.name = None self.macro = None def serialize(self): return [':def %s %s' % (self.name, self.macro)] def clone(self): other = MacroDefinition() other.name = self.name other.macro = self.macro return other class ParserState(object): def __init__(self, rcfile, filegen): self.rcfile = rcfile self.file = filegen self.curline = None def begin(self): try: self.parseglobal() self.parsefileopts() self.parsemenus() self.parsehelp() self.parsebindings() except StopIteration: pass def parseglobal(self): while True: line = self.nextnows() if line.startswith('-'): self.parseoption(self.rcfile.globalopts) else: break def parseoption(self, opts): 
mode = not self.curline.startswith('--') parts = self.curline.split(None, 1) optionName = parts[0][1:] if len(parts) == 1 or optionName not in OPTS_WITH_ARGS: opts.setValue(optionName, mode) else: opts.setValue(optionName, self.curline[len(parts[0]) + 1:].rstrip('\r\n')) def parsemacro(self, line): i = 0 q = False bs = False while i < len(line): c = line[i] if q: if bs: bs = False elif c == '\\': bs = True elif c == '"': q = False elif c == '"': q = True elif c.isspace(): return line[:i], line[i:].lstrip() i += 1 return line, '' def parsefileopts(self): while self.curline.startswith('['): filetype = FileOptions() filetype.name = self.curline.strip().strip('[]') while True: line = self.nextnows() if line.startswith('*'): filetype.extensions.append(line.strip()) elif line.startswith('+'): filetype.patterns.append(line[1:].strip()) elif line.startswith('-'): self.parseoption(filetype.options) else: break self.rcfile.fileopts.append(filetype) def parsemenus(self): while self.curline.startswith(':defmenu'): menu = Menu() parts = self.curline.strip().split(None, 2) menu.name = parts[1] if len(parts) == 3: menu.back = parts[2] while True: line = self.nextnows() if line.startswith(':') or line.startswith('{'): break macro, rest = self.parsemacro(line) item = MenuItem() item.macro = macro item.label = rest.strip() menu.items.append(item) self.rcfile.menus.append(menu) def parsehelp(self): while self.curline.startswith('{'): screen = HelpScreen() screen.name = self.curline[1:].strip() while not self.nextbytes().startswith(b'}'): screen.content.append(self.curline.rstrip(b'\r\n')) self.rcfile.help.append(screen) self.nextnows() def parsebindings(self): currentSection = None while True: if self.curline.startswith(':def '): # Macro macro = MacroDefinition() _def, macro.name, macro.macro = self.curline.split(None, 2) self.rcfile.macros.append(macro) elif self.curline.startswith(':inherit '): # Inheritance specification currentSection.inherits = self.curline[len(':inherit '):].strip() elif self.curline.startswith(':'): # New section currentSection = KeyBindingCollection() self.rcfile.bindings.append(currentSection) parts = self.curline.split() currentSection.name = parts[0][1:] else: # Binding binding = Binding() binding.macro, keystr = self.parsemacro(self.curline) # Split out keys keys = keystr.split() for k in keys: if self.iskey(k): binding.keys.append(k) else: break currentSection.bindings.append(binding) self.nextnows() def iskey(self, k): if len(k) == 1: return True if k.startswith('U+'): return True if k.startswith('^') and len(k) == 2: return True if k.startswith('.k') and len(k) == 3: return True if k in ('MDOWN', 'MDRAG', 'MUP', 'M2DOWN', 'M2DRAG', 'M2UP', 'M3DOWN', 'M3DRAG', 'M3UP, MWDOWN', 'MWUP', 'SP', 'TO'): return True return False def nextbytes(self): self.curline = next(self.file) return self.curline def next(self): self.curline = next(self.file).decode('utf-8').strip('\r\n') return self.curline def nextnows(self): while True: line = self.next() if len(line.strip()) > 0 and not line[0].isspace(): return line def readFile(filename): with open(filename, 'rb') as f: for line in f: if line.startswith(b':include'): args = line.decode('utf-8').split() for included in readFile(os.path.join(os.path.dirname(filename), args[1])): yield included else: yield line def parse(filename): result = RCFile() ParserState(result, readFile(filename)).begin() return result """ OrderedDict variants of the default base classes. 
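A minimal usage sketch (assuming the usual networkx Graph API): the
OrderedDict factories make adjacency reporting follow insertion order.

    >>> G = OrderedGraph()
    >>> G.add_edge("a", "b")
    >>> G.add_edge("a", "c")
    >>> list(G["a"])
    ['b', 'c']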
""" from collections import OrderedDict from .graph import Graph from .multigraph import MultiGraph from .digraph import DiGraph from .multidigraph import MultiDiGraph __all__ = [] __all__.extend([ 'OrderedGraph', 'OrderedDiGraph', 'OrderedMultiGraph', 'OrderedMultiDiGraph', ]) class OrderedGraph(Graph): node_dict_factory = OrderedDict adjlist_outer_dict_factory = OrderedDict adjlist_inner_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedDiGraph(DiGraph): node_dict_factory = OrderedDict adjlist_outer_dict_factory = OrderedDict adjlist_inner_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiGraph(MultiGraph): node_dict_factory = OrderedDict adjlist_outer_dict_factory = OrderedDict adjlist_inner_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict class OrderedMultiDiGraph(MultiDiGraph): node_dict_factory = OrderedDict adjlist_outer_dict_factory = OrderedDict adjlist_inner_dict_factory = OrderedDict edge_key_dict_factory = OrderedDict edge_attr_dict_factory = OrderedDict """Support for OpenWRT (luci) routers.""" import logging from openwrt_luci_rpc import OpenWrtRpc import voluptuous as vol from homeassistant.components.device_tracker import ( DOMAIN, PLATFORM_SCHEMA as PARENT_PLATFORM_SCHEMA, DeviceScanner, ) from homeassistant.const import ( CONF_HOST, CONF_PASSWORD, CONF_SSL, CONF_USERNAME, CONF_VERIFY_SSL, ) import homeassistant.helpers.config_validation as cv _LOGGER = logging.getLogger(__name__) DEFAULT_SSL = False DEFAULT_VERIFY_SSL = True PLATFORM_SCHEMA = PARENT_PLATFORM_SCHEMA.extend( { vol.Required(CONF_HOST): cv.string, vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string, vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean, vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean, } ) def get_scanner(hass, config): """Validate the configuration and return a Luci scanner.""" scanner = LuciDeviceScanner(config[DOMAIN]) return scanner if scanner.success_init else None class LuciDeviceScanner(DeviceScanner): """This class scans for devices connected to an OpenWrt router.""" def __init__(self, config): """Initialize the scanner.""" self.router = OpenWrtRpc( config[CONF_HOST], config[CONF_USERNAME], config[CONF_PASSWORD], config[CONF_SSL], config[CONF_VERIFY_SSL], ) self.last_results = {} self.success_init = self.router.is_logged_in() def scan_devices(self): """Scan for new devices and return a list with found device IDs.""" self._update_info() return [device.mac for device in self.last_results] def get_device_name(self, device): """Return the name of the given device or None if we don't know.""" name = next( (result.hostname for result in self.last_results if result.mac == device), None, ) return name def get_extra_attributes(self, device): """ Get extra attributes of a device. Some known extra attributes that may be returned in the device tuple include MAC address (mac), network device (dev), IP address (ip), reachable status (reachable), associated router (host), hostname if known (hostname) among others. 
""" device = next( (result for result in self.last_results if result.mac == device), None ) return device._asdict() def _update_info(self): """Check the Luci router for devices.""" result = self.router.get_all_connected_devices(only_reachable=True) _LOGGER.debug("Luci get_all_connected_devices returned: %s", result) last_results = [] for device in result: if ( not hasattr(self.router.router.owrt_version, "release") or not self.router.router.owrt_version.release or self.router.router.owrt_version.release[0] < 19 or device.reachable ): last_results.append(device) self.last_results = last_results import os import sys from io import StringIO from django.core.management import call_command from django.db import connection, migrations def create_countries(apps, schema_editor): PoliticalDivision = apps.get_model("enhydris", "PoliticalDivision") if PoliticalDivision.objects.exists(): sys.stderr.write( "PoliticalDivision already has records. Apparently this is\n" "an old database being upgraded. I'm not adding countries.\n" ) return dirname = os.path.dirname(os.path.abspath(__file__)) countries_file = os.path.join(dirname, "0002-iso-3166-1-alpha-2.txt") pk = 0 with open(countries_file) as f: in_preamble = True for line in f: # Skip to blank line if in_preamble: if not line.strip(): in_preamble = False continue pk += 1 name, code = line.strip().split(";") short_name = name[:51] PoliticalDivision.objects.create( id=pk, name=name, short_name=short_name, code=code ) # Reset the id sequence sqlsequencereset = StringIO() call_command("sqlsequencereset", "enhydris", "--no-color", stdout=sqlsequencereset) sqlsequencereset.seek(0) reset_sequence = [line for line in sqlsequencereset if '"enhydris_gentity"' in line] assert len(reset_sequence) == 1 with connection.cursor() as cursor: cursor.execute(reset_sequence[0]) def create_interval_types(apps, schema_editor): IntervalType = apps.get_model("enhydris", "IntervalType") if IntervalType.objects.exists(): sys.stderr.write( "IntervalType already has records. Apparently this is\n" "an old database being upgraded. I'm not adding interval types.\n" ) return interval_types = { 1: "Sum", 2: "Average value", 3: "Minimum", 4: "Maximum", 5: "Vector average", } for pk, descr in interval_types.items(): IntervalType.objects.create( id=pk, descr=descr, value=descr.upper().replace(" ", "_") ) # Reset the id sequence sqlsequencereset = StringIO() call_command("sqlsequencereset", "enhydris", "--no-color", stdout=sqlsequencereset) sqlsequencereset.seek(0) reset_sequence = [ line for line in sqlsequencereset if '"enhydris_intervaltype"' in line ] assert len(reset_sequence) <= 1 # In later migrations we delete IntervalType. Somehow, then, this sequence does # not exist (probably a bug in the frozen models). This is why we have the "if" # below. 
if len(reset_sequence) == 1: with connection.cursor() as cursor: cursor.execute(reset_sequence[0]) def reverse_migration(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [("enhydris", "0001_initial")] operations = [ migrations.RunPython(create_countries, reverse_migration), migrations.RunPython(create_interval_types, reverse_migration), ] """ Views for user API """ from django.shortcuts import redirect from django.utils import dateparse from rest_framework import generics, views from rest_framework.decorators import api_view from rest_framework.response import Response from opaque_keys.edx.keys import UsageKey from opaque_keys import InvalidKeyError from courseware.access import is_mobile_available_for_user from courseware.model_data import FieldDataCache from courseware.module_render import get_module_for_descriptor from courseware.views import get_current_child, save_positions_recursively_up from student.models import CourseEnrollment, User from xblock.fields import Scope from xblock.runtime import KeyValueStore from xmodule.modulestore.django import modulestore from xmodule.modulestore.exceptions import ItemNotFoundError from .serializers import CourseEnrollmentSerializer, UserSerializer from .. import errors from ..utils import mobile_view, mobile_course_access @mobile_view(is_user=True) class UserDetail(generics.RetrieveAPIView): """ **Use Case** Get information about the specified user and access other resources the user has permissions for. Users are redirected to this endpoint after logging in. You can use the **course_enrollments** value in the response to get a list of courses the user is enrolled in. **Example request**: GET /api/mobile/v0.5/users/{username} **Response Values** * id: The ID of the user. * username: The username of the currently logged in user. * email: The email address of the currently logged in user. * name: The full name of the currently logged in user. * course_enrollments: The URI to list the courses the currently logged in user is enrolled in. """ queryset = ( User.objects.all() .select_related('profile', 'course_enrollments') ) serializer_class = UserSerializer lookup_field = 'username' @mobile_view(is_user=True) class UserCourseStatus(views.APIView): """ Endpoints for getting and setting meta data about a user's status within a given course. """ http_method_names = ["get", "patch"] def _last_visited_module_path(self, request, course): """ Returns the path from the last module visited by the current user in the given course up to the course module. If there is no such visit, the first item deep enough down the course tree is used. 
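The returned list is ordered leaf-first: the deepest (most specific)
module comes first and the course module itself comes last.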
""" field_data_cache = FieldDataCache.cache_for_descriptor_descendents( course.id, request.user, course, depth=2) course_module = get_module_for_descriptor(request.user, request, course, field_data_cache, course.id) current = course_module path = [] child = current while child: path.append(child) child = get_current_child(current) if child: current = child path.reverse() return path def _get_course_info(self, request, course): """ Returns the course status """ path = self._last_visited_module_path(request, course) path_ids = [unicode(module.location) for module in path] return Response({ "last_visited_module_id": path_ids[0], "last_visited_module_path": path_ids, }) def _update_last_visited_module_id(self, request, course, module_key, modification_date): """ Saves the module id if the found modification_date is less recent than the passed modification date """ field_data_cache = FieldDataCache.cache_for_descriptor_descendents( course.id, request.user, course, depth=2) try: module_descriptor = modulestore().get_item(module_key) except ItemNotFoundError: return Response(errors.ERROR_INVALID_MODULE_ID, status=400) module = get_module_for_descriptor(request.user, request, module_descriptor, field_data_cache, course.id) if modification_date: key = KeyValueStore.Key( scope=Scope.user_state, user_id=request.user.id, block_scope_id=course.location, field_name=None ) student_module = field_data_cache.find(key) if student_module: original_store_date = student_module.modified if modification_date < original_store_date: # old modification date so skip update return self._get_course_info(request, course) save_positions_recursively_up(request.user, request, field_data_cache, module) return self._get_course_info(request, course) @mobile_course_access() def get(self, request, course, *args, **kwargs): # pylint: disable=unused-argument """ **Use Case** Get meta data about user's status within a specific course **Example request**: GET /api/mobile/v0.5/users/{username}/course_status_info/{course_id} **Response Values** * last_visited_module_id: The id of the last module visited by the user in the given course * last_visited_module_path: The ids of the modules in the path from the last visited module to the course module """ return self._get_course_info(request, course) @mobile_course_access() def patch(self, request, course, *args, **kwargs): # pylint: disable=unused-argument """ **Use Case** Update meta data about user's status within a specific course **Example request**: PATCH /api/mobile/v0.5/users/{username}/course_status_info/{course_id} body: last_visited_module_id={module_id} modification_date={date} modification_date is optional. 
If modification_date is present, the update will only take effect
if it is later than the modification_date saved on the server.

**Response Values**

The same as doing a GET on this path
"""
module_id = request.DATA.get("last_visited_module_id")
modification_date_string = request.DATA.get("modification_date")

modification_date = None
if modification_date_string:
    modification_date = dateparse.parse_datetime(modification_date_string)
    if not modification_date or not modification_date.tzinfo:
        return Response(errors.ERROR_INVALID_MODIFICATION_DATE, status=400)

if module_id:
    try:
        module_key = UsageKey.from_string(module_id)
    except InvalidKeyError:
        return Response(errors.ERROR_INVALID_MODULE_ID, status=400)
    return self._update_last_visited_module_id(request, course, module_key, modification_date)
else:
    # The arguments are optional, so if there's no argument just succeed
    return self._get_course_info(request, course)


@mobile_view(is_user=True)
class UserCourseEnrollmentsList(generics.ListAPIView):
    """
    **Use Case**

        Get information about the courses the currently logged in user is
        enrolled in.

    **Example request**:

        GET /api/mobile/v0.5/users/{username}/course_enrollments/

    **Response Values**

        * created: The date the course was created.
        * mode: The type of certificate registration for this course: honor
          or certified.
        * is_active: Whether the course is currently active; true or false.
        * course: A collection of data about the course:

          * course_about: The URI to get the data for the course About page.
          * course_updates: The URI to get data for course updates.
          * number: The course number.
          * org: The organization that created the course.
          * video_outline: The URI to get the list of all videos the user can
            access in the course.
          * id: The unique ID of the course.
          * latest_updates: Reserved for future use.
          * end: The end date of the course.
          * name: The name of the course.
          * course_handouts: The URI to get data for course handouts.
          * start: The date and time the course starts.
          * course_image: The path to the course image.
    """
    queryset = CourseEnrollment.objects.all()
    serializer_class = CourseEnrollmentSerializer
    lookup_field = 'username'

    def get_queryset(self):
        enrollments = self.queryset.filter(
            user__username=self.kwargs['username'],
            is_active=True
        ).order_by('created').reverse()
        return [
            enrollment for enrollment in enrollments
            if enrollment.course and
            is_mobile_available_for_user(self.request.user, enrollment.course)
        ]


@api_view(["GET"])
@mobile_view()
def my_user_info(request):
    """
    Redirect to the currently-logged-in user's info page
    """
    return redirect("user-detail", username=request.user.username)
######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
#   Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
# Lesser General Public License for more details.
# # You should have received a copy of the GNU Lesser General Public # License along with this library; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA # 02110-1301 USA ######################### END LICENSE BLOCK ######################### # KOI8-R language model # Character Mapping Table: KOI8R_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, # 80 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, # 90 223,224,225, 68,226,227,228,229,230,231,232,233,234,235,236,237, # a0 238,239,240,241,242,243,244,245,246,247,248,249,250,251,252,253, # b0 27, 3, 21, 28, 13, 2, 39, 19, 26, 4, 23, 11, 8, 12, 5, 1, # c0 15, 16, 9, 7, 6, 14, 24, 10, 17, 18, 20, 25, 30, 29, 22, 54, # d0 59, 37, 44, 58, 41, 48, 53, 46, 55, 42, 60, 36, 49, 38, 31, 34, # e0 35, 43, 45, 32, 40, 52, 56, 33, 61, 62, 51, 57, 47, 63, 50, 70, # f0 ) win1251_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246, 68,247,248,249,250,251,252,253, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, ) latin5_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 
28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, ) macCyrillic_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 239,240,241,242,243,244,245,246,247,248,249,250,251,252, 68, 16, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27,255, ) IBM855_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 191,192,193,194, 68,195,196,197,198,199,200,201,202,203,204,205, 206,207,208,209,210,211,212,213,214,215,216,217, 27, 59, 54, 70, 3, 37, 21, 44, 28, 58, 13, 41, 2, 48, 39, 53, 19, 46,218,219, 220,221,222,223,224, 26, 55, 4, 42,225,226,227,228, 23, 60,229, 230,231,232,233,234,235, 11, 36,236,237,238,239,240,241,242,243, 8, 49, 12, 38, 5, 31, 1, 34, 15,244,245,246,247, 35, 16,248, 43, 9, 45, 7, 32, 6, 40, 14, 52, 24, 56, 10, 33, 17, 61,249, 250, 18, 62, 20, 51, 25, 57, 30, 47, 29, 63, 22, 50,251,252,255, ) IBM866_CharToOrderMap = ( 255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00 255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10 253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20 252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30 253,142,143,144,145,146,147,148,149,150,151,152, 74,153, 75,154, # 40 155,156,157,158,159,160,161,162,163,164,165,253,253,253,253,253, # 50 253, 71,172, 66,173, 65,174, 76,175, 64,176,177, 77, 72,178, 69, # 60 67,179, 78, 73,180,181, 79,182,183,184,185,253,253,253,253,253, # 70 37, 44, 33, 46, 41, 48, 56, 51, 42, 60, 36, 49, 38, 31, 34, 35, 45, 32, 40, 52, 53, 55, 58, 50, 57, 63, 70, 62, 61, 47, 59, 43, 3, 21, 10, 19, 13, 2, 24, 20, 4, 23, 11, 8, 12, 5, 1, 15, 191,192,193,194,195,196,197,198,199,200,201,202,203,204,205,206, 207,208,209,210,211,212,213,214,215,216,217,218,219,220,221,222, 223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238, 9, 7, 6, 14, 39, 26, 28, 22, 25, 29, 54, 18, 17, 30, 27, 16, 239, 68,240,241,242,243,244,245,246,247,248,249,250,251,252,255, ) # Model Table: # total sequences: 100% # first 512 sequences: 97.6601% # first 1024 sequences: 2.3389% # rest sequences: 0.1237% # negative sequences: 0.0009% 
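# The flattened matrix below is indexed as [order1 * 64 + order2]: it rates
# how typical it is for the character with frequency order `order1` to be
# followed by the one with order `order2` (3 = very common pair, 0 = rare
# or negative evidence for this encoding).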
RussianLangModel = ( 0,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,1,3,3,3,3,1,3,3,3,2,3,2,3,3, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,0,3,2,2,2,2,2,0,0,2, 3,3,3,2,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,2,3,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,2,2,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,2,3,3,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, 0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,0,0,3,3,3,3,3,3,3,3,3,3,3,2,1, 0,0,0,0,0,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,2,2,2,3,1,3,3,1,3,3,3,3,2,2,3,0,2,2,2,3,3,2,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,3,3,3,2,2,3,2,3,3,3,2,1,2,2,0,1,2,2,2,2,2,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,3,0,2,2,3,3,2,1,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,1,2,3,2,2,3,2,3,3,3,3,2,2,3,0,3,2,2,3,1,1,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,2,2,3,3,3,3,3,2,3,3,3,3,2,2,2,0,3,3,3,2,2,2,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,2,3,2,3,3,3,3,3,3,2,3,2,2,0,1,3,2,1,2,2,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,2,1,1,3,0,1,1,1,1,2,1,1,0,2,2,2,1,2,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,2,2,2,2,1,3,2,3,2,3,2,1,2,2,0,1,1,2,1,2,1,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,2,3,3,3,2,2,2,2,0,2,2,2,2,3,1,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,2,3,2,2,3,3,3,3,3,3,3,3,3,1,3,2,0,0,3,3,3,3,2,3,3,3,3,2,3,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,3,3,2,2,3,3,0,2,1,0,3,2,3,2,3,0,0,1,2,0,0,1,0,1,2,1,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,0,3,0,2,3,3,3,3,2,3,3,3,3,1,2,2,0,0,2,3,2,2,2,3,2,3,2,2,3,0,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,0,2,3,2,3,0,1,2,3,3,2,0,2,3,0,0,2,3,2,2,0,1,3,1,3,2,2,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,1,3,0,2,3,3,3,3,3,3,3,3,2,1,3,2,0,0,2,2,3,3,3,2,3,3,0,2,2,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,2,3,3,2,2,2,3,3,0,0,1,1,1,1,1,2,0,0,1,1,1,1,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,2,3,3,3,3,3,3,3,0,3,2,3,3,2,3,2,0,2,1,0,1,1,0,1,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,3,2,3,3,3,2,2,2,2,3,1,3,2,3,1,1,2,1,0,2,2,2,2,1,3,1,0, 0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0, 2,2,3,3,3,3,3,1,2,2,1,3,1,0,3,0,0,3,0,0,0,1,1,0,1,2,1,0,0,0,0,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,2,1,1,3,3,3,2,2,1,2,2,3,1,1,2,0,0,2,2,1,3,0,0,2,1,1,2,1,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,2,3,3,3,3,1,2,2,2,1,2,1,3,3,1,1,2,1,2,1,2,2,0,2,0,0,1,1,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,3,3,2,1,3,2,2,3,2,0,3,2,0,3,0,1,0,1,1,0,0,1,1,1,1,0,1,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,2,3,3,3,2,2,2,3,3,1,2,1,2,1,0,1,0,1,1,0,1,0,0,2,1,1,1,0,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0, 
3,1,1,2,1,2,3,3,2,2,1,2,2,3,0,2,1,0,0,2,2,3,2,1,2,2,2,2,2,3,1,0, 0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 3,3,3,3,3,1,1,0,1,1,2,2,1,1,3,0,0,1,3,1,1,1,0,0,0,1,0,1,1,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,1,3,3,3,2,0,0,0,2,1,0,1,0,2,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,0,1,0,0,2,3,2,2,2,1,2,2,2,1,2,1,0,0,1,1,1,0,2,0,1,1,1,0,0,1,1, 1,0,0,0,0,0,1,2,0,0,0,0,0,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,3,0,0,0,0,1,0,0,0,0,3,0,1,2,1,0,0,0,0,0,0,0,1,1,0,0,1,1, 1,0,1,0,1,2,0,0,1,1,2,1,0,1,1,1,1,0,1,1,1,1,0,1,0,0,1,0,0,1,1,0, 2,2,3,2,2,2,3,1,2,2,2,2,2,2,2,2,1,1,1,1,1,1,1,0,1,0,1,1,1,0,2,1, 1,1,1,1,1,1,1,1,2,1,1,1,1,1,1,1,1,1,1,0,1,0,1,1,0,1,1,1,0,1,1,0, 3,3,3,2,2,2,2,3,2,2,1,1,2,2,2,2,1,1,3,1,2,1,2,0,0,1,1,0,1,0,2,1, 1,1,1,1,1,2,1,0,1,1,1,1,0,1,0,0,1,1,0,0,1,0,1,0,0,1,0,0,0,1,1,0, 2,0,0,1,0,3,2,2,2,2,1,2,1,2,1,2,0,0,0,2,1,2,2,1,1,2,2,0,1,1,0,2, 1,1,1,1,1,0,1,1,1,2,1,1,1,2,1,0,1,2,1,1,1,1,0,1,1,1,0,0,1,0,0,1, 1,3,2,2,2,1,1,1,2,3,0,0,0,0,2,0,2,2,1,0,0,0,0,0,0,1,0,0,0,0,1,1, 1,0,1,1,0,1,0,1,1,0,1,1,0,2,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, 2,3,2,3,2,1,2,2,2,2,1,0,0,0,2,0,0,1,1,0,0,0,0,0,0,0,1,1,0,0,2,1, 1,1,2,1,0,2,0,0,1,0,1,0,0,1,0,0,1,1,0,1,1,0,0,0,0,0,1,0,0,0,0,0, 3,0,0,1,0,2,2,2,3,2,2,2,2,2,2,2,0,0,0,2,1,2,1,1,1,2,2,0,0,0,1,2, 1,1,1,1,1,0,1,2,1,1,1,1,1,1,1,0,1,1,1,1,1,1,0,1,1,1,1,1,1,0,0,1, 2,3,2,3,3,2,0,1,1,1,0,0,1,0,2,0,1,1,3,1,0,0,0,0,0,0,0,1,0,0,2,1, 1,1,1,1,1,1,1,0,1,0,1,1,1,1,0,1,1,1,0,0,1,1,0,1,0,0,0,0,0,0,1,0, 2,3,3,3,3,1,2,2,2,2,0,1,1,0,2,1,1,1,2,1,0,1,1,0,0,1,0,1,0,0,2,0, 0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 2,3,3,3,2,0,0,1,1,2,2,1,0,0,2,0,1,1,3,0,0,1,0,0,0,0,0,1,0,1,2,1, 1,1,2,0,1,1,1,0,1,0,1,1,0,1,0,1,1,1,1,0,1,0,0,0,0,0,0,1,0,1,1,0, 1,3,2,3,2,1,0,0,2,2,2,0,1,0,2,0,1,1,1,0,1,0,0,0,3,0,1,1,0,0,2,1, 1,1,1,0,1,1,0,0,0,0,1,1,0,1,0,0,2,1,1,0,1,0,0,0,1,0,1,0,0,1,1,0, 3,1,2,1,1,2,2,2,2,2,2,1,2,2,1,1,0,0,0,2,2,2,0,0,0,1,2,1,0,1,0,1, 2,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,2,1,1,1,0,1,0,1,1,0,1,1,1,0,0,1, 3,0,0,0,0,2,0,1,1,1,1,1,1,1,0,1,0,0,0,1,1,1,0,1,0,1,1,0,0,1,0,1, 1,1,0,0,1,0,0,0,1,0,1,1,0,0,1,0,1,0,1,0,0,0,0,1,0,0,0,1,0,0,0,1, 1,3,3,2,2,0,0,0,2,2,0,0,0,1,2,0,1,1,2,0,0,0,0,0,0,0,0,1,0,0,2,1, 0,1,1,0,0,1,1,0,0,0,1,1,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0, 2,3,2,3,2,0,0,0,0,1,1,0,0,0,2,0,2,0,2,0,0,0,0,0,1,0,0,1,0,0,1,1, 1,1,2,0,1,2,1,0,1,1,2,1,1,1,1,1,2,1,1,0,1,0,0,1,1,1,1,1,0,1,1,0, 1,3,2,2,2,1,0,0,2,2,1,0,1,2,2,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,1,1, 0,0,1,1,0,1,1,0,0,1,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 1,0,0,1,0,2,3,1,2,2,2,2,2,2,1,1,0,0,0,1,0,1,0,2,1,1,1,0,0,0,0,1, 1,1,0,1,1,0,1,1,1,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0, 2,0,2,0,0,1,0,3,2,1,2,1,2,2,0,1,0,0,0,2,1,0,0,2,1,1,1,1,0,2,0,2, 2,1,1,1,1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,0,0,0,1,1,1,1,0,1,0,0,1, 1,2,2,2,2,1,0,0,1,0,0,0,0,0,2,0,1,1,1,1,0,0,0,0,1,0,1,2,0,0,2,0, 1,0,1,1,1,2,1,0,1,0,1,1,0,0,1,0,1,1,1,0,1,0,0,0,1,0,0,1,0,1,1,0, 2,1,2,2,2,0,3,0,1,1,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 0,0,0,1,1,1,0,0,1,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0, 1,2,2,3,2,2,0,0,1,1,2,0,1,2,1,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1, 0,1,1,0,0,1,1,0,0,1,1,0,0,1,1,0,1,1,0,0,1,0,0,0,0,0,0,0,0,1,1,0, 2,2,1,1,2,1,2,2,2,2,2,1,2,2,0,1,0,0,0,1,2,2,2,1,2,1,1,1,1,1,2,1, 1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,0,1, 1,2,2,2,2,0,1,0,2,2,0,0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0, 0,0,1,0,0,1,0,0,0,0,1,0,1,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 
0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,2,0,0,0,2,2,2,0,1,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,1, 0,1,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,2,0,0,0,0,1,0,0,1,1,2,0,0,0,0,1,0,1,0,0,1,0,0,2,0,0,0,1, 0,0,1,0,0,1,0,0,0,1,1,0,0,0,0,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0, 1,2,2,2,1,1,2,0,2,1,1,1,1,0,2,2,0,0,0,0,0,0,0,0,0,1,1,0,0,0,1,1, 0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0, 1,0,2,1,2,0,0,0,0,0,1,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0, 0,0,1,0,1,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0, 1,0,0,0,0,2,0,1,2,1,0,1,1,1,0,1,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,1, 0,0,0,0,0,1,0,0,1,1,0,0,1,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1, 2,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 1,0,0,0,1,0,0,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0, 2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 1,1,1,0,1,0,1,0,0,1,1,1,1,0,0,0,1,0,0,0,0,1,0,0,0,1,0,1,0,0,0,0, 1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1, 1,1,0,1,1,0,1,0,1,0,0,0,0,1,1,0,1,1,0,0,0,0,0,1,0,1,1,0,1,0,0,0, 0,1,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,1,0,0,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0, ) Koi8rModel = { 'charToOrderMap': KOI8R_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "KOI8-R" } Win1251CyrillicModel = { 'charToOrderMap': win1251_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "windows-1251" } Latin5CyrillicModel = { 'charToOrderMap': latin5_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "ISO-8859-5" } MacCyrillicModel = { 'charToOrderMap': macCyrillic_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "MacCyrillic" }; Ibm866Model = { 'charToOrderMap': IBM866_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "IBM866" } Ibm855Model = { 'charToOrderMap': IBM855_CharToOrderMap, 'precedenceMatrix': RussianLangModel, 'mTypicalPositiveRatio': 0.976601, 'keepEnglishLetter': False, 'charsetName': "IBM855" } # flake8: noqa from django.contrib import admin from django.shortcuts import redirect from django.urls import reverse from django.utils.translation import ugettext_lazy as _ from waldur_core.core import admin as core_admin from . 
import models, tasks class ProfileAdmin(core_admin.ExtraActionsMixin, admin.ModelAdmin): list_display = ('username', 'user', 'is_active', 'agreement_date') readonly_fields = ('username', 'user', 'is_active', 'agreement_date') list_filter = ('is_active',) search_fields = ('username',) def has_add_permission(self, request, obj=None): return False def has_delete_permission(self, request, obj=None): if request.user.is_staff: return True return False def get_extra_actions(self): return [ self.sync_groups, self.sync_names, self.sync_gecos, ] def sync_groups(self, request): tasks.schedule_sync() self.message_user(request, _('Groups synchronization has been scheduled.')) return redirect(reverse('admin:waldur_freeipa_profile_changelist')) def sync_names(self, request): tasks.schedule_sync_names() self.message_user(request, _('Names synchronization has been scheduled.')) return redirect(reverse('admin:waldur_freeipa_profile_changelist')) def sync_gecos(self, request): tasks.schedule_sync_gecos() self.message_user(request, _('GECOS synchronization has been scheduled.')) return redirect(reverse('admin:waldur_freeipa_profile_changelist')) admin.site.register(models.Profile, ProfileAdmin) # Copyright 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import datetime from django import test from common import api from common import models from common import properties class DbCacheTest(test.TestCase): entry_keys = ('stream/popular@example.com/presence/12345', 'stream/popular@example.com/presence/12346') def test_with_cache(self): models.CachingModel.reset_get_count() models.CachingModel.reset_cache() models.CachingModel.enable_cache() api.entry_get_entries(api.ROOT, self.entry_keys) self.assertNotEqual(models.CachingModel.db_get_count(), 0) first_count = models.CachingModel.db_get_count() api.entry_get_entries(api.ROOT, self.entry_keys) self.assertEqual(models.CachingModel.db_get_count(), first_count) def test_without_cache(self): models.CachingModel.reset_get_count() models.CachingModel.reset_cache() models.CachingModel.enable_cache(False) api.entry_get_entries(api.ROOT, self.entry_keys) self.assertNotEqual(models.CachingModel.db_get_count(), 0) first_count = models.CachingModel.db_get_count() api.entry_get_entries(api.ROOT, self.entry_keys) self.assertNotEqual(models.CachingModel.db_get_count(), first_count) class PropertyTestCase(test.TestCase): def test_datetimeproperty_validate(self): p = properties.DateTimeProperty() validated = p.validate("2008-01-01 02:03:04") self.assertEquals(validated, datetime.datetime(2008, 01, 01, 02, 03, 04)) validated = None try: validated = p.validate("2008-01-01") except: pass self.assertEquals(validated, None) p = properties.DateTimeProperty() validated = p.validate("2008-01-01 02:03:04.000567") self.assertEquals(validated, datetime.datetime(2008, 01, 01, 02, 03, 04, 567)) from __future__ import absolute_import, division, unicode_literals from pip._vendor.six import text_type from bisect import bisect_left from ._base import Trie as ABCTrie class Trie(ABCTrie): def 
__init__(self, data): if not all(isinstance(x, text_type) for x in data.keys()): raise TypeError("All keys must be strings") self._data = data self._keys = sorted(data.keys()) self._cachestr = "" self._cachepoints = (0, len(data)) def __contains__(self, key): return key in self._data def __len__(self): return len(self._data) def __iter__(self): return iter(self._data) def __getitem__(self, key): return self._data[key] def keys(self, prefix=None): if prefix is None or prefix == "" or not self._keys: return set(self._keys) if prefix.startswith(self._cachestr): lo, hi = self._cachepoints start = i = bisect_left(self._keys, prefix, lo, hi) else: start = i = bisect_left(self._keys, prefix) keys = set() if start == len(self._keys): return keys while self._keys[i].startswith(prefix): keys.add(self._keys[i]) i += 1 self._cachestr = prefix self._cachepoints = (start, i) return keys def has_keys_with_prefix(self, prefix): if prefix in self._data: return True if prefix.startswith(self._cachestr): lo, hi = self._cachepoints i = bisect_left(self._keys, prefix, lo, hi) else: i = bisect_left(self._keys, prefix) if i == len(self._keys): return False return self._keys[i].startswith(prefix) ### # # Copyright Alan Kennedy. # # You may contact the copyright holder at this uri: # # http://www.xhaus.com/contact/modjy # # The licence under which this code is released is the Apache License v2.0. # # The terms and conditions of this license are listed in a file contained # in the distribution that also contained this file, under the name # LICENSE.txt. # # You may also read a copy of the license at the following web address. # # http://modjy.xhaus.com/LICENSE.txt # ### import sys import synchronize from java.io import File from modjy_exceptions import * class modjy_publisher: def init_publisher(self): self.cache = None if self.params['app_directory']: self.app_directory = self.expand_relative_path(self.params['app_directory']) else: self.app_directory = self.servlet_context.getRealPath('/') self.params['app_directory'] = self.app_directory if self.app_directory is not None and not self.app_directory in sys.path: sys.path.append(self.app_directory) def map_uri(self, req, environ): source_uri = '%s%s%s' % (self.app_directory, File.separator, self.params['app_filename']) callable_name = self.params['app_callable_name'] if self.params['callable_query_name']: query_string = req.getQueryString() if query_string: for name_val in query_string.split('&'): if name_val.find('=') != -1: name, value = name_val.split('=', 1) else: name, value = name_val, '' if name == self.params['callable_query_name']: callable_name = value else: callable_name = '' return source_uri, callable_name def get_app_object(self, req, environ): environ["SCRIPT_NAME"] = "%s%s" % (req.getContextPath(), req.getServletPath()) path_info = req.getPathInfo() or "" environ["PATH_INFO"] = path_info environ["PATH_TRANSLATED"] = File(self.app_directory, path_info).getPath() if self.params['app_import_name']: return self.get_app_object_importable(self.params['app_import_name']) else: if self.cache is None: self.cache = {} return self.get_app_object_old_style(req, environ) get_app_object = synchronize.make_synchronized(get_app_object) def get_app_object_importable(self, importable_name): self.log.debug("Attempting to import application callable '%s'\n" % (importable_name, )) # Under the importable mechanism, the cache contains a single object if self.cache is None: application, instantiable, method_name = self.load_importable(importable_name.strip()) if 
instantiable and self.params['cache_callables']: application = application() self.cache = application, instantiable, method_name application, instantiable, method_name = self.cache self.log.debug("Application is " + str(application)) if instantiable and not self.params['cache_callables']: application = application() self.log.debug("Instantiated application is " + str(application)) if method_name is not None: if not hasattr(application, method_name): self.log.fatal("Attribute error application callable '%s' as no method '%s'" % (application, method_name)) self.raise_exc(ApplicationNotFound, "Attribute error application callable '%s' as no method '%s'" % (application, method_name)) application = getattr(application, method_name) self.log.debug("Application method is " + str(application)) return application def load_importable(self, name): try: instantiable = False ; method_name = None importable_name = name if name.find('()') != -1: instantiable = True importable_name, method_name = name.split('()') if method_name.startswith('.'): method_name = method_name[1:] if not method_name: method_name = None module_path, from_name = importable_name.rsplit('.', 1) imported = __import__(module_path, globals(), locals(), [from_name]) imported = getattr(imported, from_name) return imported, instantiable, method_name except (ImportError, AttributeError), aix: self.log.fatal("Import error import application callable '%s': %s\n" % (name, str(aix))) self.raise_exc(ApplicationNotFound, "Failed to import app callable '%s': %s" % (name, str(aix))) def get_app_object_old_style(self, req, environ): source_uri, callable_name = self.map_uri(req, environ) source_filename = source_uri if not self.params['cache_callables']: self.log.debug("Caching of callables disabled") return self.load_object(source_filename, callable_name) if not self.cache.has_key( (source_filename, callable_name) ): self.log.debug("Callable object not in cache: %s#%s" % (source_filename, callable_name) ) return self.load_object(source_filename, callable_name) app_callable, last_mod = self.cache.get( (source_filename, callable_name) ) self.log.debug("Callable object was in cache: %s#%s" % (source_filename, callable_name) ) if self.params['reload_on_mod']: f = File(source_filename) if f.lastModified() > last_mod: self.log.info("Source file '%s' has been modified: reloading" % source_filename) return self.load_object(source_filename, callable_name) return app_callable def load_object(self, path, callable_name): try: app_ns = {} ; execfile(path, app_ns) app_callable = app_ns[callable_name] f = File(path) self.cache[ (path, callable_name) ] = (app_callable, f.lastModified()) return app_callable except IOError, ioe: self.raise_exc(ApplicationNotFound, "Application filename not found: %s" % path) except KeyError, k: self.raise_exc(NoCallable, "No callable named '%s' in %s" % (callable_name, path)) except Exception, x: self.raise_exc(NoCallable, "Error loading jython callable '%s': %s" % (callable_name, str(x)) ) import calendar import datetime import itertools import os import re import time import zlib from base.constants import CONSTANTS from datasources import thumbnails, twitter, twitterappengine, twitterdisplay from datasources.oauth_keys import SERVICE_PROVIDERS TWITTER_SERVICE_PROVIDER = SERVICE_PROVIDERS['tweetdigest:twitter'] DIGEST_LENGTH_SEC = 60 * 60 * 24 TWITTER_USERNAME_RE = re.compile('^[a-zA-Z0-9_]{1,15}$') def _get_digest_twitter_api(max_cache_age, key): # We don't actually need to use authentication for any of the data that # we fetch, 
but then we end up with IP address-based rate limiting, which # is depleted very quickly on App Engine (where there aren't a lot of # externally visible IP addresses). We therefore authenticate anyway, and we # spread that load over a few accounts. To ensure consistency (since # python-twitter incorporates the access token in the cache key), we always # want to consistently use the same access token for the same request, hence # the hashing based on the key that's passed in. access_token = TWITTER_SERVICE_PROVIDER.access_tokens[ zlib.adler32(key.encode('utf-8')) % len(TWITTER_SERVICE_PROVIDER.access_tokens)] api = twitter.Api( consumer_key=TWITTER_SERVICE_PROVIDER.consumer.key, consumer_secret=TWITTER_SERVICE_PROVIDER.consumer.secret, access_token_key=access_token.key, access_token_secret=access_token.secret, cache=twitterappengine.MemcacheCache()) api.SetCacheTimeout(max_cache_age) api.SetUserAgent('StreamSpigot/%s (+%s)' % ( os.environ.get('CURRENT_VERSION_ID', '1'), CONSTANTS.APP_URL, )) return api def _get_digest_timestamps(): # From the current time now = time.gmtime() # Go back to midnight digest_end_time = calendar.timegm([ now.tm_year, now.tm_mon, now.tm_mday, 0, 0, 0, now.tm_wday, now.tm_yday, now.tm_isdst ]) digest_start_time = digest_end_time - DIGEST_LENGTH_SEC # Twitter data can be as stale as the digest end time, since we don't care # about anything more recent (there may be some concurrency issues with # parallell invocations, but they're unlikely to actually matter at the load # we're expecting. max_cache_age = calendar.timegm(now) - digest_end_time return digest_start_time, digest_end_time, max_cache_age def get_digest_dates(): digest_start_time, digest_end_time, max_cache_age = _get_digest_timestamps() return (datetime.datetime.fromtimestamp(digest_start_time), datetime.datetime.fromtimestamp(digest_end_time)) def _process_digest_statuses( statuses, digest_start_time, digest_end_time, error_info, dev_mode, timezone=None): if not dev_mode: # Filter them for the ones that fall in the window digest_statuses = [ s for s in statuses if s.created_at_in_seconds <= digest_end_time and s.created_at_in_seconds > digest_start_time ] else: digest_statuses = statuses # Order them in chronological order digest_statuses.sort( lambda x, y: int(x.created_at_in_seconds - y.created_at_in_seconds)) if dev_mode: digest_statuses.reverse() # Group them by username status_groups = [] for username, statuses in itertools.groupby( digest_statuses, lambda status: status.user.id): statuses = list(statuses) status_groups.append(twitterdisplay.DisplayStatusGroup( user=statuses[0].user, statuses=statuses, thumbnail_size=thumbnails.SMALL_THUMBNAIL, timezone=timezone)) return status_groups, error_info class TwitterFetcher(object): def fetch(self): data, had_error = twitterappengine.exec_twitter_api( self._fetch, error_detail=self._id()) return data or [], had_error class ListTwitterFetcher(TwitterFetcher): def __init__(self, api, list_owner, list_id, digest_start_time): self._api = api self._list_owner = list_owner self._list_id = list_id self._digest_start_time = digest_start_time def _fetch(self): statuses = [] while True: max_id = len(statuses) and statuses[-1].id - 1 or None chunk = self._api.GetListTimeline( slug=self._list_id, owner_screen_name=self._list_owner, max_id=max_id, count=40, include_rts=True, include_entities=True) statuses.extend(chunk) if not chunk or \ chunk[-1].created_at_in_seconds < self._digest_start_time: break return statuses def _id(self): return 'list "%s/%s"' % 
(self._list_owner, self._list_id) class UserTwitterFetcher(TwitterFetcher): def __init__( self, api, username, digest_start_time, digest_end_time, dev_mode): self._api = api self._username = username self._digest_start_time = digest_start_time self._digest_end_time = digest_end_time self._dev_mode = dev_mode def _fetch(self): timeline = self._api.GetUserTimeline( screen_name=self._username, count=40) if not self._dev_mode: # We do the filtering now, so that we don't look up user objects that # we don't need. timeline = [ s for s in timeline if s.created_at_in_seconds <= self._digest_end_time and s.created_at_in_seconds > self._digest_start_time ] return timeline def _id(self): return 'user "%s"' % self._username def get_digest_for_list(list_owner, list_id, dev_mode): digest_start_time, digest_end_time, max_cache_age = _get_digest_timestamps() api = _get_digest_twitter_api( max_cache_age, key='%s/%s' % (list_owner, list_id)) user, had_error = twitterappengine.exec_twitter_api( lambda: api.GetUser(screen_name=list_owner, include_entities=False), error_detail='user %s' % list_owner) if not had_error: timezone = twitterdisplay.get_timezone_for_user(user) else: timezone = None fetcher = ListTwitterFetcher(api, list_owner, list_id, digest_start_time) statuses, had_error = fetcher.fetch() return _process_digest_statuses( statuses, digest_start_time, digest_end_time, had_error, dev_mode, timezone=timezone) def get_digest_for_usernames(usernames, dev_mode): digest_start_time, digest_end_time, max_cache_age = _get_digest_timestamps() statuses = [] error_usernames = [] for username in usernames: api = _get_digest_twitter_api(max_cache_age, key=username) fetcher = UserTwitterFetcher( api, username, digest_start_time, digest_end_time, dev_mode) user_statuses, had_error = fetcher.fetch() if had_error: error_usernames.append(username) else: statuses.extend(user_statuses) return _process_digest_statuses( statuses, digest_start_time, digest_end_time, error_usernames, dev_mode, timezone=None) class UserListsTwitterFetcher(TwitterFetcher): def __init__(self, api, username): self._api = api self._username = username def _fetch(self): return self._api.GetLists(screen_name=self._username) def _id(self): return 'lists "%s"' % self._username def get_lists(username): api = _get_digest_twitter_api(3600, key=username) fetcher = UserListsTwitterFetcher(api, username) lists, had_error = fetcher.fetch() return had_error and None or lists def is_valid_twitter_username(username): return TWITTER_USERNAME_RE.match(username) is not None """SCons.Tool.zip Tool-specific initialization for zip. There normally shouldn't be any need to import this module directly. It will usually be imported through the generic SCons.Tool.Tool() selection method. """ # # Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 The SCons Foundation # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be included # in all copies or substantial portions of the Software. 
# # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY # KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE # WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. # __revision__ = "src/engine/SCons/Tool/zip.py 5134 2010/08/16 23:02:40 bdeegan" import os.path import SCons.Builder import SCons.Defaults import SCons.Node.FS import SCons.Util try: import zipfile internal_zip = 1 except ImportError: internal_zip = 0 if internal_zip: zipcompression = zipfile.ZIP_DEFLATED def zip(target, source, env): compression = env.get('ZIPCOMPRESSION', 0) zf = zipfile.ZipFile(str(target[0]), 'w', compression) for s in source: if s.isdir(): for dirpath, dirnames, filenames in os.walk(str(s)): for fname in filenames: path = os.path.join(dirpath, fname) if os.path.isfile(path): zf.write(path) else: zf.write(str(s)) zf.close() else: zipcompression = 0 zip = "$ZIP $ZIPFLAGS ${TARGET.abspath} $SOURCES" zipAction = SCons.Action.Action(zip, varlist=['ZIPCOMPRESSION']) ZipBuilder = SCons.Builder.Builder(action = SCons.Action.Action('$ZIPCOM', '$ZIPCOMSTR'), source_factory = SCons.Node.FS.Entry, source_scanner = SCons.Defaults.DirScanner, suffix = '$ZIPSUFFIX', multi = 1) def generate(env): """Add Builders and construction variables for zip to an Environment.""" try: bld = env['BUILDERS']['Zip'] except KeyError: bld = ZipBuilder env['BUILDERS']['Zip'] = bld env['ZIP'] = 'zip' env['ZIPFLAGS'] = SCons.Util.CLVar('') env['ZIPCOM'] = zipAction env['ZIPCOMPRESSION'] = zipcompression env['ZIPSUFFIX'] = '.zip' def exists(env): return internal_zip or env.Detect('zip') # Local Variables: # tab-width:4 # indent-tabs-mode:nil # End: # vim: set expandtab tabstop=4 shiftwidth=4: #!/usr/bin/python import socket import socks SERVER_IP = '127.0.0.1' SERVER_PORT = 1081 if __name__ == '__main__': # Test 1: same source port IPv4 sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP) sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT) sock_out.bind(('127.0.0.1', 9000)) sock_in1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP) sock_in2 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP) sock_in1.bind(('127.0.0.1', 9001)) sock_in2.bind(('127.0.0.1', 9002)) sock_out.sendto(b'data', ('127.0.0.1', 9001)) result1 = sock_in1.recvfrom(8) sock_out.sendto(b'data', ('127.0.0.1', 9002)) result2 = sock_in2.recvfrom(8) sock_out.close() sock_in1.close() sock_in2.close() # make sure they're from the same source port assert result1 == result2 # Test 2: same source port IPv6 # try again from the same port but IPv6 sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP) sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT) sock_out.bind(('127.0.0.1', 9000)) sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.SOL_UDP) sock_in2 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.SOL_UDP) sock_in1.bind(('::1', 9001)) sock_in2.bind(('::1', 9002)) sock_out.sendto(b'data', ('::1', 9001)) result1 = sock_in1.recvfrom(8) sock_out.sendto(b'data', ('::1', 9002)) result2 = sock_in2.recvfrom(8) sock_out.close() sock_in1.close() sock_in2.close() # make sure they're from the same source port assert result1 == result2 # Test 3: different source ports IPv6 
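# Binding the client to a different local port (9003 instead of 9000) should
# make the proxy allocate a distinct UDP relay, so the reply address recorded
# by the receiver differs from the one seen in the earlier tests.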
sock_out = socks.socksocket(socket.AF_INET, socket.SOCK_DGRAM, socket.SOL_UDP) sock_out.set_proxy(socks.SOCKS5, SERVER_IP, SERVER_PORT) sock_out.bind(('127.0.0.1', 9003)) sock_in1 = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM, socket.SOL_UDP) sock_in1.bind(('::1', 9001)) sock_out.sendto(b'data', ('::1', 9001)) result3 = sock_in1.recvfrom(8) # make sure they're from different source ports assert result1 != result3 sock_out.close() sock_in1.close() # Copyright (C) 2014-2015 Andrey Antukh # Copyright (C) 2014-2015 Jesús Espino # Copyright (C) 2014-2015 David Barragán # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . import uuid from django.core.urlresolvers import reverse from django.conf import settings from taiga.users.models import User from taiga.base.utils.urls import get_absolute_url # Set this in settings.PROJECT_MODULES_CONFIGURATORS["bitbucket"] def get_or_generate_config(project): config = project.modules_config.config if config and "bitbucket" in config: g_config = project.modules_config.config["bitbucket"] else: g_config = { "secret": uuid.uuid4().hex, "valid_origin_ips": settings.BITBUCKET_VALID_ORIGIN_IPS, } url = reverse("bitbucket-hook-list") url = get_absolute_url(url) url = "%s?project=%s&key=%s" % (url, project.id, g_config["secret"]) g_config["webhooks_url"] = url return g_config def get_bitbucket_user(user_id): return User.objects.get(is_system=True, username__startswith="bitbucket") # # Copyright 2009 Free Software Foundation, Inc. # # This file is part of GNU Radio # # GNU Radio is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 3, or (at your option) # any later version. # # GNU Radio is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with GNU Radio; see the file COPYING. If not, write to # the Free Software Foundation, Inc., 51 Franklin Street, # Boston, MA 02110-1301, USA. 
#
from gnuradio import gru
import wx

DEFAULT_WIN_SIZE = (600, 300)

APPEND_EVENT = wx.NewEventType()
EVT_APPEND_EVENT = wx.PyEventBinder(APPEND_EVENT, 0)

class AppendEvent(wx.PyEvent):
    def __init__(self, text):
        wx.PyEvent.__init__(self)
        self.SetEventType(APPEND_EVENT)
        self.text = text

    def Clone(self):
        # wx may clone posted events; return a copy carrying the same text
        return AppendEvent(self.text)

class termsink(wx.Panel):
    def __init__(self,
                 parent,
                 msgq,
                 size=DEFAULT_WIN_SIZE,
                 ):
        wx.Panel.__init__(self,
                          parent,
                          size=size,
                          style=wx.SIMPLE_BORDER,
                          )

        self.text_ctrl = wx.TextCtrl(self,
                                     wx.ID_ANY,
                                     value="",
                                     size=size,
                                     style=wx.TE_MULTILINE|wx.TE_READONLY,
                                     )

        main_sizer = wx.BoxSizer(wx.VERTICAL)
        main_sizer.Add(self.text_ctrl, 1, wx.EXPAND)
        self.SetSizerAndFit(main_sizer)

        EVT_APPEND_EVENT(self, self.evt_append)
        self.runner = gru.msgq_runner(msgq, self.handle_msg)

    def handle_msg(self, msg):
        # This gets called in the queue runner thread context
        # For now, just add whatever the user sends to the text control
        text = msg.to_string()

        # Create a wxPython event and post it to the event queue
        evt = AppendEvent(text)
        wx.PostEvent(self, evt)
        del evt

    def evt_append(self, evt):
        # This gets called by the wxPython event queue runner
        self.text_ctrl.AppendText(evt.text)

#!/usr/bin/python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements.  See the NOTICE file
distributed with this work for additional information
regarding copyright ownership.  The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License.  You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
""" from resource_management.libraries.script.script import Script from resource_management.libraries.functions.check_process_status import check_process_status from resource_management.libraries.functions.stack_features import check_stack_feature from resource_management.libraries.functions.constants import StackFeature from resource_management.core.exceptions import Fail from resource_management.core.resources.system import Execute from resource_management.libraries.providers.hdfs_resource import WebHDFSUtil from resource_management.libraries.providers.hdfs_resource import HdfsResourceProvider from resource_management import is_empty from resource_management import shell from resource_management.libraries.functions.decorator import retry from resource_management.core.logger import Logger from resource_management.libraries.functions.format import format from resource_management.libraries.functions import conf_select, stack_select from livy_service import livy_service from setup_livy import setup_livy class LivyServer(Script): def install(self, env): import params env.set_params(params) self.install_packages(env) def configure(self, env, upgrade_type=None): import params env.set_params(params) setup_livy(env, 'server', upgrade_type=upgrade_type, action = 'config') def start(self, env, upgrade_type=None): import params env.set_params(params) if params.has_ats and params.has_livyserver: Logger.info("Verifying DFS directories where ATS stores time line data for active and completed applications.") self.wait_for_dfs_directories_created([params.entity_groupfs_store_dir, params.entity_groupfs_active_dir]) self.configure(env) livy_service('server', upgrade_type=upgrade_type, action='start') def stop(self, env, upgrade_type=None): import params env.set_params(params) livy_service('server', upgrade_type=upgrade_type, action='stop') def status(self, env): import status_params env.set_params(status_params) check_process_status(status_params.livy_server_pid_file) # TODO move out and compose with similar method in resourcemanager.py def wait_for_dfs_directories_created(self, dirs): import params ignored_dfs_dirs = HdfsResourceProvider.get_ignored_resources_list(params.hdfs_resource_ignore_file) if params.security_enabled: Execute(format("{kinit_path_local} -kt {livy_kerberos_keytab} {livy_principal}"), user=params.livy_user ) Execute(format("{kinit_path_local} -kt {hdfs_user_keytab} {hdfs_principal_name}"), user=params.hdfs_user ) for dir_path in dirs: self.wait_for_dfs_directory_created(dir_path, ignored_dfs_dirs) @retry(times=8, sleep_time=20, backoff_factor=1, err_class=Fail) def wait_for_dfs_directory_created(self, dir_path, ignored_dfs_dirs): import params if not is_empty(dir_path): dir_path = HdfsResourceProvider.parse_path(dir_path) if dir_path in ignored_dfs_dirs: Logger.info("Skipping DFS directory '" + dir_path + "' as it's marked to be ignored.") return Logger.info("Verifying if DFS directory '" + dir_path + "' exists.") dir_exists = None if WebHDFSUtil.is_webhdfs_available(params.is_webhdfs_enabled, params.default_fs): # check with webhdfs is much faster than executing hdfs dfs -test util = WebHDFSUtil(params.hdfs_site, params.hdfs_user, params.security_enabled) list_status = util.run_command(dir_path, 'GETFILESTATUS', method='GET', ignore_status_codes=['404'], assertable_result=False) dir_exists = ('FileStatus' in list_status) else: # have to do time expensive hdfs dfs -d check. 
dfs_ret_code = shell.call(format("hdfs --config {hadoop_conf_dir} dfs -test -d " + dir_path), user=params.livy_user)[0] dir_exists = not dfs_ret_code #dfs -test -d returns 0 in case the dir exists if not dir_exists: raise Fail("DFS directory '" + dir_path + "' does not exist !") else: Logger.info("DFS directory '" + dir_path + "' exists.") def get_component_name(self): return "livy-server" def pre_upgrade_restart(self, env, upgrade_type=None): import params env.set_params(params) if params.version and check_stack_feature(StackFeature.ROLLING_UPGRADE, params.version): Logger.info("Executing Livy Server Stack Upgrade pre-restart") conf_select.select(params.stack_name, "spark", params.version) stack_select.select("livy-server", params.version) def get_log_folder(self): import params return params.livy_log_dir def get_user(self): import params return params.livy_user if __name__ == "__main__": LivyServer().execute() #!/usr/bin/env python2 # Copyright (c) 2014 The BeCoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # # Test node handling # from test_framework.test_framework import BeCoinTestFramework from test_framework.util import * import base64 try: import http.client as httplib except ImportError: import httplib try: import urllib.parse as urlparse except ImportError: import urlparse class NodeHandlingTest (BeCoinTestFramework): def run_test(self): ########################### # setban/listbanned tests # ########################### assert_equal(len(self.nodes[2].getpeerinfo()), 4) #we should have 4 nodes at this point self.nodes[2].setban("127.0.0.1", "add") time.sleep(3) #wait till the nodes are disconected assert_equal(len(self.nodes[2].getpeerinfo()), 0) #all nodes must be disconnected at this point assert_equal(len(self.nodes[2].listbanned()), 1) self.nodes[2].clearbanned() assert_equal(len(self.nodes[2].listbanned()), 0) self.nodes[2].setban("127.0.0.0/24", "add") assert_equal(len(self.nodes[2].listbanned()), 1) try: self.nodes[2].setban("127.0.0.1", "add") #throws exception because 127.0.0.1 is within range 127.0.0.0/24 except: pass assert_equal(len(self.nodes[2].listbanned()), 1) #still only one banned ip because 127.0.0.1 is within the range of 127.0.0.0/24 try: self.nodes[2].setban("127.0.0.1", "remove") except: pass assert_equal(len(self.nodes[2].listbanned()), 1) self.nodes[2].setban("127.0.0.0/24", "remove") assert_equal(len(self.nodes[2].listbanned()), 0) self.nodes[2].clearbanned() assert_equal(len(self.nodes[2].listbanned()), 0) ##test persisted banlist self.nodes[2].setban("127.0.0.0/32", "add") self.nodes[2].setban("127.0.0.0/24", "add") self.nodes[2].setban("192.168.0.1", "add", 1) #ban for 1 seconds self.nodes[2].setban("2001:4d48:ac57:400:cacf:e9ff:fe1d:9c63/19", "add", 1000) #ban for 1000 seconds listBeforeShutdown = self.nodes[2].listbanned(); assert_equal("192.168.0.1/255.255.255.255", listBeforeShutdown[2]['address']) #must be here time.sleep(2) #make 100% sure we expired 192.168.0.1 node time #stop node stop_node(self.nodes[2], 2) self.nodes[2] = start_node(2, self.options.tmpdir) listAfterShutdown = self.nodes[2].listbanned(); assert_equal("127.0.0.0/255.255.255.0", listAfterShutdown[0]['address']) assert_equal("127.0.0.0/255.255.255.255", listAfterShutdown[1]['address']) assert_equal("2001:4000::/ffff:e000:0:0:0:0:0:0", listAfterShutdown[2]['address']) ########################### # RPC disconnectnode test # ########################### url = 
urlparse.urlparse(self.nodes[1].url) self.nodes[0].disconnectnode(url.hostname+":"+str(p2p_port(1))) time.sleep(2) #disconnecting a node needs a little bit of time for node in self.nodes[0].getpeerinfo(): assert(node['addr'] != url.hostname+":"+str(p2p_port(1))) connect_nodes_bi(self.nodes,0,1) #reconnect the node found = False for node in self.nodes[0].getpeerinfo(): if node['addr'] == url.hostname+":"+str(p2p_port(1)): found = True assert(found) if __name__ == '__main__': NodeHandlingTest ().main () #----------------------------------------------------------------------- #Copyright 2013 Centrum Wiskunde & Informatica, Amsterdam # #Author: Daniel M. Pelt #Contact: D.M.Pelt@cwi.nl #Website: http://dmpelt.github.io/pyastratoolbox/ # # #This file is part of the Python interface to the #All Scale Tomographic Reconstruction Antwerp Toolbox ("ASTRA Toolbox"). # #The Python interface to the ASTRA Toolbox is free software: you can redistribute it and/or modify #it under the terms of the GNU General Public License as published by #the Free Software Foundation, either version 3 of the License, or #(at your option) any later version. # #The Python interface to the ASTRA Toolbox is distributed in the hope that it will be useful, #but WITHOUT ANY WARRANTY; without even the implied warranty of #MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #GNU General Public License for more details. # #You should have received a copy of the GNU General Public License #along with the Python interface to the ASTRA Toolbox. If not, see . # #----------------------------------------------------------------------- from . import astra_c as a def credits(): """Print credits of the ASTRA Toolbox.""" return a.credits() def use_cuda(): """Test if CUDA is enabled. :returns: :class:`bool` -- ``True`` if CUDA is enabled. """ return a.use_cuda() def version(printToScreen=False): """Check version of the ASTRA Toolbox. :param printToScreen: If ``True``, print version string. If ``False``, return version integer. :type printToScreen: :class:`bool` :returns: :class:`string` or :class:`int` -- The version string or integer. """ return a.version(printToScreen) def set_gpu_index(idx): """Set default GPU index to use. :param idx: GPU index :type idx: :class:`int` """ a.set_gpu_index(idx) import argparse import ast import sys from keras_wrapper.extra.read_write import pkl2dict def parse_args(): parser = argparse.ArgumentParser("Rebuilds a python file (like config.py) from a given config instance.") parser.add_argument("-c", "--config", required=False, help="Config pkl for loading the model configuration. " "If not specified, hyperparameters " "are read from config.py") parser.add_argument("-d", "--dest", required=False, type=str, default=None, help="Destination file. If unspecidied, standard output") parser.add_argument("-ch", "--changes", nargs="*", help="Changes to the config. Following the syntax Key=Value", default="") return parser.parse_args() if __name__ == "__main__": args = parse_args() if args.config is None: from config import load_parameters params = load_parameters() else: params = pkl2dict(args.config) try: for arg in args.changes: try: k, v = arg.split('=') except ValueError: print 'Overwritten arguments must have the form key=Value. 
\n Currently are: %s' % str(args.changes) exit(1) try: params[k] = ast.literal_eval(v) except ValueError: params[k] = v except ValueError: print 'Error processing arguments: (', k, ",", v, ")" exit(2) if args.dest is not None: print args.dest output = open(args.dest, 'w') else: output = sys.stdout # Print header output.write('def load_parameters():\n') output.write('\t"""\n') output.write('\tLoads the defined hyperparameters\n') output.write('\t:return parameters: Dictionary of loaded parameters\n') output.write('\t"""\n') for key, value in params.iteritems(): output.write('\t' + key + '=' + str(value) + '\n') # Print ending output.write('\t# ================================================ #\n') output.write('\tparameters = locals().copy()\n') output.write('\treturn parameters\n') if args.dest is not None: output.close() # -*- coding: utf-8 -*- ############################################################################## # # OpenERP, Open Source Management Solution # Copyright (C) 2004-2010 Tiny SPRL (). # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## { 'name': 'Check Writing', 'version': '1.1', 'author': 'OpenERP SA, NovaPoint Group', 'category': 'Generic Modules/Accounting', 'description': """ Module for the Check Writing and Check Printing. ================================================ """, 'website': 'https://www.odoo.com/page/accounting', 'depends' : ['account_voucher'], 'data': [ 'wizard/account_check_batch_printing_view.xml', 'account_view.xml', 'account_voucher_view.xml', 'account_check_writing_data.xml', 'data/report_paperformat.xml', 'views/report_check.xml', 'account_check_writing_report.xml', ], 'demo': ['account_demo.xml'], 'test': [], 'installable': True, 'active': False, } # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: #!/usr/bin/python # # simple script to recurse a subtree, find all the mp3 and queue them to # XMMS. # # Please modify this script! My python is rusty at best. # # Travis Hume -- travis@usermail.com # Thu Oct 24 11:06:54 2002 # # Barak Korren - ifireball@yahoo.com # Sat Apr 03 2004 # Some bugfixes, now preserves alphanumerical file-ordering in # sub-directories import sys, glob, os, os.path, dircache def isAudioFile( f ): # to support additional file types just add their appropriate # extentions to this list (lower case). 
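    # e.g. a hypothetical FLAC-enabled list would read:
    #   file_types = ['.mp3', '.ogg', '.wav', '.flac']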
file_types = ['.mp3','.ogg','.wav'] p,ext = os.path.splitext(f) try: file_types.index(ext.lower()) except: return False return True # change this to something other than None to make the script # follow symlinks follow_links = None def find_mp3s( dirs=None ): """ finds all mp3 files rooted at dirs and returns them as a list """ if not dirs: return [] mp3s = [] while dirs: if os.path.isfile(dirs[0]) and isAudioFile(dirs[0]): mp3s.append(dirs[0]) dirs = dirs[1:] elif os.path.isdir(dirs[0]): found_dirs = [] for f in dircache.listdir( dirs[0] ): p = dirs[0] + "/" + f; if os.path.isfile(p) and isAudioFile(p): mp3s.append( p ) elif os.path.isdir( p ) and not f.endswith( "/proc" ): if not os.path.islink( p ) or follow_links: found_dirs.append( p ) dirs = found_dirs + dirs[1:] return mp3s dirs = sys.argv[1:] dirs.reverse() mp3s = find_mp3s( dirs ) #inf = ""; #for mp3 in mp3s: # inf = inf + '"' + mp3 + '"' + "\n" #os.execvp("zenity", ['zenity','--info','--text=' + inf] ) os.execvp("xmms", ['xmms','-p'] + mp3s ) # This file is part of Shuup. # # Copyright (c) 2012-2016, Shoop Ltd. All rights reserved. # # This source code is licensed under the AGPLv3 license found in the # LICENSE file in the root directory of this source tree. from __future__ import unicode_literals from django import forms from django.conf import settings from django.utils.translation import ugettext_lazy as _ from shuup.admin.form_part import FormPart, TemplatedFormDef from shuup.campaigns.admin_module.forms import ( BasketCampaignForm, CatalogCampaignForm ) from shuup.campaigns.models import ContactGroupSalesRange from shuup.core.models import Shop, ShopStatus from shuup.core.models._contacts import PROTECTED_CONTACT_GROUP_IDENTIFIERS from .form_sets import ( BasketConditionsFormSet, BasketDiscountEffectsFormSet, BasketLineEffectsFormSet, CatalogConditionsFormSet, CatalogEffectsFormSet, CatalogFiltersFormSet ) class SalesRangesForm(forms.ModelForm): class Meta: model = ContactGroupSalesRange fields = ["min_value", "max_value"] labels = { "min_value": _("Minimum value"), "max_value": _("Maximum value") } help_texts = { "max_value": _("Leave empty for no maximum") } def __init__(self, **kwargs): super(SalesRangesForm, self).__init__(**kwargs) class SalesRangesFormPart(FormPart): priority = 3 name = "contact_group_sales_ranges" form = SalesRangesForm def __init__(self, request, object=None): super(SalesRangesFormPart, self).__init__(request, object) self.shops = Shop.objects.filter(status=ShopStatus.ENABLED) def _get_form_name(self, shop): return "%d-%s" % (shop.pk, self.name) def get_form_defs(self): if not self.object.pk or self.object.identifier in PROTECTED_CONTACT_GROUP_IDENTIFIERS: return for shop in self.shops: instance, _ = ContactGroupSalesRange.objects.get_or_create(group=self.object, shop=shop) yield TemplatedFormDef( name=self._get_form_name(shop), form_class=self.form, template_name="shuup/campaigns/admin/sales_ranges_form_part.jinja", required=False, kwargs={"instance": instance} ) def form_valid(self, form): form_names = [self._get_form_name(shop) for shop in self.shops] forms = [form.forms[name] for name in form_names if name in form.forms] for form in forms: if form.changed_data: form.save() class CampaignBaseFormPart(FormPart): priority = -1000 # Show this first form = None # Override in subclass def __init__(self, *args, **kwargs): super(CampaignBaseFormPart, self).__init__(*args, **kwargs) def get_form_defs(self): yield TemplatedFormDef( "base", self.form, required=True, 
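            # priority = -1000 above makes this "base" part render first;
            # saving it (see form_valid below) yields the campaign instance
            # that the condition/effect form parts attach to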
template_name="shuup/campaigns/admin/_edit_base_form.jinja", kwargs={"instance": self.object, "languages": settings.LANGUAGES, "request": self.request} ) def form_valid(self, form): self.object = form["base"].save() return self.object class CatalogBaseFormPart(CampaignBaseFormPart): form = CatalogCampaignForm class BasketBaseFormPart(CampaignBaseFormPart): form = BasketCampaignForm class BaseFormPart(FormPart): formset = None template_name = "shuup/campaigns/admin/_edit_form.jinja" def __init__(self, request, form, name, owner): self.name = name self.form = form super(BaseFormPart, self).__init__(request, object=owner) def get_form_defs(self): yield TemplatedFormDef( self.name, self.formset, self.template_name, required=False, kwargs={"form": self.form, "owner": self.object}, ) def form_valid(self, form): component_form = form.forms[self.name] component_form.save() for component in component_form.new_objects: if self.name.startswith("conditions"): self.object.conditions.add(component) elif self.name.startswith("filters"): self.object.filters.add(component) class BasketConditionsFormPart(BaseFormPart): formset = BasketConditionsFormSet class BasketDiscountEffectsFormPart(BaseFormPart): formset = BasketDiscountEffectsFormSet class BasketLineEffectsFormPart(BaseFormPart): formset = BasketLineEffectsFormSet class CatalogConditionsFormPart(BaseFormPart): formset = CatalogConditionsFormSet class CatalogFiltersFormPart(BaseFormPart): formset = CatalogFiltersFormSet class CatalogEffectsFormPart(BaseFormPart): formset = CatalogEffectsFormSet # Copyright Bruno da Silva de Oliveira 2003. Use, modification and # distribution is subject to the Boost Software License, Version 1.0. # (See accompanying file LICENSE_1_0.txt or copy at # http://www.boost.org/LICENSE_1_0.txt) ''' Various helpers for interface files. ''' from settings import * from policies import * from declarations import * #============================================================================== # FunctionWrapper #============================================================================== class FunctionWrapper(object): '''Holds information about a wrapper for a function or a method. It is divided in 2 parts: the name of the Wrapper, and its code. The code is placed in the declaration section of the module, while the name is used to def' the function or method (with the pyste namespace prepend to it). If code is None, the name is left unchanged. ''' def __init__(self, name, code=None): self.name = name self.code = code def FullName(self): if self.code: return namespaces.pyste + self.name else: return self.name _printed_warnings = {} # used to avoid double-prints of warnings #============================================================================== # HandlePolicy #============================================================================== def HandlePolicy(function, policy): '''Show a warning to the user if the function needs a policy and doesn't have one. Return a policy to the function, which is the given policy itself if it is not None, or a default policy for this method. 
''' def IsString(type): 'Return True if the Type instance can be considered a string' return type.FullName() == 'const char*' def IsPyObject(type): return type.FullName() == '_object *' # internal name of PyObject result = function.result # if the function returns const char*, a policy is not needed if IsString(result) or IsPyObject(result): return policy # if returns a const T&, set the default policy if policy is None and result.const and isinstance(result, ReferenceType): policy = return_value_policy(copy_const_reference) # basic test if the result type demands a policy needs_policy = isinstance(result, (ReferenceType, PointerType)) # show a warning to the user, if needed if needs_policy and policy is None: global _printed_warnings warning = '---> Error: %s returns a pointer or a reference, ' \ 'but no policy was specified.' % function.FullName() if warning not in _printed_warnings: print warning print # avoid double prints of the same warning _printed_warnings[warning] = 1 return policy #============================================================================== # EspecializeTypeID #============================================================================== _exported_type_ids = {} def EspecializeTypeID(typename): global _exported_type_ids macro = 'BOOST_PYTHON_OPAQUE_SPECIALIZED_TYPE_ID(%s)\n' % typename if macro not in _exported_type_ids: _exported_type_ids[macro] = 1 return macro else: return None import pytest from mne import open_docs, grade_to_tris from mne.epochs import add_channels_epochs from mne.utils import (copy_function_doc_to_method_doc, copy_doc, linkcode_resolve, deprecated, deprecated_alias) import webbrowser @pytest.mark.parametrize('obj', (grade_to_tris, add_channels_epochs)) def test_doc_filling(obj): """Test that docs are filled properly.""" doc = obj.__doc__ assert 'verbose : ' in doc if obj is add_channels_epochs: assert 'keyword-argument only. Defaults to True if' in doc def test_deprecated_alias(): """Test deprecated_alias.""" def new_func(): """Do something.""" pass deprecated_alias('old_func', new_func) assert old_func # noqa assert 'has been deprecated in favor of new_func' in old_func.__doc__ # noqa assert 'deprecated' not in new_func.__doc__ @deprecated('message') def deprecated_func(): """Do something.""" pass @deprecated('message') class deprecated_class(object): def __init__(self): pass def test_deprecated(): """Test deprecated function.""" pytest.deprecated_call(deprecated_func) pytest.deprecated_call(deprecated_class) def test_copy_doc(): """Test decorator for copying docstrings.""" class A: def m1(): """Docstring for m1.""" pass class B: def m1(): pass class C (A): @copy_doc(A.m1) def m1(): pass assert C.m1.__doc__ == 'Docstring for m1.' pytest.raises(ValueError, copy_doc(B.m1), C.m1) def test_copy_function_doc_to_method_doc(): """Test decorator for re-using function docstring as method docstrings.""" def f1(object, a, b, c): """Docstring for f1. Parameters ---------- object : object Some object. This description also has blank lines in it. a : int Parameter a b : int Parameter b """ pass def f2(object): """Docstring for f2. Parameters ---------- object : object Only one parameter Returns ------- nothing. """ pass def f3(object): """Docstring for f3. Parameters ---------- object : object Only one parameter """ pass def f4(object): """Docstring for f4.""" pass def f5(object): # noqa: D410, D411, D414 """Docstring for f5. Parameters ---------- Returns ------- nothing. 
""" pass class A: @copy_function_doc_to_method_doc(f1) def method_f1(self, a, b, c): pass @copy_function_doc_to_method_doc(f2) def method_f2(self): "method_f3 own docstring" pass @copy_function_doc_to_method_doc(f3) def method_f3(self): pass assert A.method_f1.__doc__ == """Docstring for f1. Parameters ---------- a : int Parameter a b : int Parameter b """ assert A.method_f2.__doc__ == """Docstring for f2. Returns ------- nothing. method_f3 own docstring""" assert A.method_f3.__doc__ == 'Docstring for f3.\n\n ' pytest.raises(ValueError, copy_function_doc_to_method_doc(f5), A.method_f1) def myfun(x): """Check url.""" assert 'mne.tools' in x def test_open_docs(): """Test doc launching.""" old_tab = webbrowser.open_new_tab try: # monkey patch temporarily to prevent tabs from actually spawning webbrowser.open_new_tab = myfun open_docs() open_docs('tutorials', 'dev') open_docs('examples', 'stable') pytest.raises(ValueError, open_docs, 'foo') pytest.raises(ValueError, open_docs, 'api', 'foo') finally: webbrowser.open_new_tab = old_tab def test_linkcode_resolve(): """Test linkcode resolving.""" ex = '#L' url = linkcode_resolve('py', dict(module='mne', fullname='Epochs')) assert '/mne/epochs.py' + ex in url url = linkcode_resolve('py', dict(module='mne', fullname='compute_covariance')) assert '/mne/cov.py' + ex in url url = linkcode_resolve('py', dict(module='mne', fullname='convert_forward_solution')) assert '/mne/forward/forward.py' + ex in url url = linkcode_resolve('py', dict(module='mne', fullname='datasets.sample.data_path')) assert '/mne/datasets/sample/sample.py' + ex in url """Python part of the warnings subsystem.""" # Note: function level imports should *not* be used # in this module as it may cause import lock deadlock. # See bug 683658. import sys, types import linecache __all__ = ["warn", "showwarning", "formatwarning", "filterwarnings", "resetwarnings"] # filters contains a sequence of filter 5-tuples # The components of the 5-tuple are: # - an action: error, ignore, always, default, module, or once # - a compiled regex that must match the warning message # - a class representing the warning category # - a compiled regex that must match the module that is being warned # - a line number for the line being warning, or 0 to mean any line # If either if the compiled regexs are None, match anything. filters = [] defaultaction = "default" onceregistry = {} def warn(message, category=None, stacklevel=1): """Issue a warning, or maybe ignore it or raise an exception.""" # Check if message is already a Warning object if isinstance(message, Warning): category = message.__class__ # Check category argument if category is None: category = UserWarning assert issubclass(category, Warning) # Get context information try: caller = sys._getframe(stacklevel) except ValueError: globals = sys.__dict__ lineno = 1 else: globals = caller.f_globals lineno = caller.f_lineno if '__name__' in globals: module = globals['__name__'] else: module = "" filename = globals.get('__file__') if filename: fnl = filename.lower() if fnl.endswith(".pyc") or fnl.endswith(".pyo"): filename = filename[:-1] else: if module == "__main__": filename = sys.argv[0] if not filename: filename = module registry = globals.setdefault("__warningregistry__", {}) warn_explicit(message, category, filename, lineno, module, registry) def warn_explicit(message, category, filename, lineno, module=None, registry=None): if module is None: module = filename if module[-3:].lower() == ".py": module = module[:-3] # XXX What about leading pathname? 
    if registry is None:
        registry = {}
    if isinstance(message, Warning):
        text = str(message)
        category = message.__class__
    else:
        text = message
        message = category(message)
    key = (text, category, lineno)
    # Quick test for common case
    if registry.get(key):
        return
    # Search the filters
    for item in filters:
        action, msg, cat, mod, ln = item
        if ((msg is None or msg.match(text)) and
            issubclass(category, cat) and
            (mod is None or mod.match(module)) and
            (ln == 0 or lineno == ln)):
            break
    else:
        action = defaultaction
    # Early exit actions
    if action == "ignore":
        registry[key] = 1
        return
    if action == "error":
        raise message
    # Other actions
    if action == "once":
        registry[key] = 1
        oncekey = (text, category)
        if onceregistry.get(oncekey):
            return
        onceregistry[oncekey] = 1
    elif action == "always":
        pass
    elif action == "module":
        registry[key] = 1
        altkey = (text, category, 0)
        if registry.get(altkey):
            return
        registry[altkey] = 1
    elif action == "default":
        registry[key] = 1
    else:
        # Unrecognized actions are errors
        raise RuntimeError(
              "Unrecognized action (%r) in warnings.filters:\n %s" %
              (action, item))
    # Print message and context
    showwarning(message, category, filename, lineno)

def showwarning(message, category, filename, lineno, file=None):
    """Hook to write a warning to a file; replace if you like."""
    if file is None:
        file = sys.stderr
    try:
        file.write(formatwarning(message, category, filename, lineno))
    except IOError:
        pass # the file (probably stderr) is invalid - this warning gets lost.

def formatwarning(message, category, filename, lineno):
    """Function to format a warning the standard way."""
    s = "%s:%s: %s: %s\n" % (filename, lineno, category.__name__, message)
    line = linecache.getline(filename, lineno).strip()
    if line:
        s = s + "  " + line + "\n"
    return s

def filterwarnings(action, message="", category=Warning, module="", lineno=0,
                   append=0):
    """Insert an entry into the list of warnings filters (at the front).

    Use assertions to check that all arguments have the right type."""
    import re
    assert action in ("error", "ignore", "always", "default", "module",
                      "once"), "invalid action: %r" % (action,)
    assert isinstance(message, basestring), "message must be a string"
    assert isinstance(category, types.ClassType), "category must be a class"
    assert issubclass(category, Warning), "category must be a Warning subclass"
    assert isinstance(module, basestring), "module must be a string"
    assert isinstance(lineno, int) and lineno >= 0, \
           "lineno must be an int >= 0"
    item = (action, re.compile(message, re.I), category,
            re.compile(module), lineno)
    if append:
        filters.append(item)
    else:
        filters.insert(0, item)

def simplefilter(action, category=Warning, lineno=0, append=0):
    """Insert a simple entry into the list of warnings filters (at the front).

    A simple filter matches all modules and messages.
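
    For example, simplefilter("ignore", DeprecationWarning) hides every
    DeprecationWarning, regardless of message text or originating module.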
""" assert action in ("error", "ignore", "always", "default", "module", "once"), "invalid action: %r" % (action,) assert isinstance(lineno, int) and lineno >= 0, \ "lineno must be an int >= 0" item = (action, None, category, None, lineno) if append: filters.append(item) else: filters.insert(0, item) def resetwarnings(): """Clear the list of warning filters, so that no filters are active.""" filters[:] = [] class _OptionError(Exception): """Exception used by option processing helpers.""" pass # Helper to process -W options passed via sys.warnoptions def _processoptions(args): for arg in args: try: _setoption(arg) except _OptionError, msg: print >>sys.stderr, "Invalid -W option ignored:", msg # Helper for _processoptions() def _setoption(arg): import re parts = arg.split(':') if len(parts) > 5: raise _OptionError("too many fields (max 5): %r" % (arg,)) while len(parts) < 5: parts.append('') action, message, category, module, lineno = [s.strip() for s in parts] action = _getaction(action) message = re.escape(message) category = _getcategory(category) module = re.escape(module) if module: module = module + '$' if lineno: try: lineno = int(lineno) if lineno < 0: raise ValueError except (ValueError, OverflowError): raise _OptionError("invalid lineno %r" % (lineno,)) else: lineno = 0 filterwarnings(action, message, category, module, lineno) # Helper for _setoption() def _getaction(action): if not action: return "default" if action == "all": return "always" # Alias for a in ['default', 'always', 'ignore', 'module', 'once', 'error']: if a.startswith(action): return a raise _OptionError("invalid action: %r" % (action,)) # Helper for _setoption() def _getcategory(category): import re if not category: return Warning if re.match("^[a-zA-Z0-9_]+$", category): try: cat = eval(category) except NameError: raise _OptionError("unknown warning category: %r" % (category,)) else: i = category.rfind(".") module = category[:i] klass = category[i+1:] try: m = __import__(module, None, None, [klass]) except ImportError: raise _OptionError("invalid module name: %r" % (module,)) try: cat = getattr(m, klass) except AttributeError: raise _OptionError("unknown warning category: %r" % (category,)) if (not isinstance(cat, types.ClassType) or not issubclass(cat, Warning)): raise _OptionError("invalid warning category: %r" % (category,)) return cat # Module initialization _processoptions(sys.warnoptions) # XXX OverflowWarning should go away for Python 2.5. simplefilter("ignore", category=OverflowWarning, append=1) simplefilter("ignore", category=PendingDeprecationWarning, append=1) # Copyright 2015 Matthew Rogge # # This file is part of Retr3d. # # Retr3d is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Retr3d is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Retr3d. If not, see . 
#import Math stuff
from __future__ import division # allows floating point division from integers
import math
from itertools import product

#import FreeCAD modules
import FreeCAD as App
import Part
import Sketcher
import Draft

#Specific to printer
import globalVars as gv
import utilityFunctions as uf

class SideBarTopR(object):
    def __init__(self):
        self.name = "sideBarTopR"

    def assemble(self):
        App.ActiveDocument = App.getDocument(self.name)
        shape = App.ActiveDocument.ActiveObject.Shape
        App.ActiveDocument = App.getDocument("PrinterAssembly")
        App.ActiveDocument.addObject('Part::Feature', self.name).Shape = shape

        #Color Part

        #Get the feature and move it into position
        objs = App.ActiveDocument.getObjectsByLabel(self.name)
        shape = objs[-1]

        #Rotate into correct orientation
        rotateAngle = 90
        rotateCenter = App.Vector(0,0,0)
        rotateAxis = App.Vector(0,0,1)
        Draft.rotate([shape], rotateAngle, rotateCenter, axis=rotateAxis, copy=False)

        #Define shifts and move the left clamp into place
        xShift = +gv.zRodSpacing/2
        yShift = -gv.yRodLength/2 + gv.frameWidth
        zShift = -gv.yRodStandoff - gv.frameHeight/2

        App.ActiveDocument = App.getDocument("PrinterAssembly")
        Draft.move([shape], App.Vector(xShift, yShift, zShift), copy=False)
        App.ActiveDocument.recompute()

    def draw(self):
        try:
            App.getDocument(self.name).recompute()
            App.closeDocument(self.name)
            App.setActiveDocument("")
            App.ActiveDocument = None
        except:
            pass

        #make document
        App.newDocument(self.name)
        App.setActiveDocument(self.name)
        App.ActiveDocument = App.getDocument(self.name)

        #extrude side bar
        uf.extrudeFrameMember(self.name, gv.sideBarLength)

        if gv.zMotorMountLocation == "Top":
            return

        #Add holes for zMotorMount
        #Sketch points
        p1x = 0
        p1y = 0
        p2x = 0
        p2y = gv.sideBarLength/2+gv.extruderNozzleStandoff-gv.zMotorMountPlateWidth/4
        p3x = 0
        p3y = gv.sideBarLength/2+gv.extruderNozzleStandoff-3*gv.zMotorMountPlateWidth/4

        #Make Sketch
        App.activeDocument().addObject('Sketcher::SketchObject','Sketch001')
        App.activeDocument().Sketch001.Support = uf.getFace(App.ActiveDocument.Pad, gv.sideBarLength/2, 0, None, None, gv.frameHeight/2, 0)
        App.activeDocument().recompute()
        App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p1x,p1y,0),App.Vector(p2x,p2y,0)))
        App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',-1,1,0,1))
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Vertical',0))
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.addGeometry(Part.Line(App.Vector(p2x,p2y,0),App.Vector(p3x,p3y,0)))
        App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',0,2,1,1))
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Vertical',1))
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.toggleConstruction(1)
        App.ActiveDocument.Sketch001.toggleConstruction(0)
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.addGeometry(Part.Circle(App.Vector(p3x,p3y,0),App.Vector(0,0,1),gv.mountToFrameDia/2))
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',2,3,1,2))
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.addGeometry(Part.Circle(App.Vector(p2x,p2y,0),App.Vector(0,0,1),gv.mountToFrameDia/2))
        App.ActiveDocument.recompute()
        App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Coincident',3,3,0,2))
        App.ActiveDocument.recompute()
App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Equal',2,3)) App.ActiveDocument.recompute() #Add Dimensions App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('Radius',2,gv.mountToFrameDia/2)) App.ActiveDocument.recompute() App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('DistanceY',1,gv.zMotorMountPlateWidth/2)) App.ActiveDocument.recompute() App.ActiveDocument.Sketch001.addConstraint(Sketcher.Constraint('DistanceY',0,p3y)) App.ActiveDocument.recompute() App.getDocument(self.name).recompute() #Cut holes through Bar App.activeDocument().addObject("PartDesign::Pocket","Pocket") App.activeDocument().Pocket.Sketch = App.activeDocument().Sketch001 App.activeDocument().Pocket.Length = 5.0 App.ActiveDocument.recompute() App.ActiveDocument.Pocket.Length = 5.000000 App.ActiveDocument.Pocket.Type = 1 App.ActiveDocument.Pocket.UpToFace = None App.ActiveDocument.recompute() #!/usr/bin/env python ''' plasmac_stats.py Copyright (C) 2019 Phillip A Carter Inspired by and some parts copied from the work of John (islander261 on the LinuxCNC forum) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. ''' import os import gtk import linuxcnc import gobject import hal import hal_glib import time from gladevcp.persistence import IniFile from gladevcp.persistence import widget_defaults from gladevcp.persistence import select_widgets from gmoccapy import getiniinfo class HandlerClass: def set_theme(self): theme = gtk.settings_get_default().get_property('gtk-theme-name') if os.path.exists(self.prefFile): try: with open(self.prefFile, 'r') as f_in: for line in f_in: if 'gtk_theme' in line and not 'Follow System Theme' in line: (item, theme) = line.strip().replace(" ", "").split('=') except: print('*** configuration file, {} is invalid ***'.format(self.prefFile)) gtk.settings_get_default().set_property('gtk-theme-name', theme) def pierce_count_changed(self,halpin): if hal.get_value('plasmac_stats.state') >= self.TORCH_ON: self.PIERCE_COUNT += 1 self.pierce_count += 1 self.builder.get_object('pierce-countT').set_label('{:d}'.format(self.PIERCE_COUNT)) self.builder.get_object('pierce-count').set_label('{:d}'.format(self.pierce_count)) def cut_length_changed(self,halpin): if halpin.get(): self.thisCutLength = halpin.get() if hal.get_value('halui.machine.units-per-mm') == 1: self.builder.get_object('cut-lengthT').set_label('{:.2f} M'.format((self.CUT_LENGTH + self.thisCutLength) * 0.001)) self.builder.get_object('cut-length').set_label('{:.2f} M'.format((self.cut_length + self.thisCutLength) * 0.001)) else: self.builder.get_object('cut-lengthT').set_label('{:.2f}\"'.format(self.CUT_LENGTH + self.thisCutLength)) self.builder.get_object('cut-length').set_label('{:.2f}\"'.format(self.cut_length + self.thisCutLength)) else: self.CUT_LENGTH += self.thisCutLength self.cut_length += self.thisCutLength if hal.get_value('halui.machine.units-per-mm') == 1: 
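                # halui.machine.units-per-mm == 1 means a metric (mm) machine:
                # the accumulated length is scaled by 0.001 and labelled in
                # metres; otherwise totals are shown in inches (the '"' suffix)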
                self.builder.get_object('cut-lengthT').set_label('{:.2f} M'.format(self.CUT_LENGTH * 0.001))
            else:
                self.builder.get_object('cut-lengthT').set_label('{:.2f}\"'.format(self.CUT_LENGTH))
        self.thisCutLength = 0

    def cut_time_changed(self,halpin):
        if halpin.get():
            self.thisCutTime = halpin.get()
            self.display_time('cut-timeT', self.CUT_TIME + self.thisCutTime)
            self.display_time('cut-time', self.cut_time + self.thisCutTime)
        else:
            self.CUT_TIME += self.thisCutTime
            self.cut_time += self.thisCutTime
            self.display_time('cut-timeT', self.CUT_TIME)
        self.thisCutTime = 0

    def torch_on_changed(self,halpin):
        if halpin.get() and not self.torchOn:
            self.torchStart = time.time()
        elif not halpin.get() and self.torchOn:
            self.TORCH_TIME += (time.time() - self.torchStart)
            self.torch_time += (time.time() - self.torchStart)
            self.display_time('torch-timeT', self.TORCH_TIME)
        self.torchOn = halpin.get()

    def prog_run_changed(self,halpin):
        if halpin.get() and not self.progRun:
            self.clear_job_values()
            self.runStart = time.time()
            self.progRun = True

    def prog_idle_changed(self,halpin):
        if halpin.get() and self.progRun:
            self.RUN_TIME += (time.time() - self.runStart)
            self.display_time('run-timeT', self.RUN_TIME)
            self.progRun = False

    def motion_type_changed(self,halpin):
        if halpin.get() == 1 and self.oldMotionType != 1:
            self.rapidStart = time.time()
            self.rapidOn = True
        elif halpin.get() != 1 and self.oldMotionType == 1:
            self.RAPID_TIME += (time.time() - self.rapidStart)
            self.rapid_time += (time.time() - self.rapidStart)
            self.display_time('rapid-timeT', self.RAPID_TIME)
            self.rapidOn = False
        self.oldMotionType = halpin.get()

    def state_changed(self,halpin):
        if halpin.get() == self.PROBE_HEIGHT and self.oldState == self.IDLE:
            self.probeStart = time.time()
            self.probeOn = True
        elif (halpin.get() > self.ZERO_HEIGHT or halpin.get() == self.IDLE) and self.probeOn:
            self.PROBE_TIME += (time.time() - self.probeStart)
            self.probe_time += (time.time() - self.probeStart)
            self.display_time('probe-timeT', self.PROBE_TIME)
            self.probeOn = False
        self.oldState = halpin.get()

    def pierce_reset(self,halbutton):
        self.PIERCE_COUNT = 0
        self.builder.get_object('pierce-countT').set_label('{:d}'.format(self.PIERCE_COUNT))

    def cut_length_reset(self,halbutton):
        self.CUT_LENGTH = 0.0
        self.builder.get_object('cut-lengthT').set_label('{:.2f}'.format(self.CUT_LENGTH))

    def cut_time_reset(self,halbutton):
        self.CUT_TIME = 0.0
        self.display_time('cut-timeT', self.CUT_TIME)

    def torch_time_reset(self,halbutton):
        self.TORCH_TIME = 0.0
        self.display_time('torch-timeT', self.TORCH_TIME)

    def run_time_reset(self,halbutton):
        self.RUN_TIME = 0.0
        self.display_time('run-timeT', self.RUN_TIME)

    def rapid_time_reset(self,halbutton):
        self.RAPID_TIME = 0.0
        self.display_time('rapid-timeT', self.RAPID_TIME)

    def probe_time_reset(self,halbutton):
        self.PROBE_TIME = 0.0
        self.display_time('probe-timeT', self.PROBE_TIME)

    def clear_job_values(self):
        self.pierce_count = 0
        self.builder.get_object('pierce-count').set_label('{:d}'.format(self.pierce_count))
        self.cut_length = 0
        self.builder.get_object('cut-length').set_label('{:.2f}'.format(self.cut_length))
        self.cut_time = 0
        self.display_time('cut-time', self.cut_time)
        self.torch_time = 0
        self.display_time('torch-time', self.torch_time)
        self.display_time('run-time', 0)
        self.rapid_time = 0
        self.display_time('rapid-time', self.rapid_time)
        self.probe_time = 0
        self.display_time('probe-time', self.probe_time)
        self.torchOn = False
        self.progRun = False
        self.rapidOn = False
        self.probeOn = False

    def all_reset(self,halbutton):
        self.pierce_reset(0)
self.cut_length_reset(0) self.cut_time_reset(0) self.torch_time_reset(0) self.run_time_reset(0) self.rapid_time_reset(0) self.probe_time_reset(0) def display_time(self,widget,time): m, s = divmod(time, 60) h, m = divmod(m, 60) self.builder.get_object(widget).set_label('{:.0f}:{:02.0f}:{:02.0f}'.format(h,m,s)) def on_stats_box_destroy(self, obj, data = None): self.ini.save_state(self) def on_unix_signal(self,signum,stack_frame): self.ini.save_state(self) def periodic(self): if self.torchOn: self.display_time('torch-timeT', self.TORCH_TIME + (time.time() - self.torchStart)) self.display_time('torch-time', self.torch_time + (time.time() - self.torchStart)) if self.progRun: self.display_time('run-timeT', self.RUN_TIME + (time.time() - self.runStart)) self.display_time('run-time', time.time() - self.runStart) if self.rapidOn: self.display_time('rapid-timeT', self.RAPID_TIME + (time.time() - self.rapidStart)) self.display_time('rapid-time', self.rapid_time + (time.time() - self.rapidStart)) if self.probeOn: self.display_time('probe-timeT', self.PROBE_TIME + (time.time() - self.probeStart)) self.display_time('probe-time', self.probe_time + (time.time() - self.probeStart)) return True def __init__(self, halcomp,builder,useropts): self.halcomp = halcomp self.builder = builder self.i = linuxcnc.ini(os.environ['INI_FILE_NAME']) self.prefFile = self.i.find('EMC', 'MACHINE') + '.pref' self.set_theme() self.pierceCount = hal_glib.GPin(halcomp.newpin('pierce-count', hal.HAL_S32, hal.HAL_IN)) self.pierceCount.connect('value-changed', self.pierce_count_changed) self.cutLength = hal_glib.GPin(halcomp.newpin('cut-length', hal.HAL_FLOAT, hal.HAL_IN)) self.cutLength.connect('value-changed', self.cut_length_changed) self.cutTime = hal_glib.GPin(halcomp.newpin('cut-time', hal.HAL_FLOAT, hal.HAL_IN)) self.cutTime.connect('value-changed', self.cut_time_changed) self.torchOn = hal_glib.GPin(halcomp.newpin('torch-on', hal.HAL_BIT, hal.HAL_IN)) self.torchOn.connect('value-changed', self.torch_on_changed) self.progRun = hal_glib.GPin(halcomp.newpin('program-is-running', hal.HAL_BIT, hal.HAL_IN)) self.progRun.connect('value-changed', self.prog_run_changed) self.progIdle = hal_glib.GPin(halcomp.newpin('program-is-idle', hal.HAL_BIT, hal.HAL_IN)) self.progIdle.connect('value-changed', self.prog_idle_changed) self.statePin = hal_glib.GPin(halcomp.newpin('state', hal.HAL_S32, hal.HAL_IN)) self.statePin.connect('value-changed', self.state_changed) self.rapidTime = hal_glib.GPin(halcomp.newpin('motion-type', hal.HAL_S32, hal.HAL_IN)) self.rapidTime.connect('value-changed', self.motion_type_changed) self.pierceReset = self.builder.get_object('pierce-count-reset') self.pierceReset.connect('pressed', self.pierce_reset) self.cutLengthReset = self.builder.get_object('cut-length-reset') self.cutLengthReset.connect('pressed', self.cut_length_reset) self.cutTimeReset = self.builder.get_object('cut-time-reset') self.cutTimeReset.connect('pressed', self.cut_time_reset) self.torchTimeReset = self.builder.get_object('torch-time-reset') self.torchTimeReset.connect('pressed', self.torch_time_reset) self.runTimeReset = self.builder.get_object('run-time-reset') self.runTimeReset.connect('pressed', self.run_time_reset) self.rapidTimeReset = self.builder.get_object('rapid-time-reset') self.rapidTimeReset.connect('pressed', self.rapid_time_reset) self.probeTimeReset = self.builder.get_object('probe-time-reset') self.probeTimeReset.connect('pressed', self.probe_time_reset) self.allReset = self.builder.get_object('all-reset') 
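        # the all-reset button chains the individual reset handlers
        # (see all_reset above), zeroing every total in one press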
self.allReset.connect('pressed', self.all_reset) # plasmac states self.IDLE = 0 self.PROBE_HEIGHT = 1 self.PROBE_DOWN = 2 self.PROBE_UP = 3 self.ZERO_HEIGHT = 4 self.PIERCE_HEIGHT = 5 self.TORCH_ON = 6 self.ARC_OK = 7 self.PIERCE_DELAY = 8 self.PUDDLE_JUMP = 9 self.CUT_HEGHT = 10 self.CUTTING = 11 self.SAFE_HEIGHT = 12 self.MAX_HEIGHT = 13 self.FINISH = 14 self.TORCH_PULSE = 15 self.PAUSED_MOTION = 16 self.OHMIC_TEST = 17 self.PROBE_TEST = 18 self.oldState = 0 self.oldMotionType = 0 self.pierce_count = 0 self.cut_length = 0 self.thisCutLength = 0 self.cut_time = 0.0 self.thisCutTime = 0.0 self.torch_time = 0.0 self.torchOn = False self.progRun = False self.rapid_time = 0.0 self.rapidOn = False self.probe_time = 0.0 self.probeOn = False self.defaults = {IniFile.vars:{"PIERCE_COUNT" : 0, "CUT_LENGTH" : 0.0, "CUT_TIME" : 0.0, "TORCH_TIME" : 0.0, "RUN_TIME" : 0.0, "RAPID_TIME" : 0.0, "PROBE_TIME" : 0.0, }, } get_ini_info = getiniinfo.GetIniInfo() self.ini_filename = __name__ + ".var" self.ini = IniFile(self.ini_filename, self.defaults, self.builder) self.ini.restore_state(self) self.builder.get_object('pierce-countT').set_label('{:d}'.format(self.PIERCE_COUNT)) self.builder.get_object('pierce-count').set_label('{:d}'.format(0)) if hal.get_value('halui.machine.units-per-mm') == 1: self.builder.get_object('cut-lengthT').set_label('{:0.2f} M'.format(self.CUT_LENGTH * 0.001)) self.builder.get_object('cut-length').set_label('{:0.2f} M'.format(0)) else: self.builder.get_object('cut-lengthT').set_label('{:0.2f}\"'.format(self.CUT_LENGTH)) self.builder.get_object('cut-length').set_label('{:0.2f}\"'.format(0)) self.display_time('cut-timeT', self.CUT_TIME) self.display_time('torch-timeT', self.TORCH_TIME) self.display_time('run-timeT', self.RUN_TIME) self.display_time('rapid-timeT', self.RAPID_TIME) self.display_time('probe-timeT', self.PROBE_TIME) gobject.timeout_add(100, self.periodic) def get_handlers(halcomp,builder,useropts): return [HandlerClass(halcomp,builder,useropts)] """ Utility functions for checking passed arguments against call signature of a function or class constructor. """ import functools import inspect import types from pylearn2.utils.exc import reraise_as from pylearn2.utils.string_utils import match def check_call_arguments(to_call, kwargs): """ Check the call signature against a dictionary of proposed arguments, raising an informative exception in the case of mismatch. Parameters ---------- to_call : class or callable Function or class to examine (in the case of classes, the constructor call signature is analyzed). kwargs : dict Dictionary mapping parameter names (including positional arguments) to proposed values. 
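
    Raises
    ------
    TypeError
        If `kwargs` contains an entry for 'self', if `to_call` uses argument
        unpacking or a variable-length argument list, if unsupported keywords
        are passed, or if required arguments are missing.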
""" if 'self' in kwargs.keys(): raise TypeError("Your dictionary includes an entry for 'self', " "which is just asking for trouble") orig_to_call = getattr(to_call, '__name__', str(to_call)) if not isinstance(to_call, types.FunctionType): if hasattr(to_call, '__init__'): to_call = to_call.__init__ elif hasattr(to_call, '__call__'): to_call = to_call.__call__ args, varargs, keywords, defaults = inspect.getargspec(to_call) if any(not isinstance(arg, str) for arg in args): raise TypeError('%s uses argument unpacking, which is deprecated and ' 'unsupported by this pylearn2' % orig_to_call) if varargs is not None: raise TypeError('%s has a variable length argument list, but ' 'this is not supported by config resolution' % orig_to_call) if keywords is None: bad_keywords = [arg_name for arg_name in kwargs.keys() if arg_name not in args] if len(bad_keywords) > 0: bad = ', '.join(bad_keywords) args = [ arg for arg in args if arg != 'self' ] if len(args) == 0: matched_str = '(It does not support any keywords, actually)' else: matched = [ match(keyword, args) for keyword in bad_keywords ] matched_str = 'Did you mean %s?' % (', '.join(matched)) raise TypeError('%s does not support the following ' 'keywords: %s. %s' % (orig_to_call, bad, matched_str)) if defaults is None: num_defaults = 0 else: num_defaults = len(defaults) required = args[:len(args) - num_defaults] missing = [arg for arg in required if arg not in kwargs] if len(missing) > 0: #iff the im_self (or __self__) field is present, this is a # bound method, which has 'self' listed as an argument, but # which should not be supplied by kwargs is_bound = hasattr(to_call, 'im_self') or hasattr(to_call, '__self__') if len(missing) > 1 or missing[0] != 'self' or not is_bound: if 'self' in missing: missing.remove('self') missing = ', '.join([str(m) for m in missing]) raise TypeError('%s did not get these expected ' 'arguments: %s' % (orig_to_call, missing)) def checked_call(to_call, kwargs): """ Attempt calling a function or instantiating a class with a given set of arguments, raising a more helpful exception in the case of argument mismatch. Parameters ---------- to_call : class or callable Function or class to examine (in the case of classes, the constructor call signature is analyzed). kwargs : dict Dictionary mapping parameter names (including positional arguments) to proposed values. """ try: return to_call(**kwargs) except TypeError: check_call_arguments(to_call, kwargs) raise def sensible_argument_errors(func): """ .. todo:: WRITEME """ @functools.wraps(func) def wrapped_func(*args, **kwargs): """ .. todo:: WRITEME """ try: func(*args, **kwargs) except TypeError: argnames, varargs, keywords, defaults = inspect.getargspec(func) posargs = dict(zip(argnames, args)) bad_keywords = [] for keyword in kwargs: if keyword not in argnames: bad_keywords.append(keyword) if len(bad_keywords) > 0: bad = ', '.join(bad_keywords) reraise_as(TypeError('%s() does not support the following ' 'keywords: %s' % (str(func.func_name), bad))) allargsgot = set(list(kwargs.keys()) + list(posargs.keys())) numrequired = len(argnames) - len(defaults) diff = list(set(argnames[:numrequired]) - allargsgot) if len(diff) > 0: reraise_as(TypeError('%s() did not get required args: %s' % (str(func.func_name), ', '.join(diff)))) raise return wrapped_func """ The :mod:`sklearn.metrics` module includes score functions, performance metrics and pairwise metrics and distance computations. 
""" from .ranking import auc from .ranking import average_precision_score from .ranking import coverage_error from .ranking import label_ranking_average_precision_score from .ranking import label_ranking_loss from .ranking import precision_recall_curve from .ranking import roc_auc_score from .ranking import roc_curve from .ranking import dcg_score from .ranking import ndcg_score from .classification import accuracy_score from .classification import classification_report from .classification import cohen_kappa_score from .classification import confusion_matrix from .classification import f1_score from .classification import fbeta_score from .classification import hamming_loss from .classification import hinge_loss from .classification import jaccard_similarity_score from .classification import log_loss from .classification import matthews_corrcoef from .classification import precision_recall_fscore_support from .classification import precision_score from .classification import recall_score from .classification import zero_one_loss from .classification import brier_score_loss from . import cluster from .cluster import adjusted_mutual_info_score from .cluster import adjusted_rand_score from .cluster import completeness_score from .cluster import consensus_score from .cluster import homogeneity_completeness_v_measure from .cluster import homogeneity_score from .cluster import mutual_info_score from .cluster import normalized_mutual_info_score from .cluster import fowlkes_mallows_score from .cluster import silhouette_samples from .cluster import silhouette_score from .cluster import calinski_harabaz_score from .cluster import v_measure_score from .pairwise import euclidean_distances from .pairwise import pairwise_distances from .pairwise import pairwise_distances_argmin from .pairwise import pairwise_distances_argmin_min from .pairwise import pairwise_kernels from .regression import explained_variance_score from .regression import mean_absolute_error from .regression import mean_squared_error from .regression import mean_squared_log_error from .regression import median_absolute_error from .regression import r2_score from .scorer import make_scorer from .scorer import SCORERS from .scorer import get_scorer __all__ = [ 'accuracy_score', 'adjusted_mutual_info_score', 'adjusted_rand_score', 'auc', 'average_precision_score', 'classification_report', 'cluster', 'completeness_score', 'confusion_matrix', 'consensus_score', 'coverage_error', 'euclidean_distances', 'explained_variance_score', 'f1_score', 'fbeta_score', 'get_scorer', 'hamming_loss', 'hinge_loss', 'homogeneity_completeness_v_measure', 'homogeneity_score', 'jaccard_similarity_score', 'label_ranking_average_precision_score', 'label_ranking_loss', 'log_loss', 'make_scorer', 'matthews_corrcoef', 'mean_absolute_error', 'mean_squared_error', 'mean_squared_log_error', 'median_absolute_error', 'mutual_info_score', 'normalized_mutual_info_score', 'pairwise_distances', 'pairwise_distances_argmin', 'pairwise_distances_argmin_min', 'pairwise_distances_argmin_min', 'pairwise_kernels', 'precision_recall_curve', 'precision_recall_fscore_support', 'precision_score', 'r2_score',