                                                       project=project_id, operation=op_resp['name'])
                    op_resp = op_req.execute()
                    if op_resp['status'] != 'DONE':
                        time.sleep(poll_interval)
                        attempts += 1
                    else:
                        complete = True
                        if op_resp['operationType'] == 'delete':
                            # don't wait for the delete
                            return True
                        elif op_resp['operationType'] in ['insert', 'update', 'patch']:
                            # TODO(supertom): Isolate 'build-new-request' stuff.
                            resource_name_singular = GCPUtils.get_entity_name_from_resource_name(
                                resource_name)
                            if op_resp['operationType'] == 'insert' or 'entity_name' not in parsed_url:
                                parsed_url['entity_name'] = GCPUtils.parse_gcp_url(op_resp['targetLink'])[
                                    'entity_name']
                            args = {'project': project_id,
                                    resource_name_singular: parsed_url['entity_name']}
                            new_req = resource.get(**args)
                            resp = new_req.execute()
                            return resp
                        else:
                            # assuming multiple entities, do a list call.
                            new_req = resource.list(project=project_id)
                            resp = new_req.execute()
                            return resp
                else:
                    # operation didn't complete on time.
                    raise GCPOperationTimeoutError("Operation timed out: %s" % (
                        op_resp['targetLink']))

    @staticmethod
    def build_resource_from_name(client, resource_name):
        try:
            method = getattr(client, resource_name)
            return method()
        except AttributeError:
            raise NotImplementedError('%s is not an attribute of %s' % (resource_name,
                                                                        client))

    @staticmethod
    def get_gcp_resource_from_methodId(methodId):
        try:
            parts = methodId.split('.')
            if len(parts) != 3:
                return None
            else:
                return parts[1]
        except AttributeError:
            return None

    @staticmethod
    def get_entity_name_from_resource_name(resource_name):
        if not resource_name:
            return None

        try:
            # Chop off global or region prefixes
            # (e.g. 'regionBackendServices' -> 'BackendServices')
            if resource_name.startswith('global'):
                resource_name = resource_name.replace('global', '')
            elif resource_name.startswith('region'):
                resource_name = resource_name.replace('region', '')

            # ensure we have a lower case first letter
            resource_name = resource_name[0].lower() + resource_name[1:]

            if resource_name[-3:] == 'ies':
                return resource_name.replace(
                    resource_name[-3:], 'y')
            if resource_name[-1] == 's':
                return resource_name[:-1]

            return resource_name

        except AttributeError:
            return None

    @staticmethod
    def parse_gcp_url(url):
        """
        Parse GCP URLs and return a dict of parts.

        Supported URL structures:
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/'global'/RESOURCE/ENTITY_NAME/METHOD_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME
        /SERVICE/VERSION/'projects'/PROJECT_ID/LOCATION_TYPE/LOCATION/RESOURCE/ENTITY_NAME/METHOD_NAME

        :param url: GCP-generated URL, such as a selflink or resource location.
        :type url:  ``str``

        :return: dictionary of parts. Includes standard components of urlparse, plus
                 GCP-specific 'service', 'api_version', 'project' and 'resource_name' keys.
                 Optionally, 'zone', 'region', 'entity_name' and 'method_name', if applicable.
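
        Example (illustrative only; this instance selfLink is hypothetical,
        the expected keys follow from the parsing rules below):

            parts = GCPUtils.parse_gcp_url(
                'https://www.googleapis.com/compute/v1/projects/my-project'
                '/zones/us-central1-a/instances/my-instance')
            # parts['service'] == 'compute', parts['api_version'] == 'v1'
            # parts['project'] == 'my-project', parts['zone'] == 'us-central1-a'
            # parts['resource_name'] == 'instances'
            # parts['entity_name'] == 'my-instance'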
        :rtype: ``dict``
        """
        p = urlparse.urlparse(url)
        if not p:
            return None
        else:
            # we add extra items such as
            # zone, region and resource_name
            url_parts = {}
            url_parts['scheme'] = p.scheme
            url_parts['host'] = p.netloc
            url_parts['path'] = p.path
            if p.path.find('/') == 0:
                url_parts['path'] = p.path[1:]
            url_parts['params'] = p.params
            url_parts['fragment'] = p.fragment
            url_parts['query'] = p.query
            url_parts['project'] = None
            url_parts['service'] = None
            url_parts['api_version'] = None

            path_parts = url_parts['path'].split('/')
            url_parts['service'] = path_parts[0]
            url_parts['api_version'] = path_parts[1]
            if path_parts[2] == 'projects':
                url_parts['project'] = path_parts[3]
            else:
                # invalid URL
                raise GCPInvalidURLError('unable to parse: %s' % url)

            if 'global' in path_parts:
                url_parts['global'] = True
                idx = path_parts.index('global')
                if len(path_parts) - idx == 4:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]
                    url_parts['method_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 1]
                    url_parts['entity_name'] = path_parts[idx + 2]

                if len(path_parts) - idx == 2:
                    url_parts['resource_name'] = path_parts[idx + 1]

                if len(path_parts) - idx < 2:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            elif 'regions' in path_parts or 'zones' in path_parts:
                idx = -1
                if 'regions' in path_parts:
                    idx = path_parts.index('regions')
                    url_parts['region'] = path_parts[idx + 1]
                else:
                    idx = path_parts.index('zones')
                    url_parts['zone'] = path_parts[idx + 1]

                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]

                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]

                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            else:
                # no location in URL.
                idx = path_parts.index('projects')
                if len(path_parts) - idx == 5:
                    # we have a resource, entity and method_name
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]
                    url_parts['method_name'] = path_parts[idx + 4]

                if len(path_parts) - idx == 4:
                    # we have a resource and entity
                    url_parts['resource_name'] = path_parts[idx + 2]
                    url_parts['entity_name'] = path_parts[idx + 3]

                if len(path_parts) - idx == 3:
                    url_parts['resource_name'] = path_parts[idx + 2]

                if len(path_parts) - idx < 3:
                    # invalid URL
                    raise GCPInvalidURLError('unable to parse: %s' % url)

            return url_parts

    @staticmethod
    def build_googleapi_url(project, api_version='v1', service='compute'):
        return 'https://www.googleapis.com/%s/%s/projects/%s' % (service, api_version, project)

    @staticmethod
    def filter_gcp_fields(params, excluded_fields=None):
        new_params = {}
        if not excluded_fields:
            excluded_fields = ['creationTimestamp', 'id', 'kind',
                               'selfLink', 'fingerprint', 'description']

        if isinstance(params, list):
            new_params = [GCPUtils.filter_gcp_fields(
                x, excluded_fields) for x in params]
        elif isinstance(params, dict):
            for k in params.keys():
                if k not in excluded_fields:
                    new_params[k] = GCPUtils.filter_gcp_fields(
                        params[k], excluded_fields)
        else:
            new_params = params

        return new_params

    @staticmethod
    def are_params_equal(p1, p2):
        """
        Check if two params dicts are equal.
        TODO(supertom): need a way to filter out URLs, or they need to be built
        """
        filtered_p1 = GCPUtils.filter_gcp_fields(p1)
        filtered_p2 = GCPUtils.filter_gcp_fields(p2)
        if filtered_p1 != filtered_p2:
            return False
        return True


class GCPError(Exception):
    pass


class GCPOperationTimeoutError(GCPError):
    pass


class GCPInvalidURLError(GCPError):
    pass
#!/usr/bin/env python
#
# Copyright 2004,2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#

"""
Check the Rx path of USRP Rev 1.
This configures the USRP to return a periodic sequence of integers.
"""

from gnuradio import gr
from gnuradio import usrp


def build_graph():
    rx_decim = 32

    tb = gr.top_block()
    usrp_rx = usrp.source_s(0, rx_decim, 1, 0x32103210, usrp.FPGA_MODE_COUNTING)
    sink = gr.check_counting_s()
    tb.connect(usrp_rx, sink)

    # file_sink = gr.file_sink(gr.sizeof_short, 'counting.dat')
    # tb.connect(usrp_rx, file_sink)

    return tb


def main():
    tb = build_graph()
    try:
        tb.run()
    except KeyboardInterrupt:
        pass


if __name__ == '__main__':
    main()
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL ().
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with this program. If not, see . # ############################################################################## from osv import osv class launch_map(osv.osv): _inherit = "res.partner" def open_map(self, cr, uid, ids, context=None): address_obj= self.pool.get('res.partner') partner = address_obj.browse(cr, uid, ids, context=context)[0] url="http://maps.google.com/maps?oi=map&q=" if partner.street: url+=partner.street.replace(' ','+') if partner.city: url+='+'+partner.city.replace(' ','+') if partner.state_id: url+='+'+partner.state_id.name.replace(' ','+') if partner.country_id: url+='+'+partner.country_id.name.replace(' ','+') if partner.zip: url+='+'+partner.zip.replace(' ','+') return { 'type': 'ir.actions.act_url', 'url':url, 'target': 'new' } launch_map() # vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: import warnings import pytest import sqlalchemy as sa from sqlalchemy import create_engine from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.ext.hybrid import hybrid_property from sqlalchemy.orm import sessionmaker from sqlalchemy_json_api import CompositeId, QueryBuilder warnings.filterwarnings('error') @pytest.fixture(scope='class') def base(): return declarative_base() @pytest.fixture(scope='class') def group_user_cls(base): return sa.Table( 'group_user', base.metadata, sa.Column('user_id', sa.Integer, sa.ForeignKey('user.id')), sa.Column('group_id', sa.Integer, sa.ForeignKey('group.id')) ) @pytest.fixture(scope='class') def group_cls(base): class Group(base): __tablename__ = 'group' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) return Group @pytest.fixture(scope='class') def organization_cls(base): class Organization(base): __tablename__ = 'organization' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) return Organization @pytest.fixture(scope='class') def organization_membership_cls(base, organization_cls, user_cls): class OrganizationMembership(base): __tablename__ = 'organization_membership' organization_id = sa.Column( sa.Integer, sa.ForeignKey('organization.id'), primary_key=True ) user_id = sa.Column( sa.Integer, sa.ForeignKey('user.id'), primary_key=True ) user = sa.orm.relationship(user_cls, backref='memberships') organization = sa.orm.relationship(organization_cls, backref='members') is_admin = sa.Column( sa.Boolean, nullable=False, default=False, ) @hybrid_property def id(self): return CompositeId([self.organization_id, self.user_id]) return OrganizationMembership @pytest.fixture(scope='class') def friendship_cls(base): return sa.Table( 'friendships', base.metadata, sa.Column( 'friend_a_id', sa.Integer, sa.ForeignKey('user.id'), primary_key=True ), sa.Column( 'friend_b_id', sa.Integer, sa.ForeignKey('user.id'), primary_key=True ) ) @pytest.fixture(scope='class') def user_cls(base, group_user_cls, friendship_cls): class User(base): __tablename__ = 'user' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) groups = sa.orm.relationship( 'Group', secondary=group_user_cls, backref='users' ) # this relationship is used for persistence friends = sa.orm.relationship( 'User', secondary=friendship_cls, primaryjoin=id == 
friendship_cls.c.friend_a_id, secondaryjoin=id == friendship_cls.c.friend_b_id, ) friendship_union = sa.select([ friendship_cls.c.friend_a_id, friendship_cls.c.friend_b_id ]).union( sa.select([ friendship_cls.c.friend_b_id, friendship_cls.c.friend_a_id] ) ).alias() User.all_friends = sa.orm.relationship( 'User', secondary=friendship_union, primaryjoin=User.id == friendship_union.c.friend_a_id, secondaryjoin=User.id == friendship_union.c.friend_b_id, viewonly=True, order_by=User.id ) return User @pytest.fixture(scope='class') def category_cls(base, group_user_cls, friendship_cls): class Category(base): __tablename__ = 'category' id = sa.Column(sa.Integer, primary_key=True) name = sa.Column(sa.String) created_at = sa.Column(sa.DateTime) parent_id = sa.Column(sa.Integer, sa.ForeignKey('category.id')) parent = sa.orm.relationship( 'Category', backref='subcategories', remote_side=[id], order_by=id ) return Category @pytest.fixture(scope='class') def article_cls(base, category_cls, user_cls): class Article(base): __tablename__ = 'article' id = sa.Column(sa.Integer, primary_key=True) _name = sa.Column('name', sa.String) name_synonym = sa.orm.synonym('name') @hybrid_property def name(self): return self._name @name.setter def name(self, name): self._name = name @hybrid_property def name_upper(self): return self.name.upper() if self.name else None @name_upper.expression def name_upper(cls): return sa.func.upper(cls.name) content = sa.Column(sa.String) category_id = sa.Column(sa.Integer, sa.ForeignKey(category_cls.id)) category = sa.orm.relationship(category_cls, backref='articles') author_id = sa.Column(sa.Integer, sa.ForeignKey(user_cls.id)) author = sa.orm.relationship( user_cls, primaryjoin=author_id == user_cls.id, backref='authored_articles' ) owner_id = sa.Column(sa.Integer, sa.ForeignKey(user_cls.id)) owner = sa.orm.relationship( user_cls, primaryjoin=owner_id == user_cls.id, backref='owned_articles' ) return Article @pytest.fixture(scope='class') def comment_cls(base, article_cls, user_cls): class Comment(base): __tablename__ = 'comment' id = sa.Column(sa.Integer, primary_key=True) content = sa.Column(sa.String) article_id = sa.Column(sa.Integer, sa.ForeignKey(article_cls.id)) article = sa.orm.relationship( article_cls, backref=sa.orm.backref('comments') ) author_id = sa.Column(sa.Integer, sa.ForeignKey(user_cls.id)) author = sa.orm.relationship(user_cls, backref='comments') article_cls.comment_count = sa.orm.column_property( sa.select([sa.func.count(Comment.id)]) .where(Comment.article_id == article_cls.id) .correlate(article_cls).label('comment_count') ) return Comment @pytest.fixture(scope='class') def composite_pk_cls(base): class CompositePKModel(base): __tablename__ = 'composite_pk_model' a = sa.Column(sa.Integer, primary_key=True) b = sa.Column(sa.Integer, primary_key=True) return CompositePKModel @pytest.fixture(scope='class') def dns(): return 'postgresql://postgres@localhost/sqlalchemy_json_api_test' @pytest.yield_fixture(scope='class') def engine(dns): engine = create_engine(dns) yield engine engine.dispose() @pytest.yield_fixture(scope='class') def connection(engine): conn = engine.connect() yield conn conn.close() @pytest.fixture(scope='class') def model_mapping( article_cls, category_cls, comment_cls, group_cls, user_cls, organization_cls, organization_membership_cls ): return { 'articles': article_cls, 'categories': category_cls, 'comments': comment_cls, 'groups': group_cls, 'users': user_cls, 'organizations': organization_cls, 'memberships': organization_membership_cls } 
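

# A minimal usage sketch (illustrative, not part of the original fixtures):
# model_mapping ties JSON API resource type names to model classes, and a
# QueryBuilder is constructed directly from it (see the query_builder fixture
# below). The select() call and its fields argument are assumptions about
# sqlalchemy_json_api's interface, shown for orientation only:
#
#     builder = QueryBuilder(model_mapping)
#     query = builder.select(article_cls, fields={'articles': ['name']})
#
# which, when executed, should yield a JSON API style document.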
@pytest.yield_fixture(scope='class') def table_creator(base, connection, model_mapping): sa.orm.configure_mappers() base.metadata.create_all(connection) yield base.metadata.drop_all(connection) @pytest.yield_fixture(scope='class') def session(connection): Session = sessionmaker(bind=connection) session = Session() yield session session.close_all() @pytest.fixture(scope='class') def dataset( session, user_cls, group_cls, article_cls, category_cls, comment_cls, organization_cls, organization_membership_cls ): organization = organization_cls(name='Organization 1') organization2 = organization_cls(name='Organization 2') organization3 = organization_cls(name='Organization 3') group = group_cls(name='Group 1') group2 = group_cls(name='Group 2') user = user_cls( id=1, name='User 1', groups=[group, group2], memberships=[ organization_membership_cls( organization=organization, is_admin=True ), organization_membership_cls( organization=organization2, is_admin=True ), organization_membership_cls( organization=organization3, is_admin=True ) ] ) user2 = user_cls(id=2, name='User 2') user3 = user_cls(id=3, name='User 3', groups=[group]) user4 = user_cls(id=4, name='User 4', groups=[group2]) user5 = user_cls(id=5, name='User 5') user.friends = [user2] user2.friends = [user3, user4] user3.friends = [user5] article = article_cls( name='Some article', author=user, owner=user2, category=category_cls( id=1, name='Some category', subcategories=[ category_cls( id=2, name='Subcategory 1', subcategories=[ category_cls( id=3, name='Subsubcategory 1', subcategories=[ category_cls( id=5, name='Subsubsubcategory 1', ), category_cls( id=6, name='Subsubsubcategory 2', ) ] ) ] ), category_cls(id=4, name='Subcategory 2'), ] ), comments=[ comment_cls( id=1, content='Comment 1', author=user ), comment_cls( id=2, content='Comment 2', author=user2 ), comment_cls( id=3, content='Comment 3', author=user ), comment_cls( id=4, content='Comment 4', author=user2 ) ] ) session.add(user3) session.add(user4) session.add(article) session.commit() @pytest.fixture def query_builder(model_mapping): return QueryBuilder(model_mapping) #!/usr/bin/env python """ Which - locate a command * adapted from Brian Curtin's http://bugs.python.org/file15381/shutil_which.patch * see http://bugs.python.org/issue444582 * uses ``PATHEXT`` on Windows * searches current directory before ``PATH`` on Windows, but not before an explicitly passed path * accepts both string or iterable for an explicitly passed path, or pathext * accepts an explicitly passed empty path, or pathext (either '' or []) * does not search ``PATH`` for files that have a path specified in their name already * moved defpath and defpathext lists initialization to module level, instead of initializing them on each function call * changed interface: which_files() returns generator, which() returns first match, or raises IOError(errno.ENOENT) .. function:: which_files(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]]) Return a generator which yields full paths in which the *file* name exists in a directory that is part of the file name, or on *path*, and has the given *mode*. By default, *mode* matches an inclusive OR of os.F_OK and os.X_OK - an existing executable file. The *path* is, by default, the ``PATH`` variable on the platform, or the string/iterable passed in as *path*. In the event that a ``PATH`` variable is not found, :const:`os.defpath` is used. On Windows, a current directory is searched before using the ``PATH`` variable, but not before an explicitly passed *path*. 
The *pathext* is only used on Windows to match files with given extensions appended as well. It defaults to the ``PATHEXT`` variable, or the string/iterable passed in as *pathext*. In the event that a ``PATHEXT`` variable is not found, default value for Windows XP/Vista is used. The command is always searched without extension first, even when *pathext* is explicitly passed. .. function:: which(file [, mode=os.F_OK | os.X_OK[, path=None[, pathext=None]]]) Return first match generated by which_files(file, mode, path, pathext), or raise IOError(errno.ENOENT). """ __docformat__ = 'restructuredtext en' __all__ = 'which which_files pathsep defpath defpathext F_OK R_OK W_OK X_OK'.split() import sys from os import access, defpath, pathsep, environ, F_OK, R_OK, W_OK, X_OK from os.path import exists, dirname, split, join ENOENT = 2 windows = sys.platform.startswith('win') defpath = environ.get('PATH', defpath).split(pathsep) if windows: defpath.insert(0, '.') # can insert without checking, when duplicates are removed # given the quite usual mess in PATH on Windows, let's rather remove duplicates seen = set() defpath = [dir for dir in defpath if dir.lower() not in seen and not seen.add(dir.lower())] del seen defpathext = [''] + environ.get('PATHEXT', '.COM;.EXE;.BAT;.CMD;.VBS;.VBE;.JS;.JSE;.WSF;.WSH;.MSC').lower().split(pathsep) else: defpathext = [''] def which_files(file, mode=F_OK | X_OK, path=None, pathext=None): """ Locate a file in a path supplied as a part of the file name, or the user's path, or a supplied path. The function yields full paths (not necessarily absolute paths), in which the given file name matches an existing file in a directory on the path. >>> def test_which(expected, *args, **argd): ... result = list(which_files(*args, **argd)) ... assert result == expected, 'which_files: %s != %s' % (result, expected) ... ... try: ... result = [ which(*args, **argd) ] ... except IOError: ... result = [] ... assert result[:1] == expected[:1], 'which: %s != %s' % (result[:1], expected[:1]) >>> if windows: cmd = environ['COMSPEC'] >>> if windows: test_which([cmd], 'cmd') >>> if windows: test_which([cmd], 'cmd.exe') >>> if windows: test_which([cmd], 'cmd', path=dirname(cmd)) >>> if windows: test_which([cmd], 'cmd', pathext='.exe') >>> if windows: test_which([cmd], cmd) >>> if windows: test_which([cmd], cmd, path='') >>> if windows: test_which([cmd], cmd, pathext='') >>> if windows: test_which([cmd], cmd[:-4]) >>> if windows: test_which([cmd], cmd[:-4], path='') >>> if windows: test_which([], 'cmd', path='') >>> if windows: test_which([], 'cmd', pathext='') >>> if windows: test_which([], '/cmd') >>> if windows: test_which([], cmd[:-4], pathext='') >>> if not windows: sh = '/bin/sh' >>> if not windows: test_which([sh], 'sh') >>> if not windows: test_which([sh], 'sh', path=dirname(sh)) >>> if not windows: test_which([sh], 'sh', pathext='') >>> if not windows: test_which([sh], sh) >>> if not windows: test_which([sh], sh, path='') >>> if not windows: test_which([sh], sh, pathext='') >>> if not windows: test_which([], 'sh', mode=W_OK) # not running as root, are you? 
>>> if not windows: test_which([], 'sh', path='') >>> if not windows: test_which([], '/sh') """ filepath, file = split(file) if filepath: path = (filepath,) elif path is None: path = defpath elif isinstance(path, str): path = path.split(pathsep) if pathext is None: pathext = defpathext elif isinstance(pathext, str): pathext = pathext.split(pathsep) if not '' in pathext: pathext.insert(0, '') # always check command without extension, even for custom pathext for dir in path: basepath = join(dir, file) for ext in pathext: fullpath = basepath + ext if exists(fullpath) and access(fullpath, mode): yield fullpath def which(file, mode=F_OK | X_OK, path=None, pathext=None): """ Locate a file in a path supplied as a part of the file name, or the user's path, or a supplied path. The function returns full path (not necessarily absolute path), in which the given file name matches an existing file in a directory on the path, or raises IOError(errno.ENOENT). >>> # for doctest see which_files() """ path = next(which_files(file, mode, path, pathext), None) if path is None: raise IOError(ENOENT, '%s not found' % (mode & X_OK and 'command' or 'file'), file) return path if __name__ == '__main__': import doctest doctest.testmod() """ Tests of student.roles """ import ddt from django.test import TestCase from courseware.tests.factories import UserFactory, StaffFactory, InstructorFactory from student.tests.factories import AnonymousUserFactory from student.roles import ( GlobalStaff, CourseRole, CourseStaffRole, CourseInstructorRole, OrgStaffRole, OrgInstructorRole, RoleCache, CourseBetaTesterRole ) from opaque_keys.edx.locations import SlashSeparatedCourseKey class RolesTestCase(TestCase): """ Tests of student.roles """ def setUp(self): super(RolesTestCase, self).setUp() self.course_key = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') self.course_loc = self.course_key.make_usage_key('course', '2012_Fall') self.anonymous_user = AnonymousUserFactory() self.student = UserFactory() self.global_staff = UserFactory(is_staff=True) self.course_staff = StaffFactory(course_key=self.course_key) self.course_instructor = InstructorFactory(course_key=self.course_key) def test_global_staff(self): self.assertFalse(GlobalStaff().has_user(self.student)) self.assertFalse(GlobalStaff().has_user(self.course_staff)) self.assertFalse(GlobalStaff().has_user(self.course_instructor)) self.assertTrue(GlobalStaff().has_user(self.global_staff)) def test_group_name_case_sensitive(self): uppercase_course_id = "ORG/COURSE/NAME" lowercase_course_id = uppercase_course_id.lower() uppercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(uppercase_course_id) lowercase_course_key = SlashSeparatedCourseKey.from_deprecated_string(lowercase_course_id) role = "role" lowercase_user = UserFactory() CourseRole(role, lowercase_course_key).add_users(lowercase_user) uppercase_user = UserFactory() CourseRole(role, uppercase_course_key).add_users(uppercase_user) self.assertTrue(CourseRole(role, lowercase_course_key).has_user(lowercase_user)) self.assertFalse(CourseRole(role, uppercase_course_key).has_user(lowercase_user)) self.assertFalse(CourseRole(role, lowercase_course_key).has_user(uppercase_user)) self.assertTrue(CourseRole(role, uppercase_course_key).has_user(uppercase_user)) def test_course_role(self): """ Test that giving a user a course role enables access appropriately """ self.assertFalse( CourseStaffRole(self.course_key).has_user(self.student), "Student has premature access to {}".format(self.course_key) ) 
        CourseStaffRole(self.course_key).add_users(self.student)
        self.assertTrue(
            CourseStaffRole(self.course_key).has_user(self.student),
            "Student doesn't have access to {}".format(unicode(self.course_key))
        )

        # remove access and confirm
        CourseStaffRole(self.course_key).remove_users(self.student)
        self.assertFalse(
            CourseStaffRole(self.course_key).has_user(self.student),
            "Student still has access to {}".format(self.course_key)
        )

    def test_org_role(self):
        """
        Test that giving a user an org role enables access appropriately
        """
        self.assertFalse(
            OrgStaffRole(self.course_key.org).has_user(self.student),
            "Student has premature access to {}".format(self.course_key.org)
        )
        OrgStaffRole(self.course_key.org).add_users(self.student)
        self.assertTrue(
            OrgStaffRole(self.course_key.org).has_user(self.student),
            "Student doesn't have access to {}".format(unicode(self.course_key.org))
        )

        # remove access and confirm
        OrgStaffRole(self.course_key.org).remove_users(self.student)
        if hasattr(self.student, '_roles'):
            del self.student._roles
        self.assertFalse(
            OrgStaffRole(self.course_key.org).has_user(self.student),
            "Student still has access to {}".format(self.course_key.org)
        )

    def test_org_and_course_roles(self):
        """
        Test that org roles don't interfere with course roles, and vice versa
        """
        OrgInstructorRole(self.course_key.org).add_users(self.student)
        CourseInstructorRole(self.course_key).add_users(self.student)
        self.assertTrue(
            OrgInstructorRole(self.course_key.org).has_user(self.student),
            "Student doesn't have access to {}".format(unicode(self.course_key.org))
        )
        self.assertTrue(
            CourseInstructorRole(self.course_key).has_user(self.student),
            "Student doesn't have access to {}".format(unicode(self.course_key))
        )

        # remove access and confirm
        OrgInstructorRole(self.course_key.org).remove_users(self.student)
        self.assertFalse(
            OrgInstructorRole(self.course_key.org).has_user(self.student),
            "Student still has access to {}".format(self.course_key.org)
        )
        self.assertTrue(
            CourseInstructorRole(self.course_key).has_user(self.student),
            "Student doesn't have access to {}".format(unicode(self.course_key))
        )

        # ok now keep org role and get rid of course one
        OrgInstructorRole(self.course_key.org).add_users(self.student)
        CourseInstructorRole(self.course_key).remove_users(self.student)
        self.assertTrue(
            OrgInstructorRole(self.course_key.org).has_user(self.student),
            "Student lost access to {}".format(self.course_key.org)
        )
        self.assertFalse(
            CourseInstructorRole(self.course_key).has_user(self.student),
            "Student still has access to {}".format(unicode(self.course_key))
        )

    def test_get_user_for_role(self):
        """
        test users_for_role
        """
        role = CourseStaffRole(self.course_key)
        role.add_users(self.student)
        self.assertGreater(len(role.users_with_role()), 0)

    def test_add_users_doesnt_add_duplicate_entry(self):
        """
        Tests that calling add_users multiple times before a single call
        to remove_users does not result in the user remaining in the group.
        """
        role = CourseStaffRole(self.course_key)
        role.add_users(self.student)
        self.assertTrue(role.has_user(self.student))
        # Call add_users a second time, then remove just once.
role.add_users(self.student) role.remove_users(self.student) self.assertFalse(role.has_user(self.student)) @ddt.ddt class RoleCacheTestCase(TestCase): IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2012_Fall') NOT_IN_KEY = SlashSeparatedCourseKey('edX', 'toy', '2013_Fall') ROLES = ( (CourseStaffRole(IN_KEY), ('staff', IN_KEY, 'edX')), (CourseInstructorRole(IN_KEY), ('instructor', IN_KEY, 'edX')), (OrgStaffRole(IN_KEY.org), ('staff', None, 'edX')), (OrgInstructorRole(IN_KEY.org), ('instructor', None, 'edX')), (CourseBetaTesterRole(IN_KEY), ('beta_testers', IN_KEY, 'edX')), ) def setUp(self): super(RoleCacheTestCase, self).setUp() self.user = UserFactory() @ddt.data(*ROLES) @ddt.unpack def test_only_in_role(self, role, target): role.add_users(self.user) cache = RoleCache(self.user) self.assertTrue(cache.has_role(*target)) for other_role, other_target in self.ROLES: if other_role == role: continue self.assertFalse(cache.has_role(*other_target)) @ddt.data(*ROLES) @ddt.unpack def test_empty_cache(self, role, target): cache = RoleCache(self.user) self.assertFalse(cache.has_role(*target)) #!/usr/bin/env python """\ Sanitize a bitbake file following the OpenEmbedded style guidelines, see http://openembedded.org/wiki/StyleGuide (C) 2006 Cyril Romain MIT license TODO: - add the others OpenEmbedded variables commonly used: - parse command arguments and print usage on misuse . prevent giving more than one .bb file in arguments - write result to a file - backup the original .bb file - make a diff and ask confirmation for patching ? - do not use startswith only: /!\ startswith('SOMETHING') is not taken into account due to the previous startswith('S'). - count rule breaks and displays them in the order frequence """ import fileinput import string import re __author__ = "Cyril Romain " __version__ = "$Revision: 0.5 $" # The standard set of variables often found in .bb files in the preferred order OE_vars = [ 'DESCRIPTION', 'AUTHOR', 'HOMEPAGE', 'SECTION', 'PRIORITY', 'LICENSE', 'DEPENDS', 'RDEPENDS', 'RRECOMMENDS', 'RSUGGESTS', 'PROVIDES', 'RPROVIDES', 'RCONFLICTS', 'SRCDATE', 'PE', 'PV', 'PR', 'INC_PR', 'SRC_URI', 'S', 'GPE_TARBALL_SUFFIX', 'inherit', 'EXTRA_', 'export', 'do_fetch', 'do_unpack', 'do_patch', 'do_configure', 'do_compile', 'do_install', 'do_package', 'do_stage', 'PACKAGE_ARCH', 'PACKAGES', 'FILES', 'WORKDIR', 'acpaths', 'addhandler', 'addtask', 'bindir', 'headers', 'include', 'includedir', 'python', 'qtopiadir', 'pkg_preins', 'pkg_prerm', 'pkg_postins', 'pkg_postrm', 'require', 'sbindir', 'basesysconfdir', 'sysconfdir', 'ALLOW_EMPTY', 'ALTERNATIVE_NAME', 'ALTERNATIVE_PATH', 'ALTERNATIVE_LINK', 'ALTERNATIVE_PRIORITY', 'ALTNAME', 'AMD_DRIVER_LABEL', 'AMD_DRIVER_VERSION', 'ANGSTROM_EXTRA_INSTALL', 'APPDESKTOP', 'APPIMAGE', 'APPNAME', 'APPTYPE', 'APPWEB_BUILD', 'APPWEB_HOST', 'AR', 'ARCH', 'ARM_INSTRUCTION_SET', 'ARM_MUTEX', 'ART_CONFIG', 'B', 'BJAM_OPTS', 'BJAM_TOOLS', 'BONOBO_HEADERS', 'BOOTSCRIPTS', 'BROKEN', 'BUILD_CPPFLAGS', 'CFLAGS', 'CCFLAGS', 'CMDLINE', 'COLLIE_MEMORY_SIZE', 'COMPATIBLE_HOST', 'COMPATIBLE_MACHINE', 'COMPILE_HERMES', 'CONFFILES', 'CONFLICTS', 'CORE_EXTRA_D', 'CORE_PACKAGES_D', 'CORE_PACKAGES_RD', 'CPPFLAGS', 'CVSDATE', 'CXXFLAGS', 'DEBIAN_NOAUTONAME', 'DEBUG_APPS', 'DEFAULT_PREFERENCE', 'DB4_CONFIG', 'EXCLUDE_FROM_SHLIBS', 'EXCLUDE_FROM_WORLD', 'FIXEDSRCDATE', 'GLIBC_ADDONS', 'GLIBC_EXTRA_OECONF', 'GNOME_VFS_HEADERS', 'HEADERS', 'INHIBIT_DEFAULT_DEPS', 'INITSCRIPT_PACKAGES', 'INITSCRIPT_NAME', 'INITSCRIPT_PARAMS', 'PACKAGE_INSTALL', 'KERNEL_IMAGETYPE', 
'KERNEL_IMAGEDEST', 'KERNEL_OUTPUT', 'KERNEL_RELEASE', 'KERNEL_PRIORITY', 'KERNEL_SOURCE', 'KERNEL_SUFFIX', 'KERNEL_VERSION', 'K_MAJOR', 'K_MICRO', 'K_MINOR', 'HHV', 'KV', 'LDFLAGS', 'LD', 'LD_SO', 'LDLIBS', 'LEAD_SONAME', 'LIBTOOL', 'LIBBDB_EXTRA', 'LIBV', 'MACHINE_ESSENTIAL_EXTRA_RDEPENDS', 'MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS', 'MACHINE_EXTRA_RDEPENDS', 'MACHINE_EXTRA_RRECOMMENDS', 'MACHINE_FEATURES', 'MACHINE_TASKS', 'MACHINE', 'MACHTYPE', 'MAKE_TARGETS', 'MESSAGEUSER', 'MESSAGEHOME', 'MIRRORS', 'MUTEX', 'OE_QMAKE_INCDIR_QT', 'OE_QMAKE_CXXFLAGS', 'ORBIT_IDL_SRC', 'PARALLEL_MAKE', 'PAKCAGE_ARCH', 'PCMCIA_MANAGER', 'PKG_BASENAME', 'PKG', 'QEMU', 'QMAKE_PROFILES', 'QPEDIR', 'QPF_DESCRIPTION', 'QPF_PKGPATTERN', 'QT_CONFIG_FLAGS', 'QT_LIBRARY', 'ROOTFS_POSTPROCESS_COMMAND', 'RREPLACES', 'TARGET_CFLAGS', 'TARGET_CPPFLAGS', 'TARGET_LDFLAGS', 'UBOOT_MACHINE', 'UCLIBC_BASE', 'UCLIBC_PATCHES', 'VIRTUAL_NAME', 'XORG_PN', 'XSERVER', 'others' ] varRegexp = r'^([a-zA-Z_0-9${}-]*)([ \t]*)([+.:]?=[+.]?)([ \t]*)([^\t]+)' routineRegexp = r'^([a-zA-Z0-9_ ${}-]+?)\(' # Variables seen in the processed .bb seen_vars = {} for v in OE_vars: seen_vars[v] = [] # _Format guideline #0_: # No spaces are allowed at the beginning of lines that define a variable or # a do_ routine def respect_rule0(line): return line.lstrip()==line def conformTo_rule0(line): return line.lstrip() # _Format guideline #1_: # No spaces are allowed behind the line continuation symbol '\' def respect_rule1(line): if line.rstrip().endswith('\\'): return line.endswith('\\') else: return True def conformTo_rule1(line): return line.rstrip() # _Format guideline #2_: # Tabs should not be used (use spaces instead). def respect_rule2(line): return line.count('\t')==0 def conformTo_rule2(line): return line.expandtabs() # _Format guideline #3_: # Comments inside bb files are allowed using the '#' character at the # beginning of a line. def respect_rule3(line): if line.lstrip().startswith('#'): return line.startswith('#') else: return True def conformTo_rule3(line): return line.lstrip() # _Format guideline #4_: # Use quotes on the right hand side of assignments FOO = "BAR" def respect_rule4(line): r = re.search(varRegexp, line) if r is not None: r2 = re.search(r'("?)([^"\\]*)(["\\]?)', r.group(5)) # do not test for None it because always match return r2.group(1)=='"' and r2.group(3)!='' return False def conformTo_rule4(line): r = re.search(varRegexp, line) return ''.join([r.group(1), ' ', r.group(3), ' "', r.group(5), r.group(5).endswith('"') and '' or '"']) # _Format guideline #5_: # The correct spacing for a variable is FOO = "BAR". def respect_rule5(line): r = re.search(varRegexp, line) return r is not None and r.group(2)==" " and r.group(4)==" " def conformTo_rule5(line): r = re.search(varRegexp, line) return ''.join([r.group(1), ' ', r.group(3), ' ', r.group(5)]) # _Format guideline #6_: # Don't use spaces or tabs on empty lines def respect_rule6(line): return not line.isspace() or line=="\n" def conformTo_rule6(line): return "" # _Format guideline #7_: # Indentation of multiline variables such as SRC_URI is desireable. 
def respect_rule7(line):
    return True
def conformTo_rule7(line):
    return line

rules = (
    (respect_rule0, conformTo_rule0, "No spaces are allowed at the beginning of lines that define a variable or a do_ routine"),
    (respect_rule1, conformTo_rule1, "No spaces are allowed behind the line continuation symbol '\\'"),
    (respect_rule2, conformTo_rule2, "Tabs should not be used (use spaces instead)"),
    (respect_rule3, conformTo_rule3, "Comments inside bb files are allowed using the '#' character at the beginning of a line"),
    (respect_rule4, conformTo_rule4, "Use quotes on the right hand side of assignments FOO = \"BAR\""),
    (respect_rule5, conformTo_rule5, "The correct spacing for a variable is FOO = \"BAR\""),
    (respect_rule6, conformTo_rule6, "Don't use spaces or tabs on empty lines"),
    (respect_rule7, conformTo_rule7, "Indentation of multiline variables such as SRC_URI is desirable"),
)

# Function to check that a line respects a rule. If not, it tries to conform
# the line to the rule. Reminder or transgression messages are dumped accordingly.
def follow_rule(i, line):
    oldline = line
    # if the line does not respect the rule
    if not rules[i][0](line):
        # try to conform it to the rule
        line = rules[i][1](line)
        # if the line still does not respect the rule
        if not rules[i][0](line):
            # this is a rule transgression
            print "## Transgression: ", rules[i][2], " in:", oldline
        else:
            # just remind user about his/her errors
            print "## Reminder: ", rules[i][2], " in :", oldline
    return line

if __name__ == "__main__":
    # -- retrieves the lines of the .bb file --
    lines = []
    for line in fileinput.input():
        # use 'if True' to warn user about all the rules he/she breaks
        # use 'if False' to conform to rules{2,1,6} without warnings
        if True:
            lines.append(line)
        else:
            # expandtabs on each line so that rule2 is always respected
            # rstrip each line so that rule1 is always respected
            line = line.expandtabs().rstrip()
            # ignore empty lines (or lines filled with spaces or tabs only)
            # so that rule6 is always respected
            if line != '':
                lines.append(line)

    # -- parse the file --
    var = ""
    in_routine = False
    commentBloc = []
    olines = []
    for line in lines:
        originalLine = line
        # rstrip line to remove line break characters
        line = line.rstrip()
        line = follow_rule(2, line)
        line = follow_rule(1, line)
        line = follow_rule(6, line)

        # ignore empty lines
        if line.isspace() or line == '':
            # flush comments into the olines
            for c in commentBloc:
                olines.append(c)
            commentBloc = []
            continue

        if line.startswith('}'):
            in_routine = False
        keep = line.endswith('\\') or in_routine

        # handles commented lines
        if line.lstrip().startswith('#'):
            # check and follow rule3 if not in a variable or routine
            if not in_routine:
                line = follow_rule(3, line)
            commentBloc.append(line)
            continue

        if seen_vars.has_key(var):
            for c in commentBloc:
                seen_vars[var].append(c)
            commentBloc = []
            seen_vars[var].append(line)
        else:
            for k in OE_vars:
                if line.startswith(k):
                    var = k
                    break
            if re.match(routineRegexp, line) is not None:
                in_routine = True
                line = follow_rule(0, line)
            elif re.match(varRegexp, line) is not None:
                line = follow_rule(0, line)
                line = follow_rule(4, line)
                line = follow_rule(5, line)
            if var == "":
                if not in_routine:
                    print "## Warning: unknown variable/routine \"%s\"" % originalLine
                var = 'others'
            for c in commentBloc:
                seen_vars[var].append(c)
            commentBloc = []
            seen_vars[var].append(line)
        if not keep and not in_routine:
            var = ""

    # -- dump the sanitized .bb file --
    addEmptyLine = False
    # write comments that are not related to variables nor routines
    for l in commentBloc:
        olines.append(l)
    # write
variables and routines previourVarPrefix = "unknown" for k in OE_vars: if k=='SRC_URI': addEmptyLine = True if seen_vars[k] != []: if addEmptyLine and not k.startswith(previourVarPrefix): olines.append("") for l in seen_vars[k]: olines.append(l) previourVarPrefix = k.split('_')[0]=='' and "unknown" or k.split('_')[0] for line in olines: print line ########################################################################## # # Copyright (c) 2014, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import Gaffer import GafferScene ########################################################################## # Metadata ########################################################################## Gaffer.Metadata.registerNode( GafferScene.DeleteOptions, "description", """ A node which removes options from the globals. """, plugs = { "names" : [ "description", """ The names of options to be removed. Names should be separated by spaces and can use Gaffer's standard wildcards. """, ], "invertNames" : [ "description", """ When on, matching names are kept, and non-matching names are removed. """, ], } ) ## pygame - Python Game Library ## Copyright (C) 2000-2003 Pete Shinners ## ## This library is free software; you can redistribute it and/or ## modify it under the terms of the GNU Library General Public ## License as published by the Free Software Foundation; either ## version 2 of the License, or (at your option) any later version. ## ## This library is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## Library General Public License for more details. 
## ## You should have received a copy of the GNU Library General Public ## License along with this library; if not, write to the Free ## Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ## ## Pete Shinners ## pete@shinners.org """Set of functions from PyGame that are handy to have in the local namespace for your module""" from pygame.constants import * from pygame.rect import Rect import pygame.color as color Color = color.Color # Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Simple benchmarks for reductions and their gradients.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import time import numpy as np from six.moves import range # pylint: disable=redefined-builtin from tensorflow.core.protobuf import config_pb2 from tensorflow.python.client import session from tensorflow.python.eager import backprop from tensorflow.python.eager import context from tensorflow.python.framework import constant_op from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import gradients_impl from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class ReduceBenchmarks(test.Benchmark): """Benchmarks for reductions.""" def _run(self, func, num_iters): # call func to maybe warm up the GPU func() start = time.time() for _ in range(num_iters): func() end = time.time() mean_us = (end - start) * 1e6 / num_iters self.report_benchmark( iters=num_iters, wall_time=mean_us, extras={"examples_per_sec": num_iters / (end - start)}) def benchmark_reduce_sum_grad_eager(self): with context.eager_mode(): tensor = array_ops.zeros([100, 1000]) def fn(): backprop.gradients_function(math_ops.reduce_sum, [0])(tensor) self._run(fn, 10000) def benchmark_reduce_sum_grad_eager_cpu(self): with context.eager_mode(), ops.device("/cpu:0"): tensor = array_ops.zeros([100, 1000]) def fn(): backprop.gradients_function(math_ops.reduce_sum, [0])(tensor) self._run(fn, 10000) def benchmark_reduce_sum_grad_graph(self): config = config_pb2.ConfigProto( graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0))) with ops.Graph().as_default(), session.Session(config=config) as sess: tensor = constant_op.constant(np.zeros([100, 1000], dtype=np.float32)) reduction = math_ops.reduce_sum(tensor) grad, = gradients_impl.gradients(reduction, tensor) def fn(): sess.run(grad.op) self._run(fn, 10000) def benchmark_reduce_sum_grad_graph_cpu(self): config = config_pb2.ConfigProto( graph_options=config_pb2.GraphOptions( optimizer_options=config_pb2.OptimizerOptions( opt_level=config_pb2.OptimizerOptions.L0))) with ops.Graph().as_default(), session.Session(config=config) as sess: with ops.device("/cpu:0"): tensor = constant_op.constant(np.zeros([100, 1000], dtype=np.float32)) reduction = 
                    math_ops.reduce_sum(tensor)
        grad, = gradients_impl.gradients(reduction, tensor)

        def fn():
          sess.run(grad.op)

        self._run(fn, 10000)


if __name__ == "__main__":
  test.main()
#!/usr/bin/python2.4
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Mox, an object-mocking framework for Python.

Mox works in the record-replay-verify paradigm. When you first create a mock
object, it is in record mode. You then programmatically set the expected
behavior of the mock object (what methods are to be called on it, with what
parameters, what they should return, and in what order).

Once you have set up the expected mock behavior, you put it in replay mode.
Now the mock responds to method calls just as you told it to.

If an unexpected method (or an expected method with unexpected parameters) is
called, then an exception will be raised.

Once you are done interacting with the mock, you need to verify that all the
expected interactions occurred. (Maybe your code exited prematurely without
calling some cleanup method!) The verify phase ensures that every expected
method was called; otherwise, an exception will be raised.

WARNING! Mock objects created by Mox are not thread-safe. If you call a mock
from multiple threads, it should be guarded by a mutex.

TODO(user): Add the option to make mocks thread-safe!

Suggested usage / workflow:

  # Create Mox factory
  my_mox = Mox()

  # Create a mock data access object
  mock_dao = my_mox.CreateMock(DAOClass)

  # Set up expected behavior
  mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
  mock_dao.DeletePerson(person)

  # Put mocks in replay mode
  my_mox.ReplayAll()

  # Inject mock object and run test
  controller.SetDao(mock_dao)
  controller.DeletePersonById('1')

  # Verify all methods were called as expected
  my_mox.VerifyAll()
"""

from collections import deque
import difflib
import inspect
import re
import types
import unittest

import stubout


class Error(AssertionError):
  """Base exception for this module."""
  pass


class ExpectedMethodCallsError(Error):
  """Raised when Verify() is called before all expected methods have been
  called.
  """

  def __init__(self, expected_methods):
    """Init exception.

    Args:
      # expected_methods: A sequence of MockMethod objects that should have been
      #   called.
      expected_methods: [MockMethod]

    Raises:
      ValueError: if expected_methods contains no methods.
    """
    if not expected_methods:
      raise ValueError("There must be at least one expected method")
    Error.__init__(self)
    self._expected_methods = expected_methods

  def __str__(self):
    calls = "\n".join(["%3d. %s" % (i, m)
                       for i, m in enumerate(self._expected_methods)])
    return "Verify: Expected methods never called:\n%s" % (calls,)


class UnexpectedMethodCallError(Error):
  """Raised when an unexpected method is called.

  This can occur if a method is called with incorrect parameters, or out of
  the specified order.
  """

  def __init__(self, unexpected_method, expected):
    """Init exception.

    Args:
      # unexpected_method: MockMethod that was called but was not at the head
      #   of the expected_method queue.
      # expected: MockMethod or UnorderedGroup the method should have
      #   been in.
      unexpected_method: MockMethod
      expected: MockMethod or UnorderedGroup
    """
    Error.__init__(self)
    if expected is None:
      self._str = "Unexpected method call %s" % (unexpected_method,)
    else:
      differ = difflib.Differ()
      diff = differ.compare(str(unexpected_method).splitlines(True),
                            str(expected).splitlines(True))
      self._str = ("Unexpected method call. unexpected:- expected:+\n%s"
                   % ("\n".join(line.rstrip() for line in diff),))

  def __str__(self):
    return self._str


class UnknownMethodCallError(Error):
  """Raised if an unknown method is requested of the mock object."""

  def __init__(self, unknown_method_name):
    """Init exception.

    Args:
      # unknown_method_name: Method call that is not part of the mocked
      #   class's public interface.
      unknown_method_name: str
    """
    Error.__init__(self)
    self._unknown_method_name = unknown_method_name

  def __str__(self):
    return "Method called is not a member of the object: %s" % \
        self._unknown_method_name


class PrivateAttributeError(Error):
  """Raised if a MockObject is passed a private additional attribute name."""

  def __init__(self, attr):
    Error.__init__(self)
    self._attr = attr

  def __str__(self):
    return ("Attribute '%s' is private and should not be available in a mock "
            "object." % self._attr)


class ExpectedMockCreationError(Error):
  """Raised if mocks should have been created by StubOutClassWithMocks."""

  def __init__(self, expected_mocks):
    """Init exception.

    Args:
      # expected_mocks: A sequence of MockObjects that should have been
      #   created

    Raises:
      ValueError: if expected_mocks contains no methods.
    """
    if not expected_mocks:
      raise ValueError("There must be at least one expected method")
    Error.__init__(self)
    self._expected_mocks = expected_mocks

  def __str__(self):
    mocks = "\n".join(["%3d. %s" % (i, m)
                       for i, m in enumerate(self._expected_mocks)])
    return "Verify: Expected mocks never created:\n%s" % (mocks,)


class UnexpectedMockCreationError(Error):
  """Raised if too many mocks were created by StubOutClassWithMocks."""

  def __init__(self, instance, *params, **named_params):
    """Init exception.

    Args:
      # instance: the type of object that was created
      # params: parameters given during instantiation
      # named_params: named parameters given during instantiation
    """
    Error.__init__(self)
    self._instance = instance
    self._params = params
    self._named_params = named_params

  def __str__(self):
    args = ", ".join(["%s" % v for v in self._params])
    error = "Unexpected mock creation: %s(%s" % (self._instance, args)

    if self._named_params:
      error += ", " + ", ".join(["%s=%s" % (k, v) for k, v in
                                 self._named_params.iteritems()])

    error += ")"
    return error


class Mox(object):
  """Mox: a factory for creating mock objects."""

  # A list of types that should be stubbed out with MockObjects (as
  # opposed to MockAnythings).
  _USE_MOCK_OBJECT = [types.ClassType, types.FunctionType, types.InstanceType,
                      types.ModuleType, types.ObjectType, types.TypeType,
                      types.MethodType, types.UnboundMethodType,
                      ]

  # A list of types that may be stubbed out with a MockObjectFactory.
  _USE_MOCK_FACTORY = [types.ClassType, types.ObjectType, types.TypeType]

  def __init__(self):
    """Initialize a new Mox."""
    self._mock_objects = []
    self.stubs = stubout.StubOutForTesting()

  def CreateMock(self, class_to_mock, attrs=None):
    """Create a new mock object.

    Args:
      # class_to_mock: the class to be mocked
      class_to_mock: class
      attrs: dict of attribute names to values that will be set on the mock
        object. Only public attributes may be set.
    Returns:
      MockObject that can be used as the class_to_mock would be.
    """
    if attrs is None:
      attrs = {}
    new_mock = MockObject(class_to_mock, attrs=attrs)
    self._mock_objects.append(new_mock)
    return new_mock

  def CreateMockAnything(self, description=None):
    """Create a mock that will accept any method calls.

    This does not enforce an interface.

    Args:
      description: str. Optionally, a descriptive name for the mock object
        being created, for debugging output purposes.
    """
    new_mock = MockAnything(description=description)
    self._mock_objects.append(new_mock)
    return new_mock

  def ReplayAll(self):
    """Set all mock objects to replay mode."""
    for mock_obj in self._mock_objects:
      mock_obj._Replay()

  def VerifyAll(self):
    """Call verify on all mock objects created."""
    for mock_obj in self._mock_objects:
      mock_obj._Verify()

  def ResetAll(self):
    """Call reset on all mock objects. This does not unset stubs."""
    for mock_obj in self._mock_objects:
      mock_obj._Reset()

  def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
    """Replace a method, attribute, etc. with a Mock.

    This will replace a class or module with a MockObject, and everything else
    (method, function, etc) with a MockAnything. This can be overridden to
    always use a MockAnything by setting use_mock_anything to True.

    Args:
      obj: A Python object (class, module, instance, callable).
      attr_name: str. The name of the attribute to replace with a mock.
      use_mock_anything: bool. True if a MockAnything should be used
        regardless of the type of attribute.
    """
    attr_to_replace = getattr(obj, attr_name)
    attr_type = type(attr_to_replace)

    if attr_type == MockAnything or attr_type == MockObject:
      raise TypeError('Cannot mock a MockAnything! Did you remember to '
                      'call UnsetStubs in your previous test?')

    if attr_type in self._USE_MOCK_OBJECT and not use_mock_anything:
      stub = self.CreateMock(attr_to_replace)
    else:
      stub = self.CreateMockAnything(description='Stub for %s' % attr_to_replace)
      stub.__name__ = attr_name

    self.stubs.Set(obj, attr_name, stub)

  def StubOutClassWithMocks(self, obj, attr_name):
    """Replace a class with a "mock factory" that will create mock objects.

    This is useful if the code-under-test directly instantiates
    dependencies. Previously some boilerplate was necessary to create a mock
    that would act as a factory. Using StubOutClassWithMocks, once you've
    stubbed out the class you may use the stubbed class as you would any
    other mock created by mox: during the record phase, new mock instances
    will be created, and during replay, the recorded mocks will be returned.

    # Example using StubOutWithMock (the old, clunky way):

    mock1 = mox.CreateMock(my_import.FooClass)
    mock2 = mox.CreateMock(my_import.FooClass)
    foo_factory = mox.StubOutWithMock(my_import, 'FooClass',
                                      use_mock_anything=True)
    foo_factory(1, 2).AndReturn(mock1)
    foo_factory(9, 10).AndReturn(mock2)
    mox.ReplayAll()

    my_import.FooClass(1, 2)   # Returns mock1 again.
    my_import.FooClass(9, 10)  # Returns mock2 again.
    mox.VerifyAll()

    # Example using StubOutClassWithMocks:

    mox.StubOutClassWithMocks(my_import, 'FooClass')
    mock1 = my_import.FooClass(1, 2)   # Returns a new mock of FooClass
    mock2 = my_import.FooClass(9, 10)  # Returns another mock instance

    mox.ReplayAll()
    my_import.FooClass(1, 2)   # Returns mock1 again.
    my_import.FooClass(9, 10)  # Returns mock2 again.
    mox.VerifyAll()
    """
    attr_to_replace = getattr(obj, attr_name)
    attr_type = type(attr_to_replace)

    if attr_type == MockAnything or attr_type == MockObject:
      raise TypeError('Cannot mock a MockAnything! 
Did you remember to ' 'call UnsetStubs in your previous test?') if attr_type not in self._USE_MOCK_FACTORY: raise TypeError('Given attr is not a Class. Use StubOutWithMock.') factory = _MockObjectFactory(attr_to_replace, self) self._mock_objects.append(factory) self.stubs.Set(obj, attr_name, factory) def UnsetStubs(self): """Restore stubs to their original state.""" self.stubs.UnsetAll() def Replay(*args): """Put mocks into Replay mode. Args: # args is any number of mocks to put into replay mode. """ for mock in args: mock._Replay() def Verify(*args): """Verify mocks. Args: # args is any number of mocks to be verified. """ for mock in args: mock._Verify() def Reset(*args): """Reset mocks. Args: # args is any number of mocks to be reset. """ for mock in args: mock._Reset() class MockAnything: """A mock that can be used to mock anything. This is helpful for mocking classes that do not provide a public interface. """ def __init__(self, description=None): """Initialize a new MockAnything. Args: description: str. Optionally, a descriptive name for the mock object being created, for debugging output purposes. """ self._description = description self._Reset() def __repr__(self): if self._description: return '' % self._description else: return '' def __getattr__(self, method_name): """Intercept method calls on this object. A new MockMethod is returned that is aware of the MockAnything's state (record or replay). The call will be recorded or replayed by the MockMethod's __call__. Args: # method name: the name of the method being called. method_name: str Returns: A new MockMethod aware of MockAnything's state (record or replay). """ if method_name == '__dir__': return self.__class__.__dir__.__get__(self, self.__class__) return self._CreateMockMethod(method_name) def _CreateMockMethod(self, method_name, method_to_mock=None): """Create a new mock method call and return it. Args: # method_name: the name of the method being called. # method_to_mock: The actual method being mocked, used for introspection. method_name: str method_to_mock: a method object Returns: A new MockMethod aware of MockAnything's state (record or replay). """ return MockMethod(method_name, self._expected_calls_queue, self._replay_mode, method_to_mock=method_to_mock, description=self._description) def __nonzero__(self): """Return 1 for nonzero so the mock can be used as a conditional.""" return 1 def __eq__(self, rhs): """Provide custom logic to compare objects.""" return (isinstance(rhs, MockAnything) and self._replay_mode == rhs._replay_mode and self._expected_calls_queue == rhs._expected_calls_queue) def __ne__(self, rhs): """Provide custom logic to compare objects.""" return not self == rhs def _Replay(self): """Start replaying expected method calls.""" self._replay_mode = True def _Verify(self): """Verify that all of the expected calls have been made. Raises: ExpectedMethodCallsError: if there are still more method calls in the expected queue. """ # If the list of expected calls is not empty, raise an exception if self._expected_calls_queue: # The last MultipleTimesGroup is not popped from the queue. 
if (len(self._expected_calls_queue) == 1 and isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and self._expected_calls_queue[0].IsSatisfied()): pass else: raise ExpectedMethodCallsError(self._expected_calls_queue) def _Reset(self): """Reset the state of this mock to record mode with an empty queue.""" # Maintain a list of method calls we are expecting self._expected_calls_queue = deque() # Make sure we are in setup mode, not replay mode self._replay_mode = False class MockObject(MockAnything, object): """A mock object that simulates the public/protected interface of a class.""" def __init__(self, class_to_mock, attrs=None): """Initialize a mock object. This determines the methods and properties of the class and stores them. Args: # class_to_mock: class to be mocked class_to_mock: class attrs: dict of attribute names to values that will be set on the mock object. Only public attributes may be set. Raises: PrivateAttributeError: if a supplied attribute is not public. ValueError: if an attribute would mask an existing method. """ if attrs is None: attrs = {} # This is used to hack around the mixin/inheritance of MockAnything, which # is not a proper object (it can be anything. :-) MockAnything.__dict__['__init__'](self) # Get a list of all the public and special methods we should mock. self._known_methods = set() self._known_vars = set() self._class_to_mock = class_to_mock try: if inspect.isclass(self._class_to_mock): self._description = class_to_mock.__name__ else: self._description = type(class_to_mock).__name__ except Exception: pass for method in dir(class_to_mock): attr = getattr(class_to_mock, method) if callable(attr): self._known_methods.add(method) elif not (type(attr) is property): # treating properties as class vars makes little sense. self._known_vars.add(method) # Set additional attributes at instantiation time; this is quicker # than manually setting attributes that are normally created in # __init__. for attr, value in attrs.items(): if attr.startswith("_"): raise PrivateAttributeError(attr) elif attr in self._known_methods: raise ValueError("'%s' is a method of '%s' objects." % (attr, class_to_mock)) else: setattr(self, attr, value) def __getattr__(self, name): """Intercept attribute request on this object. If the attribute is a public class variable, it will be returned and not recorded as a call. If the attribute is not a variable, it is handled like a method call. The method name is checked against the set of mockable methods, and a new MockMethod is returned that is aware of the MockObject's state (record or replay). The call will be recorded or replayed by the MockMethod's __call__. Args: # name: the name of the attribute being requested. name: str Returns: Either a class variable or a new MockMethod that is aware of the state of the mock (record or replay). Raises: UnknownMethodCallError if the MockObject does not mock the requested method. """ if name in self._known_vars: return getattr(self._class_to_mock, name) if name in self._known_methods: return self._CreateMockMethod( name, method_to_mock=getattr(self._class_to_mock, name)) raise UnknownMethodCallError(name) def __eq__(self, rhs): """Provide custom logic to compare objects.""" return (isinstance(rhs, MockObject) and self._class_to_mock == rhs._class_to_mock and self._replay_mode == rhs._replay_mode and self._expected_calls_queue == rhs._expected_calls_queue) def __setitem__(self, key, value): """Provide custom logic for mocking classes that support item assignment. 
Args: key: Key to set the value for. value: Value to set. Returns: Expected return value in replay mode. A MockMethod object for the __setitem__ method that has already been called if not in replay mode. Raises: TypeError if the underlying class does not support item assignment. UnexpectedMethodCallError if the object does not expect the call to __setitem__. """ # Verify the class supports item assignment. if '__setitem__' not in dir(self._class_to_mock): raise TypeError('object does not support item assignment') # If we are in replay mode then simply call the mock __setitem__ method. if self._replay_mode: return MockMethod('__setitem__', self._expected_calls_queue, self._replay_mode)(key, value) # Otherwise, create a mock method __setitem__. return self._CreateMockMethod('__setitem__')(key, value) def __getitem__(self, key): """Provide custom logic for mocking classes that are subscriptable. Args: key: Key to return the value for. Returns: Expected return value in replay mode. A MockMethod object for the __getitem__ method that has already been called if not in replay mode. Raises: TypeError if the underlying class is not subscriptable. UnexpectedMethodCallError if the object does not expect the call to __getitem__. """ # Verify the class supports item assignment. if '__getitem__' not in dir(self._class_to_mock): raise TypeError('unsubscriptable object') # If we are in replay mode then simply call the mock __getitem__ method. if self._replay_mode: return MockMethod('__getitem__', self._expected_calls_queue, self._replay_mode)(key) # Otherwise, create a mock method __getitem__. return self._CreateMockMethod('__getitem__')(key) def __iter__(self): """Provide custom logic for mocking classes that are iterable. Returns: Expected return value in replay mode. A MockMethod object for the __iter__ method that has already been called if not in replay mode. Raises: TypeError if the underlying class is not iterable. UnexpectedMethodCallError if the object does not expect the call to __iter__. """ methods = dir(self._class_to_mock) # Verify the class supports iteration. if '__iter__' not in methods: # If it doesn't have iter method and we are in replay method, then try to # iterate using subscripts. if '__getitem__' not in methods or not self._replay_mode: raise TypeError('not iterable object') else: results = [] index = 0 try: while True: results.append(self[index]) index += 1 except IndexError: return iter(results) # If we are in replay mode then simply call the mock __iter__ method. if self._replay_mode: return MockMethod('__iter__', self._expected_calls_queue, self._replay_mode)() # Otherwise, create a mock method __iter__. return self._CreateMockMethod('__iter__')() def __contains__(self, key): """Provide custom logic for mocking classes that contain items. Args: key: Key to look in container for. Returns: Expected return value in replay mode. A MockMethod object for the __contains__ method that has already been called if not in replay mode. Raises: TypeError if the underlying class does not implement __contains__ UnexpectedMethodCaller if the object does not expect the call to __contains__. 
""" contains = self._class_to_mock.__dict__.get('__contains__', None) if contains is None: raise TypeError('unsubscriptable object') if self._replay_mode: return MockMethod('__contains__', self._expected_calls_queue, self._replay_mode)(key) return self._CreateMockMethod('__contains__')(key) def __call__(self, *params, **named_params): """Provide custom logic for mocking classes that are callable.""" # Verify the class we are mocking is callable. callable = hasattr(self._class_to_mock, '__call__') if not callable: raise TypeError('Not callable') # Because the call is happening directly on this object instead of a method, # the call on the mock method is made right here # If we are mocking a Function, then use the function, and not the # __call__ method method = None if type(self._class_to_mock) in (types.FunctionType, types.MethodType): method = self._class_to_mock; else: method = getattr(self._class_to_mock, '__call__') mock_method = self._CreateMockMethod('__call__', method_to_mock=method) return mock_method(*params, **named_params) @property def __class__(self): """Return the class that is being mocked.""" return self._class_to_mock @property def __name__(self): """Return the name that is being mocked.""" return self._description class _MockObjectFactory(MockObject): """A MockObjectFactory creates mocks and verifies __init__ params. A MockObjectFactory removes the boiler plate code that was previously necessary to stub out direction instantiation of a class. The MockObjectFactory creates new MockObjects when called and verifies the __init__ params are correct when in record mode. When replaying, existing mocks are returned, and the __init__ params are verified. See StubOutWithMock vs StubOutClassWithMocks for more detail. """ def __init__(self, class_to_mock, mox_instance): MockObject.__init__(self, class_to_mock) self._mox = mox_instance self._instance_queue = deque() def __call__(self, *params, **named_params): """Instantiate and record that a new mock has been created.""" method = getattr(self._class_to_mock, '__init__') mock_method = self._CreateMockMethod('__init__', method_to_mock=method) # Note: calling mock_method() is deferred in order to catch the # empty instance_queue first. if self._replay_mode: if not self._instance_queue: raise UnexpectedMockCreationError(self._class_to_mock, *params, **named_params) mock_method(*params, **named_params) return self._instance_queue.pop() else: mock_method(*params, **named_params) instance = self._mox.CreateMock(self._class_to_mock) self._instance_queue.appendleft(instance) return instance def _Verify(self): """Verify that all mocks have been created.""" if self._instance_queue: raise ExpectedMockCreationError(self._instance_queue) super(_MockObjectFactory, self)._Verify() class MethodSignatureChecker(object): """Ensures that methods are called correctly.""" _NEEDED, _DEFAULT, _GIVEN = range(3) def __init__(self, method): """Creates a checker. Args: # method: A method to check. method: function Raises: ValueError: method could not be inspected, so checks aren't possible. Some methods and functions like built-ins can't be inspected. """ try: self._args, varargs, varkw, defaults = inspect.getargspec(method) except TypeError: raise ValueError('Could not get argument specification for %r' % (method,)) if inspect.ismethod(method): self._args = self._args[1:] # Skip 'self'. self._method = method self._instance = None # May contain the instance this is bound to. 
self._has_varargs = varargs is not None self._has_varkw = varkw is not None if defaults is None: self._required_args = self._args self._default_args = [] else: self._required_args = self._args[:-len(defaults)] self._default_args = self._args[-len(defaults):] def _RecordArgumentGiven(self, arg_name, arg_status): """Mark an argument as being given. Args: # arg_name: The name of the argument to mark in arg_status. # arg_status: Maps argument names to one of _NEEDED, _DEFAULT, _GIVEN. arg_name: string arg_status: dict Raises: AttributeError: arg_name is already marked as _GIVEN. """ if arg_status.get(arg_name, None) == MethodSignatureChecker._GIVEN: raise AttributeError('%s provided more than once' % (arg_name,)) arg_status[arg_name] = MethodSignatureChecker._GIVEN def Check(self, params, named_params): """Ensures that the parameters used while recording a call are valid. Args: # params: A list of positional parameters. # named_params: A dict of named parameters. params: list named_params: dict Raises: AttributeError: the given parameters don't work with the given method. """ arg_status = dict((a, MethodSignatureChecker._NEEDED) for a in self._required_args) for arg in self._default_args: arg_status[arg] = MethodSignatureChecker._DEFAULT # WARNING: Suspect hack ahead. # # Check to see if this is an unbound method, where the instance # should be bound as the first argument. We try to determine if # the first argument (param[0]) is an instance of the class, or it # is equivalent to the class (used to account for Comparators). # # NOTE: If a Func() comparator is used, and the signature is not # correct, this will cause extra executions of the function. if inspect.ismethod(self._method): # The extra param accounts for the bound instance. if len(params) > len(self._required_args): expected = getattr(self._method, 'im_class', None) # Check if the param is an instance of the expected class, # or check equality (useful for checking Comparators). # This is a hack to work around the fact that the first # parameter can be a Comparator, and the comparison may raise # an exception during this comparison, which is OK. try: param_equality = (params[0] == expected) except: param_equality = False; if isinstance(params[0], expected) or param_equality: params = params[1:] # If the IsA() comparator is being used, we need to check the # inverse of the usual case - that the given instance is a subclass # of the expected class. For example, the code under test does # late binding to a subclass. elif isinstance(params[0], IsA) and params[0]._IsSubClass(expected): params = params[1:] # Check that each positional param is valid. for i in range(len(params)): try: arg_name = self._args[i] except IndexError: if not self._has_varargs: raise AttributeError('%s does not take %d or more positional ' 'arguments' % (self._method.__name__, i)) else: self._RecordArgumentGiven(arg_name, arg_status) # Check each keyword argument. for arg_name in named_params: if arg_name not in arg_status and not self._has_varkw: raise AttributeError('%s is not expecting keyword argument %s' % (self._method.__name__, arg_name)) self._RecordArgumentGiven(arg_name, arg_status) # Ensure all the required arguments have been given. still_needed = [k for k, v in arg_status.iteritems() if v == MethodSignatureChecker._NEEDED] if still_needed: raise AttributeError('No values given for arguments: %s' % (' '.join(sorted(still_needed)))) class MockMethod(object): """Callable mock method. 
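# Illustrative sketch: MethodSignatureChecker rejects impossible expectations
# while still in record mode, long before replay. `Greeter` is hypothetical.
#
#   import mox
#
#   class Greeter(object):
#       def Greet(self, name):
#           return 'hello %s' % name
#
#   m = mox.Mox()
#   greeter = m.CreateMock(Greeter)
#   try:
#       greeter.Greet('a', 'b')  # too many positional args for Greet(self, name)
#   except AttributeError:
#       pass                     # raised at record time by the checker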
A MockMethod should act exactly like the method it mocks, accepting parameters and returning a value, or throwing an exception (as specified). When this method is called, it can optionally verify whether the called method (name and signature) matches the expected method. """ def __init__(self, method_name, call_queue, replay_mode, method_to_mock=None, description=None): """Construct a new mock method. Args: # method_name: the name of the method # call_queue: deque of calls, verify this call against the head, or add # this call to the queue. # replay_mode: False if we are recording, True if we are verifying calls # against the call queue. # method_to_mock: The actual method being mocked, used for introspection. # description: optionally, a descriptive name for this method. Typically # this is equal to the descriptive name of the method's class. method_name: str call_queue: list or deque replay_mode: bool method_to_mock: a method object description: str or None """ self._name = method_name self.__name__ = method_name self._call_queue = call_queue if not isinstance(call_queue, deque): self._call_queue = deque(self._call_queue) self._replay_mode = replay_mode self._description = description self._params = None self._named_params = None self._return_value = None self._exception = None self._side_effects = None try: self._checker = MethodSignatureChecker(method_to_mock) except ValueError: self._checker = None def __call__(self, *params, **named_params): """Log parameters and return the specified return value. If the Mock(Anything/Object) associated with this call is in record mode, this MockMethod will be pushed onto the expected call queue. If the mock is in replay mode, this will pop a MockMethod off the top of the queue and verify this call is equal to the expected call. Raises: UnexpectedMethodCall if this call is supposed to match an expected method call and it does not. """ self._params = params self._named_params = named_params if not self._replay_mode: if self._checker is not None: self._checker.Check(params, named_params) self._call_queue.append(self) return self expected_method = self._VerifyMethodCall() if expected_method._side_effects: result = expected_method._side_effects(*params, **named_params) if expected_method._return_value is None: expected_method._return_value = result if expected_method._exception: raise expected_method._exception return expected_method._return_value def __getattr__(self, name): """Raise an AttributeError with a helpful message.""" raise AttributeError('MockMethod has no attribute "%s". ' 'Did you remember to put your mocks in replay mode?' % name) def __iter__(self): """Raise a TypeError with a helpful message.""" raise TypeError('MockMethod cannot be iterated. ' 'Did you remember to put your mocks in replay mode?') def next(self): """Raise a TypeError with a helpful message.""" raise TypeError('MockMethod cannot be iterated. ' 'Did you remember to put your mocks in replay mode?') def _PopNextMethod(self): """Pop the next method from our call queue.""" try: return self._call_queue.popleft() except IndexError: raise UnexpectedMethodCallError(self, None) def _VerifyMethodCall(self): """Verify the called method is expected. This can be an ordered method, or part of an unordered set. Returns: The expected mock method. Raises: UnexpectedMethodCall if the method called was not expected. """ expected = self._PopNextMethod() # Loop here, because we might have a MethodGroup followed by another # group. 
while isinstance(expected, MethodGroup): expected, method = expected.MethodCalled(self) if method is not None: return method # This is a mock method, so just check equality. if expected != self: raise UnexpectedMethodCallError(self, expected) return expected def __str__(self): params = ', '.join( [repr(p) for p in self._params or []] + ['%s=%r' % x for x in sorted((self._named_params or {}).items())]) full_desc = "%s(%s) -> %r" % (self._name, params, self._return_value) if self._description: full_desc = "%s.%s" % (self._description, full_desc) return full_desc def __eq__(self, rhs): """Test whether this MockMethod is equivalent to another MockMethod. Args: # rhs: the right hand side of the test rhs: MockMethod """ return (isinstance(rhs, MockMethod) and self._name == rhs._name and self._params == rhs._params and self._named_params == rhs._named_params) def __ne__(self, rhs): """Test whether this MockMethod is not equivalent to another MockMethod. Args: # rhs: the right hand side of the test rhs: MockMethod """ return not self == rhs def GetPossibleGroup(self): """Returns a possible group from the end of the call queue or None if no other methods are on the stack. """ # Remove this method from the tail of the queue so we can add it to a group. this_method = self._call_queue.pop() assert this_method == self # Determine if the tail of the queue is a group, or just a regular ordered # mock method. group = None try: group = self._call_queue[-1] except IndexError: pass return group def _CheckAndCreateNewGroup(self, group_name, group_class): """Checks if the last method (a possible group) is an instance of our group_class. Adds the current method to this group or creates a new one. Args: group_name: the name of the group. group_class: the class used to create instance of this new group """ group = self.GetPossibleGroup() # If this is a group, and it is the correct group, add the method. if isinstance(group, group_class) and group.group_name() == group_name: group.AddMethod(self) return self # Create a new group and add the method. new_group = group_class(group_name) new_group.AddMethod(self) self._call_queue.append(new_group) return self def InAnyOrder(self, group_name="default"): """Move this method into a group of unordered calls. A group of unordered calls must be defined together, and must be executed in full before the next expected method can be called. There can be multiple groups that are expected serially, if they are given different group names. The same group name can be reused if there is a standard method call, or a group with a different name, spliced between usages. Args: group_name: the name of the unordered group. Returns: self """ return self._CheckAndCreateNewGroup(group_name, UnorderedGroup) def MultipleTimes(self, group_name="default"): """Move this method into group of calls which may be called multiple times. A group of repeating calls must be defined together, and must be executed in full before the next expected method can be called. Args: group_name: the name of the unordered group. Returns: self """ return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup) def AndReturn(self, return_value): """Set the value to return when this method is called. Args: # return_value can be anything. """ self._return_value = return_value return return_value def AndRaise(self, exception): """Set the exception to raise when this method is called. Args: # exception: the exception to raise when this method is called. 
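# Illustrative sketch: InAnyOrder() and MultipleTimes() build the method
# groups defined later in this module. `dao` is a hypothetical mock.
#
#   import mox
#
#   m = mox.Mox()
#   dao = m.CreateMockAnything()
#   dao.Open().InAnyOrder()
#   dao.Prime().InAnyOrder()                     # Open/Prime may arrive either way
#   dao.Read().MultipleTimes().AndReturn('row')
#   m.ReplayAll()
#   dao.Prime()
#   dao.Open()
#   dao.Read()
#   dao.Read()                                   # any number of Read() calls is fine
#   m.VerifyAll()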
exception: Exception """ self._exception = exception def WithSideEffects(self, side_effects): """Set the side effects that are simulated when this method is called. Args: side_effects: A callable which modifies the parameters or other relevant state which a given test case depends on. Returns: Self for chaining with AndReturn and AndRaise. """ self._side_effects = side_effects return self class Comparator: """Base class for all Mox comparators. A Comparator can be used as a parameter to a mocked method when the exact value is not known. For example, the code you are testing might build up a long SQL string that is passed to your mock DAO. You're only interested that the IN clause contains the proper primary keys, so you can set your mock up as follows: mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'. A Comparator may replace one or more parameters, for example: # return at most 10 rows mock_dao.RunQuery(StrContains('SELECT'), 10) or # Return some non-deterministic number of rows mock_dao.RunQuery(StrContains('SELECT'), IsA(int)) """ def equals(self, rhs): """Special equals method that all comparators must implement. Args: rhs: any python object """ raise NotImplementedError, 'method must be implemented by a subclass.' def __eq__(self, rhs): return self.equals(rhs) def __ne__(self, rhs): return not self.equals(rhs) class Is(Comparator): """Comparison class used to check identity, instead of equality.""" def __init__(self, obj): self._obj = obj def equals(self, rhs): return rhs is self._obj def __repr__(self): return "<is %r (%s)>" % (self._obj, id(self._obj)) class IsA(Comparator): """This class wraps a basic Python type or class. It is used to verify that a parameter is of the given type or class. Example: mock_dao.Connect(IsA(DbConnectInfo)) """ def __init__(self, class_name): """Initialize IsA Args: class_name: basic python type or a class """ self._class_name = class_name def equals(self, rhs): """Check to see if the RHS is an instance of class_name. Args: # rhs: the right hand side of the test rhs: object Returns: bool """ try: return isinstance(rhs, self._class_name) except TypeError: # Check raw types if there was a type error. This is helpful for # things like cStringIO.StringIO. return type(rhs) == type(self._class_name) def _IsSubClass(self, clazz): """Check to see if the IsA comparators class is a subclass of clazz. Args: # clazz: a class object Returns: bool """ try: return issubclass(self._class_name, clazz) except TypeError: # Check raw types if there was a type error. This is helpful for # things like cStringIO.StringIO. return type(clazz) == type(self._class_name) def __repr__(self): return 'mox.IsA(%s) ' % str(self._class_name) class IsAlmost(Comparator): """Comparison class used to check whether a parameter is nearly equal to a given value. Generally useful for floating point numbers. Example: mock_dao.SetTimeout(IsAlmost(3.9)) """ def __init__(self, float_value, places=7): """Initialize IsAlmost. Args: float_value: The value for making the comparison. places: The number of decimal places to round to. """ self._float_value = float_value self._places = places def equals(self, rhs): """Check to see if RHS is almost equal to float_value Args: rhs: the value to compare to float_value Returns: bool """ try: return round(rhs-self._float_value, self._places) == 0 except Exception: # This is probably because either float_value or rhs is not a number. 
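# Illustrative sketch: comparators stand in for arguments whose exact value
# is unknown when the expectation is recorded. `dao` is a hypothetical mock.
#
#   import mox
#
#   m = mox.Mox()
#   dao = m.CreateMockAnything()
#   dao.SetTimeout(mox.IsAlmost(3.9))   # any float within rounding of 3.9
#   dao.Connect(mox.IsA(dict))          # any dict instance matches
#   m.ReplayAll()
#   dao.SetTimeout(3.9000000001)
#   dao.Connect({'host': 'db1'})
#   m.VerifyAll()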
return False def __repr__(self): return str(self._float_value) class StrContains(Comparator): """Comparison class used to check whether a substring exists in a string parameter. This can be useful in mocking a database with SQL passed in as a string parameter, for example. Example: mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result) """ def __init__(self, search_string): """Initialize. Args: # search_string: the string you are searching for search_string: str """ self._search_string = search_string def equals(self, rhs): """Check to see if the search_string is contained in the rhs string. Args: # rhs: the right hand side of the test rhs: object Returns: bool """ try: return rhs.find(self._search_string) > -1 except Exception: return False def __repr__(self): return '<str containing \'%s\'>' % self._search_string class Regex(Comparator): """Checks if a string matches a regular expression. This uses a given regular expression to determine equality. """ def __init__(self, pattern, flags=0): """Initialize. Args: # pattern is the regular expression to search for pattern: str # flags passed to re.compile function as the second argument flags: int """ self.regex = re.compile(pattern, flags=flags) def equals(self, rhs): """Check to see if rhs matches regular expression pattern. Returns: bool """ try: return self.regex.search(rhs) is not None except Exception: return False def __repr__(self): s = '<regular expression \'%s\'' % self.regex.pattern if self.regex.flags: s += ', flags=%d' % self.regex.flags s += '>' return s class In(Comparator): """Checks whether an item (or key) is in a list (or dict) parameter. Example: mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result) """ def __init__(self, key): """Initialize. Args: # key is any thing that could be in a list or a key in a dict """ self._key = key def equals(self, rhs): """Check to see whether key is in rhs. Args: rhs: dict Returns: bool """ return self._key in rhs def __repr__(self): return '<sequence or map containing \'%s\'>' % str(self._key) class Not(Comparator): """Checks whether a predicate is False. Example: mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm', stevepm_user_info))) """ def __init__(self, predicate): """Initialize. Args: # predicate: a Comparator instance. """ assert isinstance(predicate, Comparator), ("predicate %r must be a" " Comparator." % predicate) self._predicate = predicate def equals(self, rhs): """Check to see whether the predicate is False. Args: rhs: A value that will be given in argument of the predicate. Returns: bool """ try: return not self._predicate.equals(rhs) except Exception: return False def __repr__(self): return '<not \'%s\'>' % self._predicate class ContainsKeyValue(Comparator): """Checks whether a key/value pair is in a dict parameter. Example: mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info)) """ def __init__(self, key, value): """Initialize. Args: # key: a key in a dict # value: the corresponding value """ self._key = key self._value = value def equals(self, rhs): """Check whether the given key/value pair is in the rhs dict. Returns: bool """ try: return rhs[self._key] == self._value except Exception: return False def __repr__(self): return '<map containing the entry \'%s: %s\'>' % (str(self._key), str(self._value)) class ContainsAttributeValue(Comparator): """Checks whether a passed parameter contains attributes with a given value. Example: mock_dao.UpdateSomething(ContainsAttribute('stevepm', stevepm_user_info)) """ def __init__(self, key, value): """Initialize. Args: # key: an attribute name of an object # value: the corresponding value """ self._key = key self._value = value def equals(self, rhs): """Check whether the given attribute has a matching value in the rhs object. Returns: bool """ try: return getattr(rhs, self._key) == self._value except Exception: return False class SameElementsAs(Comparator): """Checks whether sequences contain the same elements (ignoring order). Example: mock_dao.ProcessUsers(SameElementsAs(['stevepm', 'salomaki'])) """ def __init__(self, expected_seq): """Initialize. Args: expected_seq: a sequence """ # Store in case expected_seq is an iterator. 
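# Illustrative sketch: string and dict comparators in expectations.
# `dao` is a hypothetical mock.
#
#   import mox
#
#   m = mox.Mox()
#   dao = m.CreateMockAnything()
#   dao.RunQuery(mox.StrContains('IN (1, 2)')).AndReturn(['row'])
#   dao.UpdateUsers(mox.Not(mox.ContainsKeyValue('admin', True)))
#   m.ReplayAll()
#   dao.RunQuery('SELECT * FROM t WHERE id IN (1, 2)')
#   dao.UpdateUsers({'admin': False})   # matches: no admin=True entry
#   m.VerifyAll()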
self._expected_list = list(expected_seq) def equals(self, actual_seq): """Check to see whether actual_seq has same elements as expected_seq. Args: actual_seq: sequence Returns: bool """ try: # Store in case actual_seq is an iterator. We potentially iterate twice: # once to make the dict, once in the list fallback. actual_list = list(actual_seq) except TypeError: # actual_seq cannot be read as a sequence. # # This happens because Mox uses __eq__ both to check object equality (in # MethodSignatureChecker) and to invoke Comparators. return False try: expected = dict([(element, None) for element in self._expected_list]) actual = dict([(element, None) for element in actual_list]) except TypeError: # Fall back to slower list-compare if any of the objects are unhashable. expected = self._expected_list actual = actual_list expected.sort() actual.sort() return expected == actual def __repr__(self): return '<sequence with same elements as \'%s\'>' % self._expected_list class And(Comparator): """Evaluates one or more Comparators on RHS and returns an AND of the results. """ def __init__(self, *args): """Initialize. Args: *args: One or more Comparator """ self._comparators = args def equals(self, rhs): """Checks whether all Comparators are equal to rhs. Args: # rhs: can be anything Returns: bool """ for comparator in self._comparators: if not comparator.equals(rhs): return False return True def __repr__(self): return '<AND %s>' % str(self._comparators) class Or(Comparator): """Evaluates one or more Comparators on RHS and returns an OR of the results. """ def __init__(self, *args): """Initialize. Args: *args: One or more Mox comparators """ self._comparators = args def equals(self, rhs): """Checks whether any Comparator is equal to rhs. Args: # rhs: can be anything Returns: bool """ for comparator in self._comparators: if comparator.equals(rhs): return True return False def __repr__(self): return '<OR %s>' % str(self._comparators) class Func(Comparator): """Call a function that should verify the parameter passed in is correct. You may need the ability to perform more advanced operations on the parameter in order to validate it. You can use this to have a callable validate any parameter. The callable should return either True or False. Example: def myParamValidator(param): # Advanced logic here return True mock_dao.DoSomething(Func(myParamValidator), True) """ def __init__(self, func): """Initialize. Args: func: callable that takes one parameter and returns a bool """ self._func = func def equals(self, rhs): """Test whether rhs passes the function test. rhs is passed into func. Args: rhs: any python object Returns: the result of func(rhs) """ return self._func(rhs) def __repr__(self): return str(self._func) class IgnoreArg(Comparator): """Ignore an argument. This can be used when we don't care about an argument of a method call. Example: # Check if CastMagic is called with 3 as first arg and 'disappear' as third. mymock.CastMagic(3, IgnoreArg(), 'disappear') """ def equals(self, unused_rhs): """Ignores arguments and returns True. Args: unused_rhs: any python object Returns: always returns True """ return True def __repr__(self): return '<IgnoreArg>' class Value(Comparator): """Compares argument against a remembered value. To be used in conjunction with Remember comparator. See Remember() for example. 
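# Illustrative sketch: composing comparators. `dao` is a hypothetical mock.
#
#   import mox
#
#   def _positive(n):
#       return n > 0
#
#   m = mox.Mox()
#   dao = m.CreateMockAnything()
#   dao.Resize(mox.And(mox.IsA(int), mox.Func(_positive)), mox.IgnoreArg())
#   m.ReplayAll()
#   dao.Resize(10, 'anything at all')   # int AND positive; second arg ignored
#   m.VerifyAll()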
""" def __init__(self): self._value = None self._has_value = False def store_value(self, rhs): self._value = rhs self._has_value = True def equals(self, rhs): if not self._has_value: return False else: return rhs == self._value def __repr__(self): if self._has_value: return "" % self._value else: return "" class Remember(Comparator): """Remembers the argument to a value store. To be used in conjunction with Value comparator. Example: # Remember the argument for one method call. users_list = Value() mock_dao.ProcessUsers(Remember(users_list)) # Check argument against remembered value. mock_dao.ReportUsers(users_list) """ def __init__(self, value_store): if not isinstance(value_store, Value): raise TypeError("value_store is not an instance of the Value class") self._value_store = value_store def equals(self, rhs): self._value_store.store_value(rhs) return True def __repr__(self): return "" % id(self._value_store) class MethodGroup(object): """Base class containing common behaviour for MethodGroups.""" def __init__(self, group_name): self._group_name = group_name def group_name(self): return self._group_name def __str__(self): return '<%s "%s">' % (self.__class__.__name__, self._group_name) def AddMethod(self, mock_method): raise NotImplementedError def MethodCalled(self, mock_method): raise NotImplementedError def IsSatisfied(self): raise NotImplementedError class UnorderedGroup(MethodGroup): """UnorderedGroup holds a set of method calls that may occur in any order. This construct is helpful for non-deterministic events, such as iterating over the keys of a dict. """ def __init__(self, group_name): super(UnorderedGroup, self).__init__(group_name) self._methods = [] def __str__(self): return '%s "%s" pending calls:\n%s' % ( self.__class__.__name__, self._group_name, "\n".join(str(method) for method in self._methods)) def AddMethod(self, mock_method): """Add a method to this group. Args: mock_method: A mock method to be added to this group. """ self._methods.append(mock_method) def MethodCalled(self, mock_method): """Remove a method call from the group. If the method is not in the set, an UnexpectedMethodCallError will be raised. Args: mock_method: a mock method that should be equal to a method in the group. Returns: The mock method from the group Raises: UnexpectedMethodCallError if the mock_method was not in the group. """ # Check to see if this method exists, and if so, remove it from the set # and return it. for method in self._methods: if method == mock_method: # Remove the called mock_method instead of the method in the group. # The called method will match any comparators when equality is checked # during removal. The method in the group could pass a comparator to # another comparator during the equality check. self._methods.remove(mock_method) # If this group is not empty, put it back at the head of the queue. if not self.IsSatisfied(): mock_method._call_queue.appendleft(self) return self, method raise UnexpectedMethodCallError(mock_method, self) def IsSatisfied(self): """Return True if there are not any methods in this group.""" return len(self._methods) == 0 class MultipleTimesGroup(MethodGroup): """MultipleTimesGroup holds methods that may be called any number of times. Note: Each method must be called at least once. This is helpful, if you don't know or care how many times a method is called. 
""" def __init__(self, group_name): super(MultipleTimesGroup, self).__init__(group_name) self._methods = set() self._methods_left = set() def AddMethod(self, mock_method): """Add a method to this group. Args: mock_method: A mock method to be added to this group. """ self._methods.add(mock_method) self._methods_left.add(mock_method) def MethodCalled(self, mock_method): """Remove a method call from the group. If the method is not in the set, an UnexpectedMethodCallError will be raised. Args: mock_method: a mock method that should be equal to a method in the group. Returns: The mock method from the group Raises: UnexpectedMethodCallError if the mock_method was not in the group. """ # Check to see if this method exists, and if so add it to the set of # called methods. for method in self._methods: if method == mock_method: self._methods_left.discard(method) # Always put this group back on top of the queue, because we don't know # when we are done. mock_method._call_queue.appendleft(self) return self, method if self.IsSatisfied(): next_method = mock_method._PopNextMethod(); return next_method, None else: raise UnexpectedMethodCallError(mock_method, self) def IsSatisfied(self): """Return True if all methods in this group are called at least once.""" return len(self._methods_left) == 0 class MoxMetaTestBase(type): """Metaclass to add mox cleanup and verification to every test. As the mox unit testing class is being constructed (MoxTestBase or a subclass), this metaclass will modify all test functions to call the CleanUpMox method of the test class after they finish. This means that unstubbing and verifying will happen for every test with no additional code, and any failures will result in test failures as opposed to errors. """ def __init__(cls, name, bases, d): type.__init__(cls, name, bases, d) # also get all the attributes from the base classes to account # for a case when test class is not the immediate child of MoxTestBase for base in bases: for attr_name in dir(base): if attr_name not in d: d[attr_name] = getattr(base, attr_name) for func_name, func in d.items(): if func_name.startswith('test') and callable(func): setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func)) @staticmethod def CleanUpTest(cls, func): """Adds Mox cleanup code to any MoxTestBase method. Always unsets stubs after a test. Will verify all mocks for tests that otherwise pass. Args: cls: MoxTestBase or subclass; the class whose test method we are altering. func: method; the method of the MoxTestBase test class we wish to alter. Returns: The modified method. """ def new_method(self, *args, **kwargs): mox_obj = getattr(self, 'mox', None) stubout_obj = getattr(self, 'stubs', None) cleanup_mox = False cleanup_stubout = False if mox_obj and isinstance(mox_obj, Mox): cleanup_mox = True if stubout_obj and isinstance(stubout_obj, stubout.StubOutForTesting): cleanup_stubout = True try: func(self, *args, **kwargs) finally: if cleanup_mox: mox_obj.UnsetStubs() if cleanup_stubout: stubout_obj.UnsetAll() stubout_obj.SmartUnsetAll() if cleanup_mox: mox_obj.VerifyAll() new_method.__name__ = func.__name__ new_method.__doc__ = func.__doc__ new_method.__module__ = func.__module__ return new_method class MoxTestBase(unittest.TestCase): """Convenience test class to make stubbing easier. Sets up a "mox" attribute which is an instance of Mox (any mox tests will want this), and a "stubs" attribute that is an instance of StubOutForTesting (needed at times). 
Also automatically unsets any stubs and verifies that all mock methods have been called at the end of each test, eliminating boilerplate code. """ __metaclass__ = MoxMetaTestBase def setUp(self): super(MoxTestBase, self).setUp() self.mox = Mox() self.stubs = stubout.StubOutForTesting() # This code is part of Ansible, but is an independent component. # This particular file snippet, and this file snippet only, is BSD licensed. # Modules you write using this snippet, which is embedded dynamically by Ansible # still belong to the author of the module, and may assign their own license # to the complete work. # # Copyright (c) 2015 Peter Sprygada, # Copyright (c) 2017 Red Hat Inc. # # Redistribution and use in source and binary forms, with or without modification, # are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE # USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import json from difflib import Differ from copy import deepcopy from ansible.module_utils._text import to_text, to_bytes from ansible.module_utils.basic import env_fallback from ansible.module_utils.network.common.utils import to_list from ansible.module_utils.connection import Connection from ansible.module_utils.network.common.netconf import NetconfConnection try: from ncclient.xml_ import to_xml HAS_NCCLIENT = True except ImportError: HAS_NCCLIENT = False try: from lxml import etree HAS_XML = True except ImportError: HAS_XML = False _EDIT_OPS = frozenset(['merge', 'create', 'replace', 'delete']) BASE_1_0 = "{urn:ietf:params:xml:ns:netconf:base:1.0}" NS_DICT = { 'BASE_NSMAP': {"xc": "urn:ietf:params:xml:ns:netconf:base:1.0"}, 'BANNERS_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-infra-infra-cfg"}, 'INTERFACES_NSMAP': {None: "http://openconfig.net/yang/interfaces"}, 'INSTALL_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-installmgr-admin-oper"}, 'HOST-NAMES_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-shellutil-cfg"}, 'M:TYPE_NSMAP': {"idx": "urn:ietf:params:xml:ns:yang:iana-if-type"}, 'ETHERNET_NSMAP': {None: "http://openconfig.net/yang/interfaces/ethernet"}, 'CETHERNET_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-drivers-media-eth-cfg"}, 'INTERFACE-CONFIGURATIONS_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg"}, 'INFRA-STATISTICS_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-infra-statsd-oper"}, 'INTERFACE-PROPERTIES_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-oper"}, 'IP-DOMAIN_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-ip-domain-cfg"}, 'SYSLOG_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-infra-syslog-cfg"}, 'AAA_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-lib-cfg"}, 'AAA_LOCALD_NSMAP': {None: "http://cisco.com/ns/yang/Cisco-IOS-XR-aaa-locald-cfg"}, } iosxr_provider_spec = { 'host': dict(), 'port': dict(type='int'), 'username': dict(fallback=(env_fallback, ['ANSIBLE_NET_USERNAME'])), 'password': dict(fallback=(env_fallback, ['ANSIBLE_NET_PASSWORD']), no_log=True), 'ssh_keyfile': dict(fallback=(env_fallback, ['ANSIBLE_NET_SSH_KEYFILE']), type='path'), 'timeout': dict(type='int'), 'transport': dict(type='str', default='cli', choices=['cli', 'netconf']), } iosxr_argument_spec = { 'provider': dict(type='dict', options=iosxr_provider_spec) } command_spec = { 'command': dict(), 'prompt': dict(default=None), 'answer': dict(default=None) } iosxr_top_spec = { 'host': dict(removed_in_version=2.9), 'port': dict(removed_in_version=2.9, type='int'), 'username': dict(removed_in_version=2.9), 'password': dict(removed_in_version=2.9, no_log=True), 'ssh_keyfile': dict(removed_in_version=2.9, type='path'), 'timeout': dict(removed_in_version=2.9, type='int'), } iosxr_argument_spec.update(iosxr_top_spec) def get_provider_argspec(): return iosxr_provider_spec def get_connection(module): if hasattr(module, 'connection'): return module.connection capabilities = get_device_capabilities(module) network_api = capabilities.get('network_api') if network_api == 'cliconf': module.connection = Connection(module._socket_path) elif network_api == 'netconf': module.connection = NetconfConnection(module._socket_path) else: module.fail_json(msg='Invalid connection type {!s}'.format(network_api)) return module.connection def get_device_capabilities(module): if hasattr(module, 'capabilities'): return module.capabilities capabilities = Connection(module._socket_path).get_capabilities() module.capabilities = 
json.loads(capabilities) return module.capabilities def build_xml_subtree(container_ele, xmap, param=None, opcode=None): sub_root = container_ele meta_subtree = list() for key, meta in xmap.items(): candidates = meta.get('xpath', "").split("/") if container_ele.tag == candidates[-2]: parent = container_ele elif sub_root.tag == candidates[-2]: parent = sub_root else: parent = sub_root.find(".//" + meta.get('xpath', "").split(sub_root.tag + '/', 1)[1].rsplit('/', 1)[0]) if ((opcode in ('delete', 'merge') and meta.get('operation', 'unknown') == 'edit') or meta.get('operation', None) is None): if meta.get('tag', False) is True: if parent.tag == container_ele.tag: if meta.get('ns', False) is True: child = etree.Element(candidates[-1], nsmap=NS_DICT[key.upper() + "_NSMAP"]) else: child = etree.Element(candidates[-1]) meta_subtree.append(child) sub_root = child else: if meta.get('ns', False) is True: child = etree.SubElement(parent, candidates[-1], nsmap=NS_DICT[key.upper() + "_NSMAP"]) else: child = etree.SubElement(parent, candidates[-1]) if meta.get('attrib', None) is not None and opcode in ('delete', 'merge'): child.set(BASE_1_0 + meta.get('attrib'), opcode) continue text = None param_key = key.split(":") if param_key[0] == 'a': if param is not None and param.get(param_key[1], None) is not None: text = param.get(param_key[1]) elif param_key[0] == 'm': if meta.get('value', None) is not None: text = meta.get('value') if text: if meta.get('ns', False) is True: child = etree.SubElement(parent, candidates[-1], nsmap=NS_DICT[key.upper() + "_NSMAP"]) else: child = etree.SubElement(parent, candidates[-1]) child.text = text if meta.get('attrib', None) is not None and opcode in ('delete', 'merge'): child.set(BASE_1_0 + meta.get('attrib'), opcode) if len(meta_subtree) > 1: for item in meta_subtree: container_ele.append(item) if sub_root == container_ele: return None else: return sub_root def build_xml(container, xmap=None, params=None, opcode=None): ''' Builds netconf xml rpc document from meta-data Args: container: the YANG container within the namespace xmap: meta-data map to build xml tree params: Input params that feed xml tree values opcode: operation to be performed (merge, delete etc.) Example: Module inputs: banner_params = [{'banner':'motd', 'text':'Ansible banner example', 'state':'present'}] Meta-data definition: bannermap = collections.OrderedDict() bannermap.update([ ('banner', {'xpath' : 'banners/banner', 'tag' : True, 'attrib' : "operation"}), ('a:banner', {'xpath' : 'banner/banner-name'}), ('a:text', {'xpath' : 'banner/banner-text', 'operation' : 'edit'}) ]) Fields: key: exact match to the key in arg_spec for a parameter (prefixes --> a: value fetched from arg_spec, m: value fetched from meta-data) xpath: xpath of the element (based on YANG model) tag: True if no text on the element attrib: attribute to be embedded in the element (e.g. 
xc:operation="merge") operation: if edit --> includes the element in edit_config() query else ignores for get() queries value: if key is prefixed with "m:", value is required in meta-data Output: motd Ansible banner example :returns: xml rpc document as a string ''' if opcode == 'filter': root = etree.Element("filter", type="subtree") elif opcode in ('delete', 'merge'): root = etree.Element("config", nsmap=NS_DICT['BASE_NSMAP']) container_ele = etree.SubElement(root, container, nsmap=NS_DICT[container.upper() + "_NSMAP"]) if xmap is not None: if params is None: build_xml_subtree(container_ele, xmap, opcode=opcode) else: subtree_list = list() for param in to_list(params): subtree_ele = build_xml_subtree(container_ele, xmap, param=param, opcode=opcode) if subtree_ele is not None: subtree_list.append(subtree_ele) for item in subtree_list: container_ele.append(item) return etree.tostring(root) def etree_find(root, node): try: element = etree.fromstring(root).find('.//' + to_bytes(node, errors='surrogate_then_replace').strip()) except Exception: element = etree.fromstring(etree.tostring(root)).find('.//' + to_bytes(node, errors='surrogate_then_replace').strip()) if element is not None: return element return None def etree_findall(root, node): try: element = etree.fromstring(root).findall('.//' + to_bytes(node, errors='surrogate_then_replace').strip()) except Exception: element = etree.fromstring(etree.tostring(root)).findall('.//' + to_bytes(node, errors='surrogate_then_replace').strip()) if element is not None: return element return None def is_cliconf(module): capabilities = get_device_capabilities(module) network_api = capabilities.get('network_api') if network_api not in ('cliconf', 'netconf'): module.fail_json(msg=('unsupported network_api: {!s}'.format(network_api))) return False if network_api == 'cliconf': return True return False def is_netconf(module): capabilities = get_device_capabilities(module) network_api = capabilities.get('network_api') if network_api not in ('cliconf', 'netconf'): module.fail_json(msg=('unsupported network_api: {!s}'.format(network_api))) return False if network_api == 'netconf': if not HAS_NCCLIENT: module.fail_json(msg=('ncclient is not installed')) if not HAS_XML: module.fail_json(msg=('lxml is not installed')) return True return False def get_config_diff(module, running=None, candidate=None): conn = get_connection(module) if is_cliconf(module): return conn.get('show commit changes diff') elif is_netconf(module): if running and candidate: running_data = running.split("\n", 1)[1].rsplit("\n", 1)[0] candidate_data = candidate.split("\n", 1)[1].rsplit("\n", 1)[0] if running_data != candidate_data: d = Differ() diff = list(d.compare(running_data.splitlines(), candidate_data.splitlines())) return '\n'.join(diff).strip() return None def discard_config(module): conn = get_connection(module) conn.discard_changes() def commit_config(module, comment=None, confirmed=False, confirm_timeout=None, persist=False, check=False): conn = get_connection(module) reply = None if check: reply = conn.validate() else: if is_netconf(module): reply = conn.commit(confirmed=confirmed, timeout=confirm_timeout, persist=persist) elif is_cliconf(module): reply = conn.commit(comment=comment) return reply def get_oper(module, filter=None): conn = get_connection(module) if filter is not None: response = conn.get(filter) else: return None return to_bytes(etree.tostring(response), errors='surrogate_then_replace').strip() def get_config(module, config_filter=None, source='running'): conn = 
get_connection(module) # Note: Does not cache config in favour of latest config on every get operation. out = conn.get_config(source=source, filter=config_filter) if is_netconf(module): out = to_xml(conn.get_config(source=source, filter=config_filter)) cfg = out.strip() return cfg def load_config(module, command_filter, commit=False, replace=False, comment=None, admin=False, running=None, nc_get_filter=None): conn = get_connection(module) diff = None if is_netconf(module): # FIXME: check for platform behaviour and restore this # conn.lock(target = 'candidate') # conn.discard_changes() try: for filter in to_list(command_filter): conn.edit_config(filter) candidate = get_config(module, source='candidate', config_filter=nc_get_filter) diff = get_config_diff(module, running, candidate) if commit and diff: commit_config(module) else: discard_config(module) finally: # conn.unlock(target = 'candidate') pass elif is_cliconf(module): # to keep the pre-cliconf behaviour, make a copy, avoid adding commands to input list cmd_filter = deepcopy(command_filter) cmd_filter.insert(0, 'configure terminal') if admin: cmd_filter.insert(0, 'admin') conn.edit_config(cmd_filter) if module._diff: diff = get_config_diff(module) if commit: commit_config(module, comment=comment) conn.edit_config('end') else: conn.discard_changes() return diff def run_command(module, commands): conn = get_connection(module) responses = list() for cmd in to_list(commands): try: cmd = json.loads(cmd) command = cmd['command'] prompt = cmd['prompt'] answer = cmd['answer'] except: command = cmd prompt = None answer = None out = conn.get(command, prompt, answer) try: responses.append(to_text(out, errors='surrogate_or_strict')) except UnicodeError: module.fail_json(msg=u'failed to decode output from {0}:{1}'.format(cmd, to_text(out))) return responses #!/usr/bin/env python # # Copyright (C) 2009 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from types import ListType, DictionaryType """Contains a client to communicate with the Contacts servers. For documentation on the Contacts API, see: http://code.google.com/apis/contatcs/ """ __author__ = 'vinces1979@gmail.com (Vince Spicer)' import gdata.client import gdata.contacts.data import atom.client import atom.data import atom.http_core import gdata.gauth DEFAULT_BATCH_URL = ('https://www.google.com/m8/feeds/contacts/default/full' '/batch') DEFAULT_PROFILES_BATCH_URL = ('https://www.google.com/m8/feeds/profiles/domain/' '%s/full/batch') class ContactsClient(gdata.client.GDClient): api_version = '3' auth_service = 'cp' server = "www.google.com" contact_list = "default" auth_scopes = gdata.gauth.AUTH_SCOPES['cp'] ssl = True def __init__(self, domain=None, auth_token=None, **kwargs): """Constructs a new client for the Email Settings API. Args: domain: string The Google Apps domain (if any). kwargs: The other parameters to pass to the gdata.client.GDClient constructor. 
""" gdata.client.GDClient.__init__(self, auth_token=auth_token, **kwargs) self.domain = domain def get_feed_uri(self, kind='contacts', contact_list=None, projection='full', scheme="https"): """Builds a feed URI. Args: kind: The type of feed to return, typically 'groups' or 'contacts'. Default value: 'contacts'. contact_list: The contact list to return a feed for. Default value: self.contact_list. projection: The projection to apply to the feed contents, for example 'full', 'base', 'base/12345', 'full/batch'. Default value: 'full'. scheme: The URL scheme such as 'http' or 'https', None to return a relative URI without hostname. Returns: A feed URI using the given kind, contact list, and projection. Example: '/m8/feeds/contacts/default/full'. """ contact_list = contact_list or self.contact_list if kind == 'profiles': contact_list = 'domain/%s' % self.domain prefix = scheme and '%s://%s' % (scheme, self.server) or '' return '%s/m8/feeds/%s/%s/%s' % (prefix, kind, contact_list, projection) GetFeedUri = get_feed_uri def get_contact(self, uri, desired_class=gdata.contacts.data.ContactEntry, auth_token=None, **kwargs): return self.get_entry(uri, auth_token=auth_token, desired_class=desired_class, **kwargs) GetContact = get_contact def create_contact(self, new_contact, insert_uri=None, auth_token=None, **kwargs): """Adds an new contact to Google Contacts. Args: new_contact: atom.Entry or subclass A new contact which is to be added to Google Contacts. insert_uri: the URL to post new contacts to the feed url_params: dict (optional) Additional URL parameters to be included in the insertion request. escape_params: boolean (optional) If true, the url_parameters will be escaped before they are included in the request. Returns: On successful insert, an entry containing the contact created On failure, a RequestError is raised of the form: {'status': HTTP status code from server, 'reason': HTTP reason from the server, 'body': HTTP body of the server's response} """ insert_uri = insert_uri or self.GetFeedUri() return self.Post(new_contact, insert_uri, auth_token=auth_token, **kwargs) CreateContact = create_contact def add_contact(self, new_contact, insert_uri=None, auth_token=None, billing_information=None, birthday=None, calendar_link=None, **kwargs): """Adds an new contact to Google Contacts. Args: new_contact: atom.Entry or subclass A new contact which is to be added to Google Contacts. insert_uri: the URL to post new contacts to the feed url_params: dict (optional) Additional URL parameters to be included in the insertion request. escape_params: boolean (optional) If true, the url_parameters will be escaped before they are included in the request. 
Returns: On successful insert, an entry containing the contact created On failure, a RequestError is raised of the form: {'status': HTTP status code from server, 'reason': HTTP reason from the server, 'body': HTTP body of the server's response} """ contact = gdata.contacts.data.ContactEntry() if billing_information is not None: if not isinstance(billing_information, gdata.contacts.data.BillingInformation): billing_information = gdata.contacts.data.BillingInformation(text=billing_information) contact.billing_information = billing_information if birthday is not None: if not isinstance(birthday, gdata.contacts.data.Birthday): birthday = gdata.contacts.data.Birthday(when=birthday) contact.birthday = birthday if calendar_link is not None: if type(calendar_link) is not ListType: calendar_link = [calendar_link] for link in calendar_link: if not isinstance(link, gdata.contacts.data.CalendarLink): if type(link) is not DictionaryType: raise TypeError, "calendar_link requires a dictionary, not %s" % type(link) link = gdata.contacts.data.CalendarLink( rel=link.get("rel", None), label=link.get("label", None), primary=link.get("primary", None), href=link.get("href", None), ) contact.calendar_link.append(link) insert_uri = insert_uri or self.GetFeedUri() return self.Post(contact, insert_uri, auth_token=auth_token, **kwargs) AddContact = add_contact def get_contacts(self, uri=None, desired_class=gdata.contacts.data.ContactsFeed, auth_token=None, **kwargs): """Obtains a feed with the contacts belonging to the current user. Args: auth_token: An object which sets the Authorization HTTP header in its modify_request method. Recommended classes include gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken among others. Represents the current user. Defaults to None and if None, this method will look for a value in the auth_token member of ContactsClient. desired_class: class descended from atom.core.XmlElement to which a successful response should be converted. If there is no converter function specified (desired_class=None) then the desired_class will be used in calling the atom.core.parse function. If neither the desired_class nor the converter is specified, an HTTP response object will be returned. Defaults to gdata.contacts.data.ContactsFeed. 
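# Illustrative sketch: iterating the contacts feed returned by get_contacts().
# Assumes `client` is an authenticated ContactsClient as sketched earlier.
#
#   feed = client.GetContacts()
#   for entry in feed.entry:
#       print entry.title.text   # Python 2 print, matching this module's era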
""" uri = uri or self.GetFeedUri() return self.get_feed(uri, auth_token=auth_token, desired_class=desired_class, **kwargs) GetContacts = get_contacts def get_group(self, uri=None, desired_class=gdata.contacts.data.GroupEntry, auth_token=None, **kwargs): """ Get a single groups details Args: uri: the group uri or id """ return self.get_entry(uri, desired_class=desired_class, auth_token=auth_token, **kwargs) GetGroup = get_group def get_groups(self, uri=None, desired_class=gdata.contacts.data.GroupsFeed, auth_token=None, **kwargs): uri = uri or self.GetFeedUri('groups') return self.get_feed(uri, desired_class=desired_class, auth_token=auth_token, **kwargs) GetGroups = get_groups def create_group(self, new_group, insert_uri=None, url_params=None, desired_class=None, **kwargs): insert_uri = insert_uri or self.GetFeedUri('groups') return self.Post(new_group, insert_uri, url_params=url_params, desired_class=desired_class, **kwargs) CreateGroup = create_group def update_group(self, edit_uri, updated_group, url_params=None, escape_params=True, desired_class=None, auth_token=None, **kwargs): return self.Put(updated_group, self._CleanUri(edit_uri), url_params=url_params, escape_params=escape_params, desired_class=desired_class, auth_token=auth_token, **kwargs) UpdateGroup = update_group def delete_group(self, group_object, auth_token=None, force=False, **kws): return self.Delete(group_object, auth_token=auth_token, force=force, **kws) DeleteGroup = delete_group def change_photo(self, media, contact_entry_or_url, content_type=None, content_length=None, auth_token=None, **kwargs): """Change the photo for the contact by uploading a new photo. Performs a PUT against the photo edit URL to send the binary data for the photo. Args: media: filename, file-like-object, or a gdata.data.MediaSource object to send. contact_entry_or_url: ContactEntry or str If it is a ContactEntry, this method will search for an edit photo link URL and perform a PUT to the URL. content_type: str (optional) the mime type for the photo data. This is necessary if media is a file or file name, but if media is a MediaSource object then the media object can contain the mime type. If media_type is set, it will override the mime type in the media object. content_length: int or str (optional) Specifying the content length is only required if media is a file-like object. If media is a filename, the length is determined using os.path.getsize. If media is a MediaSource object, it is assumed that it already contains the content length. """ ifmatch_header = None if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): photo_link = contact_entry_or_url.GetPhotoLink() uri = photo_link.href ifmatch_header = atom.client.CustomHeaders( **{'if-match': photo_link.etag}) else: uri = contact_entry_or_url if isinstance(media, gdata.data.MediaSource): payload = media # If the media object is a file-like object, then use it as the file # handle in the in the MediaSource. elif hasattr(media, 'read'): payload = gdata.data.MediaSource(file_handle=media, content_type=content_type, content_length=content_length) # Assume that the media object is a file name. else: payload = gdata.data.MediaSource(content_type=content_type, content_length=content_length, file_path=media) return self.Put(uri=uri, data=payload, auth_token=auth_token, ifmatch_header=ifmatch_header, **kwargs) ChangePhoto = change_photo def get_photo(self, contact_entry_or_url, auth_token=None, **kwargs): """Retrives the binary data for the contact's profile photo as a string. 
Args: contact_entry_or_url: a gdata.contacts.ContactEntry object or a string containing the photo link's URL. If the contact entry does not contain a photo link, the image will not be fetched and this method will return None. """ # TODO: add the ability to write out the binary image data to a file, # reading and writing a chunk at a time to avoid potentially using up # large amounts of memory. url = None if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): photo_link = contact_entry_or_url.GetPhotoLink() if photo_link: url = photo_link.href else: url = contact_entry_or_url if url: return self.Get(url, auth_token=auth_token, **kwargs).read() else: return None GetPhoto = get_photo def delete_photo(self, contact_entry_or_url, auth_token=None, **kwargs): """Delete the contact's profile photo. Args: contact_entry_or_url: a gdata.contacts.ContactEntry object or a string containing the photo link's URL. """ uri = None ifmatch_header = None if isinstance(contact_entry_or_url, gdata.contacts.data.ContactEntry): photo_link = contact_entry_or_url.GetPhotoLink() if photo_link.etag: uri = photo_link.href ifmatch_header = atom.client.CustomHeaders( **{'if-match': photo_link.etag}) else: # No etag means no photo has been assigned to this contact. return else: uri = contact_entry_or_url if uri: self.Delete(entry_or_uri=uri, auth_token=auth_token, ifmatch_header=ifmatch_header, **kwargs) DeletePhoto = delete_photo def get_profiles_feed(self, uri=None, auth_token=None, **kwargs): """Retrieves a feed containing all of the domain's profiles. Args: uri: string (optional) the URL to retrieve the profiles feed, for example /m8/feeds/profiles/default/full Returns: On success, a ProfilesFeed containing the profiles. On failure, raises a RequestError. """ uri = uri or self.GetFeedUri('profiles') return self.get_feed(uri, auth_token=auth_token, desired_class=gdata.contacts.data.ProfilesFeed, **kwargs) GetProfilesFeed = get_profiles_feed def get_profile(self, uri, auth_token=None, **kwargs): """Retrieves a domain's profile for the user. Args: uri: string the URL to retrieve the profiles feed, for example /m8/feeds/profiles/default/full/username Returns: On success, a ProfileEntry containing the profile for the user. On failure, raises a RequestError. """ return self.get_entry(uri, desired_class=gdata.contacts.data.ProfileEntry, auth_token=auth_token, **kwargs) GetProfile = get_profile def update_profile(self, updated_profile, auth_token=None, force=False, **kwargs): """Updates an existing profile. Args: updated_profile: atom.Entry or subclass containing the Atom Entry which will replace the profile that is stored at the edit_url. auth_token: An object which sets the Authorization HTTP header in its modify_request method. Recommended classes include gdata.gauth.ClientLoginToken and gdata.gauth.AuthSubToken among others. Represents the current user. Defaults to None and if None, this method will look for a value in the auth_token member of ContactsClient. force: boolean stating whether an update should be forced. Defaults to False. Normally, if a change has been made since the passed in entry was obtained, the server will not overwrite the entry since the changes were based on an obsolete version of the entry. Setting force to True will cause the update to silently overwrite whatever version is present. url_params: dict (optional) Additional URL parameters to be included in the insertion request.
escape_params: boolean (optional) If true, the url_parameters will be escaped before they are included in the request. Returns: On successful update, an httplib.HTTPResponse containing the server's response to the PUT request. On failure, raises a RequestError. """ return self.Update(updated_profile, auth_token=auth_token, force=force, **kwargs) UpdateProfile = update_profile def execute_batch(self, batch_feed, url=DEFAULT_BATCH_URL, desired_class=None, auth_token=None, **kwargs): """Sends a batch request feed to the server. Args: batch_feed: gdata.contacts.data.ContactsFeed A feed containing batch request entries. Each entry contains the operation to be performed on the data contained in the entry. For example an entry with an operation type of insert will be used as if the individual entry had been inserted. url: str The batch URL to which these operations should be applied. desired_class: class (optional) The class used to convert the server's response to an object. Returns: The results of the batch request's execution on the server. If the default converter is used, this is stored in a ContactsFeed. """ return self.Post(batch_feed, url, desired_class=desired_class, auth_token=auth_token, **kwargs) ExecuteBatch = execute_batch def execute_batch_profiles(self, batch_feed, url=None, desired_class=gdata.contacts.data.ProfilesFeed, auth_token=None, **kwargs): """Sends a batch request feed to the server. Args: batch_feed: gdata.contacts.data.ProfilesFeed A feed containing batch request entries. Each entry contains the operation to be performed on the data contained in the entry. For example an entry with an operation type of insert will be used as if the individual entry had been inserted. url: string The batch URL to which these operations should be applied. desired_class: class (optional) The class used to convert the server's response to an object. The default value is gdata.contacts.data.ProfilesFeed. Returns: The results of the batch request's execution on the server. If the default converter is used, this is stored in a ProfilesFeed. """ url = url or (DEFAULT_PROFILES_BATCH_URL % self.domain) return self.Post(batch_feed, url, desired_class=desired_class, auth_token=auth_token, **kwargs) ExecuteBatchProfiles = execute_batch_profiles def _CleanUri(self, uri): """Sanitizes a feed URI. Args: uri: The URI to sanitize, can be relative or absolute. Returns: The given URI without its http://server prefix, if any. Keeps the leading slash of the URI. """ url_prefix = 'http://%s' % self.server if uri.startswith(url_prefix): uri = uri[len(url_prefix):] return uri class ContactsQuery(gdata.client.Query): """ Create a custom Contacts Query Full specs can be found at: U{Contacts query parameters reference} """ def __init__(self, feed=None, group=None, orderby=None, showdeleted=None, sortorder=None, requirealldeleted=None, **kwargs): """ @param max_results: The maximum number of entries to return. If you want to receive all of the contacts, rather than only the default maximum, you can specify a very large number for max-results. @param start-index: The 1-based index of the first result to be retrieved. @param updated-min: The lower bound on entry update dates. @param group: Constrains the results to only the contacts belonging to the group specified. The value of this parameter specifies the group ID. @param orderby: Sorting criterion. The only supported value is lastmodified. @param showdeleted: Include deleted contacts in the returned contacts feed. @param sortorder: Sorting order direction.
Can be either ascending or descending. @param requirealldeleted: Only relevant if showdeleted and updated-min are also provided. It dictates the behavior of the server in case it detects that placeholders of some entries deleted since the point in time specified as updated-min may have been lost. """ gdata.client.Query.__init__(self, **kwargs) self.group = group self.orderby = orderby self.sortorder = sortorder self.showdeleted = showdeleted def modify_request(self, http_request): if self.group: gdata.client._add_query_param('group', self.group, http_request) if self.orderby: gdata.client._add_query_param('orderby', self.orderby, http_request) if self.sortorder: gdata.client._add_query_param('sortorder', self.sortorder, http_request) if self.showdeleted: gdata.client._add_query_param('showdeleted', self.showdeleted, http_request) gdata.client.Query.modify_request(self, http_request) ModifyRequest = modify_request class ProfilesQuery(gdata.client.Query): """ Create a custom Profiles Query Full specs can be found at: U{Profiles query parameters reference} """ def __init__(self, feed=None, start_key=None, **kwargs): """ @param start_key: Opaque key of the first element to retrieve. Present in the next link of an earlier request, if further pages of response are available. """ gdata.client.Query.__init__(self, **kwargs) self.feed = feed or 'https://www.google.com/m8/feeds/profiles/default/full' self.start_key = start_key def modify_request(self, http_request): if self.start_key: gdata.client._add_query_param('start-key', self.start_key, http_request) gdata.client.Query.modify_request(self, http_request) ModifyRequest = modify_request """ Python 'utf-16' Codec Written by Marc-Andre Lemburg (mal@lemburg.com). (c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
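Usage example (an editor's illustrative addition; standard codec behaviour):

    >>> data = u'abc'.encode('utf-16')   # BOM plus platform-order payload
    >>> data.decode('utf-16')
    u'abc'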
""" import codecs, sys ### Codec APIs encode = codecs.utf_16_encode def decode(input, errors='strict'): return codecs.utf_16_decode(input, errors, True) class IncrementalEncoder(codecs.IncrementalEncoder): def __init__(self, errors='strict'): codecs.IncrementalEncoder.__init__(self, errors) self.encoder = None def encode(self, input, final=False): if self.encoder is None: result = codecs.utf_16_encode(input, self.errors)[0] if sys.byteorder == 'little': self.encoder = codecs.utf_16_le_encode else: self.encoder = codecs.utf_16_be_encode return result return self.encoder(input, self.errors)[0] def reset(self): codecs.IncrementalEncoder.reset(self) self.encoder = None class IncrementalDecoder(codecs.BufferedIncrementalDecoder): def __init__(self, errors='strict'): codecs.BufferedIncrementalDecoder.__init__(self, errors) self.decoder = None def _buffer_decode(self, input, errors, final): if self.decoder is None: (output, consumed, byteorder) = \ codecs.utf_16_ex_decode(input, errors, 0, final) if byteorder == -1: self.decoder = codecs.utf_16_le_decode elif byteorder == 1: self.decoder = codecs.utf_16_be_decode elif consumed >= 2: raise UnicodeError("UTF-16 stream does not start with BOM") return (output, consumed) return self.decoder(input, self.errors, final) def reset(self): codecs.BufferedIncrementalDecoder.reset(self) self.decoder = None class StreamWriter(codecs.StreamWriter): def __init__(self, stream, errors='strict'): self.bom_written = False codecs.StreamWriter.__init__(self, stream, errors) def encode(self, input, errors='strict'): self.bom_written = True result = codecs.utf_16_encode(input, errors) if sys.byteorder == 'little': self.encode = codecs.utf_16_le_encode else: self.encode = codecs.utf_16_be_encode return result class StreamReader(codecs.StreamReader): def reset(self): codecs.StreamReader.reset(self) try: del self.decode except AttributeError: pass def decode(self, input, errors='strict'): (object, consumed, byteorder) = \ codecs.utf_16_ex_decode(input, errors, 0, False) if byteorder == -1: self.decode = codecs.utf_16_le_decode elif byteorder == 1: self.decode = codecs.utf_16_be_decode elif consumed>=2: raise UnicodeError,"UTF-16 stream does not start with BOM" return (object, consumed) ### encodings module API def getregentry(): return codecs.CodecInfo( name='utf-16', encode=encode, decode=decode, incrementalencoder=IncrementalEncoder, incrementaldecoder=IncrementalDecoder, streamreader=StreamReader, streamwriter=StreamWriter, ) # Copyright 2015-2016 NEC Corporation. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
import mock from neutron.tests import base from networking_nec.nwa.l2.rpc import nwa_proxy_api class TestNECNWAProxyApi(base.BaseTestCase): @mock.patch('neutron.common.rpc.get_client') def setUp(self, f1): super(TestNECNWAProxyApi, self).setUp() self.proxy = nwa_proxy_api.NECNWAProxyApi("dummy-topic", "dummy-tenant-id") self.context = mock.MagicMock() def test__send_msg_true(self): msg = mock.MagicMock() self.assertTrue( self.proxy._send_msg(self.context, msg, blocking=True) ) def test__send_msg_false(self): msg = mock.MagicMock() self.assertTrue( self.proxy._send_msg(self.context, msg, blocking=False) ) def test_create_general_dev(self): tenant_id = '844eb55f21e84a289e9c22098d387e5d' nwa_tenant_id = 'DC1_' + tenant_id nwa_info = {} self.proxy.create_general_dev(self.context, tenant_id, nwa_tenant_id, nwa_info) def test_delete_general_dev(self): tenant_id = '844eb55f21e84a289e9c22098d387e5d' nwa_tenant_id = 'DC1_' + tenant_id nwa_info = {} self.proxy.delete_general_dev(self.context, tenant_id, nwa_tenant_id, nwa_info) #!/usr/bin/env python import subprocess import socket import time from os import environ import inspect, os, sys # From http://stackoverflow.com/questions/279237/python-import-a-module-from-a-folder cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],".."))) if cmd_subfolder not in sys.path: sys.path.insert(0, cmd_subfolder) import mosq_test rc = 1 mid = 3265 keepalive = 60 connect_packet = mosq_test.gen_connect("pub-qos1-disco-test", keepalive=keepalive, clean_session=False) connack_packet = mosq_test.gen_connack(rc=0) subscribe_packet = mosq_test.gen_subscribe(mid, "qos1/disconnect/test", 1) suback_packet = mosq_test.gen_suback(mid, 1) mid = 1 publish_packet = mosq_test.gen_publish("qos1/disconnect/test", qos=1, mid=mid, payload="disconnect-message") publish_dup_packet = mosq_test.gen_publish("qos1/disconnect/test", qos=1, mid=mid, payload="disconnect-message", dup=True) puback_packet = mosq_test.gen_puback(mid) mid = 3266 publish2_packet = mosq_test.gen_publish("qos1/outgoing", qos=1, mid=mid, payload="outgoing-message") puback2_packet = mosq_test.gen_puback(mid) broker = subprocess.Popen(['../../src/mosquitto', '-c', '03-publish-b2c-disconnect-qos1.conf'], stderr=subprocess.PIPE) try: time.sleep(0.5) sock = mosq_test.do_client_connect(connect_packet, connack_packet) sock.send(subscribe_packet) if mosq_test.expect_packet(sock, "suback", suback_packet): pub = subprocess.Popen(['./03-publish-b2c-disconnect-qos1-helper.py']) pub.wait() # Should have now received a publish command if mosq_test.expect_packet(sock, "publish", publish_packet): # Send our outgoing message. When we disconnect the broker # should get rid of it and assume we're going to retry. sock.send(publish2_packet) sock.close() sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) sock.settimeout(60) # 60 seconds timeout is much longer than 5 seconds message retry. 
sock.connect(("localhost", 1888)) sock.send(connect_packet) if mosq_test.expect_packet(sock, "connack", connack_packet): if mosq_test.expect_packet(sock, "dup publish", publish_dup_packet): sock.send(puback_packet) rc = 0 sock.close() finally: broker.terminate() broker.wait() if rc: (stdo, stde) = broker.communicate() print(stde) exit(rc) from __future__ import with_statement from decimal import Decimal, InvalidOperation import time from django.core import serializers from django.db import models from django.db.models import Q from django.db.models.signals import post_save from django.db.utils import DatabaseError from django.dispatch.dispatcher import receiver from django.test import TestCase from django.utils.unittest import expectedFailure, skip from .fields import ListField, SetField, DictField, EmbeddedModelField def count_calls(func): def wrapper(*args, **kwargs): wrapper.calls += 1 return func(*args, **kwargs) wrapper.calls = 0 return wrapper class Target(models.Model): index = models.IntegerField() class Source(models.Model): target = models.ForeignKey(Target) index = models.IntegerField() class DecimalModel(models.Model): decimal = models.DecimalField(max_digits=9, decimal_places=2) class DecimalKey(models.Model): decimal = models.DecimalField(max_digits=9, decimal_places=2, primary_key=True) class DecimalParent(models.Model): child = models.ForeignKey(DecimalKey) class DecimalsList(models.Model): decimals = ListField(models.ForeignKey(DecimalKey)) class ListModel(models.Model): integer = models.IntegerField(primary_key=True) floating_point = models.FloatField() names = ListField(models.CharField) names_with_default = ListField(models.CharField(max_length=500), default=[]) names_nullable = ListField(models.CharField(max_length=500), null=True) class OrderedListModel(models.Model): ordered_ints = ListField(models.IntegerField(max_length=500), default=[], ordering=count_calls(lambda x: x), null=True) ordered_nullable = ListField(ordering=lambda x: x, null=True) class SetModel(models.Model): setfield = SetField(models.IntegerField()) class DictModel(models.Model): dictfield = DictField(models.IntegerField) dictfield_nullable = DictField(null=True) auto_now = DictField(models.DateTimeField(auto_now=True)) class EmbeddedModelFieldModel(models.Model): simple = EmbeddedModelField('EmbeddedModel', null=True) simple_untyped = EmbeddedModelField(null=True) decimal_parent = EmbeddedModelField(DecimalParent, null=True) typed_list = ListField(EmbeddedModelField('SetModel')) typed_list2 = ListField(EmbeddedModelField('EmbeddedModel')) untyped_list = ListField(EmbeddedModelField()) untyped_dict = DictField(EmbeddedModelField()) ordered_list = ListField(EmbeddedModelField(), ordering=lambda obj: obj.index) class EmbeddedModel(models.Model): some_relation = models.ForeignKey(DictModel, null=True) someint = models.IntegerField(db_column='custom') auto_now = models.DateTimeField(auto_now=True) auto_now_add = models.DateTimeField(auto_now_add=True) class IterableFieldsTest(TestCase): floats = [5.3, 2.6, 9.1, 1.58] names = [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura'] unordered_ints = [4, 2, 6, 1] def setUp(self): for i, float in zip(range(1, 5), IterableFieldsTest.floats): ListModel(integer=i, floating_point=float, names=IterableFieldsTest.names[:i]).save() def test_startswith(self): self.assertEquals( dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__startswith='Sa')]), dict([(3, ['Kakashi', 'Naruto', 'Sasuke']), (4, ['Kakashi', 'Naruto', 'Sasuke', 'Sakura']), ])) def 
test_options(self): self.assertEqual([entity.names_with_default for entity in ListModel.objects.filter(names__startswith='Sa')], [[], []]) self.assertEqual([entity.names_nullable for entity in ListModel.objects.filter(names__startswith='Sa')], [None, None]) def test_default_value(self): # Make sure default value is copied. ListModel().names_with_default.append(2) self.assertEqual(ListModel().names_with_default, []) def test_ordering(self): f = OrderedListModel._meta.fields[1] f.ordering.calls = 0 # Ensure no ordering happens on assignment. obj = OrderedListModel() obj.ordered_ints = self.unordered_ints self.assertEqual(f.ordering.calls, 0) obj.save() self.assertEqual(OrderedListModel.objects.get().ordered_ints, sorted(self.unordered_ints)) # Ordering should happen only once, i.e. the order function may # be called N times at most (N being the number of items in the # list). self.assertLessEqual(f.ordering.calls, len(self.unordered_ints)) def test_gt(self): self.assertEquals( dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__gt='Kakashi')]), dict([(2, [u'Kakashi', u'Naruto']), (3, [u'Kakashi', u'Naruto', u'Sasuke']), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ])) def test_lt(self): self.assertEquals( dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__lt='Naruto')]), dict([(1, [u'Kakashi']), (2, [u'Kakashi', u'Naruto']), (3, [u'Kakashi', u'Naruto', u'Sasuke']), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ])) def test_gte(self): self.assertEquals( dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__gte='Sakura')]), dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ])) def test_lte(self): self.assertEquals( dict([(entity.pk, entity.names) for entity in ListModel.objects.filter(names__lte='Kakashi')]), dict([(1, [u'Kakashi']), (2, [u'Kakashi', u'Naruto']), (3, [u'Kakashi', u'Naruto', u'Sasuke']), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ])) def test_equals(self): self.assertEquals([entity.names for entity in ListModel.objects.filter(names='Sakura')], [[u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']]) # Test with additional pk filter (for DBs that have special pk # queries). query = ListModel.objects.filter(names='Sakura') self.assertEquals(query.get(pk=query[0].pk).names, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']) def test_is_null(self): self.assertEquals(ListModel.objects.filter( names__isnull=True).count(), 0) def test_exclude(self): self.assertEquals( dict([(entity.pk, entity.names) for entity in ListModel.objects.all().exclude(names__lt='Sakura')]), dict([(3, [u'Kakashi', u'Naruto', u'Sasuke']), (4, [u'Kakashi', u'Naruto', u'Sasuke', u'Sakura']), ])) def test_chained_filter(self): self.assertEquals( [entity.names for entity in ListModel.objects .filter(names='Sasuke').filter(names='Sakura')], [['Kakashi', 'Naruto', 'Sasuke', 'Sakura'], ]) self.assertEquals( [entity.names for entity in ListModel.objects .filter(names__startswith='Sa').filter(names='Sakura')], [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']]) # Test across multiple columns. On app engine only one filter # is allowed to be an inequality filter. self.assertEquals( [entity.names for entity in ListModel.objects .filter(floating_point=9.1).filter(names__startswith='Sa')], [['Kakashi', 'Naruto', 'Sasuke'], ]) def test_setfield(self): setdata = [1, 2, 3, 2, 1] # At the same time test value conversion.
SetModel(setfield=map(str, setdata)).save() item = SetModel.objects.filter(setfield=3)[0] self.assertEqual(item.setfield, set(setdata)) # This shouldn't raise an error because the default value is # an empty list. SetModel().save() def test_dictfield(self): DictModel(dictfield=dict(a=1, b='55', foo=3.14), auto_now={'a': None}).save() item = DictModel.objects.get() self.assertEqual(item.dictfield, {u'a': 1, u'b': 55, u'foo': 3}) dt = item.auto_now['a'] self.assertNotEqual(dt, None) item.save() time.sleep(0.5) # Sleep to avoid false positive failure on the assertion below self.assertGreater(DictModel.objects.get().auto_now['a'], dt) item.delete() # Saving empty dicts shouldn't throw errors. DictModel().save() # Regression tests for djangoappengine issue #39. DictModel.add_to_class('new_dict_field', DictField()) DictModel.objects.get() @skip("GAE specific?") def test_Q_objects(self): self.assertEquals( [entity.names for entity in ListModel.objects .exclude(Q(names__lt='Sakura') | Q(names__gte='Sasuke'))], [['Kakashi', 'Naruto', 'Sasuke', 'Sakura']]) def test_list_with_foreignkeys(self): class ReferenceList(models.Model): keys = ListField(models.ForeignKey('Model')) class Model(models.Model): pass model1 = Model.objects.create() model2 = Model.objects.create() ReferenceList.objects.create(keys=[model1.pk, model2.pk]) self.assertEqual(ReferenceList.objects.get().keys[0], model1.pk) self.assertEqual(ReferenceList.objects.filter(keys=model1.pk).count(), 1) def test_list_with_foreign_conversion(self): decimal = DecimalKey.objects.create(decimal=Decimal('1.5')) DecimalsList.objects.create(decimals=[decimal.pk]) @expectedFailure def test_nested_list(self): """ Some back-ends expect lists to be strongly typed or not to contain other lists (e.g. GAE); this limits how the ListField can be used (unless the back-end were to serialize all lists). """ class UntypedListModel(models.Model): untyped_list = ListField() UntypedListModel.objects.create(untyped_list=[1, [2, 3]]) class Child(models.Model): pass class Parent(models.Model): id = models.IntegerField(primary_key=True) integer_list = ListField(models.IntegerField) integer_dict = DictField(models.IntegerField) embedded_list = ListField(EmbeddedModelField(Child)) embedded_dict = DictField(EmbeddedModelField(Child)) class EmbeddedModelFieldTest(TestCase): def assertEqualDatetime(self, d1, d2): """Compares d1 and d2, ignoring microseconds.""" self.assertEqual(d1.replace(microsecond=0), d2.replace(microsecond=0)) def assertNotEqualDatetime(self, d1, d2): self.assertNotEqual(d1.replace(microsecond=0), d2.replace(microsecond=0)) def _simple_instance(self): EmbeddedModelFieldModel.objects.create( simple=EmbeddedModel(someint='5')) return EmbeddedModelFieldModel.objects.get() def test_simple(self): instance = self._simple_instance() self.assertIsInstance(instance.simple, EmbeddedModel) # Make sure get_prep_value is called. self.assertEqual(instance.simple.someint, 5) # Primary keys should not be populated... self.assertEqual(instance.simple.id, None) # ... unless set explicitly. instance.simple.id = instance.id instance.save() instance = EmbeddedModelFieldModel.objects.get() self.assertEqual(instance.simple.id, instance.id) def _test_pre_save(self, instance, get_field): # Make sure field.pre_save is called for embedded objects.
from time import sleep instance.save() auto_now = get_field(instance).auto_now auto_now_add = get_field(instance).auto_now_add self.assertNotEqual(auto_now, None) self.assertNotEqual(auto_now_add, None) sleep(1) # FIXME instance.save() self.assertNotEqualDatetime(get_field(instance).auto_now, get_field(instance).auto_now_add) instance = EmbeddedModelFieldModel.objects.get() instance.save() # auto_now_add shouldn't have changed now, but auto_now should. self.assertEqualDatetime(get_field(instance).auto_now_add, auto_now_add) self.assertGreater(get_field(instance).auto_now, auto_now) def test_pre_save(self): obj = EmbeddedModelFieldModel(simple=EmbeddedModel()) self._test_pre_save(obj, lambda instance: instance.simple) def test_pre_save_untyped(self): obj = EmbeddedModelFieldModel(simple_untyped=EmbeddedModel()) self._test_pre_save(obj, lambda instance: instance.simple_untyped) def test_pre_save_in_list(self): obj = EmbeddedModelFieldModel(untyped_list=[EmbeddedModel()]) self._test_pre_save(obj, lambda instance: instance.untyped_list[0]) def test_pre_save_in_dict(self): obj = EmbeddedModelFieldModel(untyped_dict={'a': EmbeddedModel()}) self._test_pre_save(obj, lambda instance: instance.untyped_dict['a']) def test_pre_save_list(self): # Also make sure auto_now{,add} works for embedded object *lists*. EmbeddedModelFieldModel.objects.create(typed_list2=[EmbeddedModel()]) instance = EmbeddedModelFieldModel.objects.get() auto_now = instance.typed_list2[0].auto_now auto_now_add = instance.typed_list2[0].auto_now_add self.assertNotEqual(auto_now, None) self.assertNotEqual(auto_now_add, None) instance.typed_list2.append(EmbeddedModel()) instance.save() instance = EmbeddedModelFieldModel.objects.get() self.assertEqualDatetime(instance.typed_list2[0].auto_now_add, auto_now_add) self.assertGreater(instance.typed_list2[0].auto_now, auto_now) self.assertNotEqual(instance.typed_list2[1].auto_now, None) self.assertNotEqual(instance.typed_list2[1].auto_now_add, None) def test_error_messages(self): for kwargs, expected in ( ({'simple': 42}, EmbeddedModel), ({'simple_untyped': 42}, models.Model), ({'typed_list': [EmbeddedModel()]}, SetModel)): self.assertRaisesRegexp( TypeError, "Expected instance of type %r." 
% expected, EmbeddedModelFieldModel(**kwargs).save) def test_typed_listfield(self): EmbeddedModelFieldModel.objects.create( typed_list=[SetModel(setfield=range(3)), SetModel(setfield=range(9))], ordered_list=[Target(index=i) for i in xrange(5, 0, -1)]) obj = EmbeddedModelFieldModel.objects.get() self.assertIn(5, obj.typed_list[1].setfield) self.assertEqual([target.index for target in obj.ordered_list], range(1, 6)) def test_untyped_listfield(self): EmbeddedModelFieldModel.objects.create(untyped_list=[ EmbeddedModel(someint=7), OrderedListModel(ordered_ints=range(5, 0, -1)), SetModel(setfield=[1, 2, 2, 3])]) instances = EmbeddedModelFieldModel.objects.get().untyped_list for instance, cls in zip(instances, [EmbeddedModel, OrderedListModel, SetModel]): self.assertIsInstance(instance, cls) self.assertNotEqual(instances[0].auto_now, None) self.assertEqual(instances[1].ordered_ints, range(1, 6)) def test_untyped_dict(self): EmbeddedModelFieldModel.objects.create(untyped_dict={ 'a': SetModel(setfield=range(3)), 'b': DictModel(dictfield={'a': 1, 'b': 2}), 'c': DictModel(dictfield={}, auto_now={'y': 1})}) data = EmbeddedModelFieldModel.objects.get().untyped_dict self.assertIsInstance(data['a'], SetModel) self.assertNotEqual(data['c'].auto_now['y'], None) def test_foreignkey_in_embedded_object(self): simple = EmbeddedModel(some_relation=DictModel.objects.create()) obj = EmbeddedModelFieldModel.objects.create(simple=simple) simple = EmbeddedModelFieldModel.objects.get().simple self.assertNotIn('some_relation', simple.__dict__) self.assertIsInstance(simple.__dict__['some_relation_id'], type(obj.id)) self.assertIsInstance(simple.some_relation, DictModel) def test_embedded_field_with_foreign_conversion(self): decimal = DecimalKey.objects.create(decimal=Decimal('1.5')) decimal_parent = DecimalParent.objects.create(child=decimal) EmbeddedModelFieldModel.objects.create(decimal_parent=decimal_parent) def test_update(self): """ Test that update can be used on a subset of objects containing collections of embedded instances; see issue #13. Also ensure that updated values are coerced according to the collection field.
""" child1 = Child.objects.create() child2 = Child.objects.create() parent = Parent.objects.create(pk=1, integer_list=[1], integer_dict={'a': 2}, embedded_list=[child1], embedded_dict={'a': child2}) Parent.objects.filter(pk=1).update( integer_list=['3'], integer_dict={'b': '3'}, embedded_list=[child2], embedded_dict={'b': child1}) parent = Parent.objects.get() self.assertEqual(parent.integer_list, [3]) self.assertEqual(parent.integer_dict, {'b': 3}) self.assertEqual(parent.embedded_list, [child2]) self.assertEqual(parent.embedded_dict, {'b': child1}) class BaseModel(models.Model): pass class ExtendedModel(BaseModel): name = models.CharField(max_length=20) class BaseModelProxy(BaseModel): class Meta: proxy = True class ExtendedModelProxy(ExtendedModel): class Meta: proxy = True class ProxyTest(TestCase): def test_proxy(self): list(BaseModelProxy.objects.all()) def test_proxy_with_inheritance(self): self.assertRaises(DatabaseError, lambda: list(ExtendedModelProxy.objects.all())) class SignalTest(TestCase): def test_post_save(self): created = [] @receiver(post_save, sender=SetModel) def handle(**kwargs): created.append(kwargs['created']) SetModel().save() self.assertEqual(created, [True]) SetModel.objects.get().save() self.assertEqual(created, [True, False]) qs = SetModel.objects.all() list(qs)[0].save() self.assertEqual(created, [True, False, False]) list(qs)[0].save() self.assertEqual(created, [True, False, False, False]) list(qs.select_related())[0].save() self.assertEqual(created, [True, False, False, False, False]) class SelectRelatedTest(TestCase): def test_select_related(self): target = Target(index=5) target.save() Source(target=target, index=8).save() source = Source.objects.all().select_related()[0] self.assertEqual(source.target.pk, target.pk) self.assertEqual(source.target.index, target.index) source = Source.objects.all().select_related('target')[0] self.assertEqual(source.target.pk, target.pk) self.assertEqual(source.target.index, target.index) class DBColumn(models.Model): a = models.IntegerField(db_column='b') class OrderByTest(TestCase): def test_foreign_keys(self): target1 = Target.objects.create(index=1) target2 = Target.objects.create(index=2) source1 = Source.objects.create(target=target1, index=3) source2 = Source.objects.create(target=target2, index=4) self.assertEqual(list(Source.objects.all().order_by('target')), [source1, source2]) self.assertEqual(list(Source.objects.all().order_by('-target')), [source2, source1]) def test_db_column(self): model1 = DBColumn.objects.create(a=1) model2 = DBColumn.objects.create(a=2) self.assertEqual(list(DBColumn.objects.all().order_by('a')), [model1, model2]) self.assertEqual(list(DBColumn.objects.all().order_by('-a')), [model2, model1]) def test_reverse(self): model1 = DBColumn.objects.create(a=1) model2 = DBColumn.objects.create(a=2) self.assertEqual(list(DBColumn.objects.all().order_by('a').reverse()), [model2, model1]) self.assertEqual(list(DBColumn.objects.all().order_by('-a').reverse()), [model1, model2]) def test_chain(self): model1 = Target.objects.create(index=1) model2 = Target.objects.create(index=2) self.assertEqual( list(Target.objects.all().order_by('index').order_by('-index')), [model2, model1]) class SerializableSetModel(models.Model): setfield = SetField(models.IntegerField()) setcharfield = SetField(models.CharField(), null=True) class SerializationTest(TestCase): """ JSON doesn't support sets, so they need to be converted to lists for serialization; see issue #12. 
TODO: Check if the fix works with embedded models / nested sets. """ names = ['foo', 'bar', 'baz', 'monkey'] def test_json_listfield(self): for i in range(1, 5): ListModel(integer=i, floating_point=0, names=SerializationTest.names[:i]).save() objects = ListModel.objects.all() serialized = serializers.serialize('json', objects) deserialized = serializers.deserialize('json', serialized) for m in deserialized: integer = m.object.integer names = m.object.names self.assertEqual(names, SerializationTest.names[:integer]) def test_json_setfield(self): for i in range(1, 5): SerializableSetModel( setfield=set([i - 1]), setcharfield=set(SerializationTest.names[:i])).save() objects = SerializableSetModel.objects.all() serialized = serializers.serialize('json', objects) deserialized = serializers.deserialize('json', serialized) for m in deserialized: integer = m.object.setfield.pop() names = m.object.setcharfield self.assertEqual(names, set(SerializationTest.names[:integer + 1])) class String(models.Model): s = models.CharField(max_length=20) class LazyObjectsTest(TestCase): def test_translation(self): """ Using a lazy translation call should work just the same as a non-lazy one (or a plain string). """ from django.utils.translation import ugettext_lazy a = String.objects.create(s='a') b = String.objects.create(s=ugettext_lazy('b')) self.assertEqual(String.objects.get(s='a'), a) self.assertEqual(list(String.objects.filter(s='a')), [a]) self.assertEqual(list(String.objects.filter(s__lte='a')), [a]) self.assertEqual(String.objects.get(s=ugettext_lazy('a')), a) self.assertEqual( list(String.objects.filter(s__lte=ugettext_lazy('a'))), [a]) self.assertEqual(String.objects.get(s='b'), b) self.assertEqual(list(String.objects.filter(s='b')), [b]) self.assertEqual(list(String.objects.filter(s__gte='b')), [b]) self.assertEqual(String.objects.get(s=ugettext_lazy('b')), b) self.assertEqual( list(String.objects.filter(s__gte=ugettext_lazy('b'))), [b]) def test_marked_strings(self): """ Check that strings marked as safe or needing escaping do not confuse the back-end. 
""" from django.utils.safestring import mark_safe, mark_for_escaping a = String.objects.create(s='a') b = String.objects.create(s=mark_safe('b')) c = String.objects.create(s=mark_for_escaping('c')) self.assertEqual(String.objects.get(s='a'), a) self.assertEqual(list(String.objects.filter(s__startswith='a')), [a]) self.assertEqual(String.objects.get(s=mark_safe('a')), a) self.assertEqual( list(String.objects.filter(s__startswith=mark_safe('a'))), [a]) self.assertEqual(String.objects.get(s=mark_for_escaping('a')), a) self.assertEqual( list(String.objects.filter(s__startswith=mark_for_escaping('a'))), [a]) self.assertEqual(String.objects.get(s='b'), b) self.assertEqual(list(String.objects.filter(s__startswith='b')), [b]) self.assertEqual(String.objects.get(s=mark_safe('b')), b) self.assertEqual( list(String.objects.filter(s__startswith=mark_safe('b'))), [b]) self.assertEqual(String.objects.get(s=mark_for_escaping('b')), b) self.assertEqual( list(String.objects.filter(s__startswith=mark_for_escaping('b'))), [b]) self.assertEqual(String.objects.get(s='c'), c) self.assertEqual(list(String.objects.filter(s__startswith='c')), [c]) self.assertEqual(String.objects.get(s=mark_safe('c')), c) self.assertEqual( list(String.objects.filter(s__startswith=mark_safe('c'))), [c]) self.assertEqual(String.objects.get(s=mark_for_escaping('c')), c) self.assertEqual( list(String.objects.filter(s__startswith=mark_for_escaping('c'))), [c]) class FeaturesTest(TestCase): """ Some things are unlikely to cause problems for SQL back-ends, but require special handling in nonrel. """ def test_subqueries(self): """ Django includes SQL statements as WHERE tree values when filtering using a QuerySet -- this won't "just work" with nonrel back-ends. TODO: Subqueries handling may require a bit of Django changing, but should be easy to support. """ target = Target.objects.create(index=1) source = Source.objects.create(index=2, target=target) targets = Target.objects.all() with self.assertRaises(DatabaseError): Source.objects.get(target__in=targets) self.assertEqual( Source.objects.get(target__in=list(targets)), source) class DecimalFieldTest(TestCase): """ Some NoSQL databases can't handle Decimals, so respective back-ends convert them to strings or floats. This can cause some precision and sorting problems. """ def setUp(self): for d in (Decimal('12345.6789'), Decimal('5'), Decimal('345.67'), Decimal('45.6'), Decimal('2345.678'),): DecimalModel(decimal=d).save() def test_filter(self): d = DecimalModel.objects.get(decimal=Decimal('5.0')) self.assertTrue(isinstance(d.decimal, Decimal)) self.assertEquals(str(d.decimal), '5.00') d = DecimalModel.objects.get(decimal=Decimal('45.60')) self.assertEquals(str(d.decimal), '45.60') # Filter argument should be converted to Decimal with 2 decimal #_places. d = DecimalModel.objects.get(decimal='0000345.67333333333333333') self.assertEquals(str(d.decimal), '345.67') def test_order(self): """ Standard Django decimal-to-string conversion isn't monotonic (see `django.db.backends.util.format_number`). """ rows = DecimalModel.objects.all().order_by('decimal') values = list(d.decimal for d in rows) self.assertEquals(values, sorted(values)) def test_sign_extend(self): DecimalModel(decimal=Decimal('-0.0')).save() try: # If we've written a valid string we should be able to # retrieve the DecimalModel object without error. 
DecimalModel.objects.filter(decimal__lt=1)[0] except InvalidOperation: self.assertTrue(False) class DeleteModel(models.Model): key = models.IntegerField(primary_key=True) deletable = models.BooleanField() class BasicDeleteTest(TestCase): def setUp(self): for i in range(1, 10): DeleteModel(key=i, deletable=i % 2 == 0).save() def test_model_delete(self): d = DeleteModel.objects.get(pk=1) d.delete() with self.assertRaises(DeleteModel.DoesNotExist): DeleteModel.objects.get(pk=1) def test_delete_all(self): DeleteModel.objects.all().delete() self.assertEquals(0, DeleteModel.objects.all().count()) def test_delete_filtered(self): DeleteModel.objects.filter(deletable=True).delete() self.assertEquals(5, DeleteModel.objects.all().count()) class M2MDeleteChildModel(models.Model): key = models.IntegerField(primary_key=True) class M2MDeleteModel(models.Model): key = models.IntegerField(primary_key=True) deletable = models.BooleanField() children = models.ManyToManyField(M2MDeleteChildModel, blank=True) class ManyToManyDeleteTest(TestCase): """ Django-nonrel doesn't support many-to-many, but there may be models that are used which contain them, even if they're not accessed. This test ensures they can be deleted. """ def setUp(self): for i in range(1, 10): M2MDeleteModel(key=i, deletable=i % 2 == 0).save() def test_model_delete(self): d = M2MDeleteModel.objects.get(pk=1) d.delete() with self.assertRaises(M2MDeleteModel.DoesNotExist): M2MDeleteModel.objects.get(pk=1) @expectedFailure def test_delete_all(self): M2MDeleteModel.objects.all().delete() self.assertEquals(0, M2MDeleteModel.objects.all().count()) @expectedFailure def test_delete_filtered(self): M2MDeleteModel.objects.filter(deletable=True).delete() self.assertEquals(5, M2MDeleteModel.objects.all().count()) class QuerysetModel(models.Model): key = models.IntegerField(primary_key=True) class QuerysetTest(TestCase): """ Django 1.6 changes how """ def setUp(self): for i in range(10): QuerysetModel.objects.create(key=i + 1) def test_all(self): self.assertEqual(10, len(QuerysetModel.objects.all())) def test_none(self): self.assertEqual(0, len(QuerysetModel.objects.none())) """ The react module provides functionality for Reactive Programming (RP) and Functional Reactive Programming (FRP). It is a bit difficult to explain what FRP really is. This is because every implementation has its own take on it, and because it requires a bit of a paradigm shift compared to classic event-driven programming. FRP does not have to be difficult and we think our implementation of ``flexx.react`` is relatively easy to use. This brief guide takes you through some of the FRP aspects using code examples. What is FRP ----------- (Don't worry if the next two paragraphs sound complicated; things should start to make sense when we explain things using code.) *Where event-driven programming is about reacting to things that happen, RP is about staying up to date with changing signals.* In RP the different components in an application communicate via streams of data. In other words, components keep track of (and react to) the *signal values* of other components. All signals (except source/input signals) have one or more upstream signals, and can combine and/or modify these to produce a new signal value. The value of each signal is *cached*, so that the operations applied to the signal values only have to be performed when any upstream signal has changed. When a signal changes its value, it will *notify* its downstream signals, so that everything stays up-to-date.
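As a tiny preview, here is an editor's sketch of that idea, using the
``input`` and ``connect`` decorators that the sections below introduce:

.. code-block:: py

    @react.input
    def number(v=0):
        return float(v)

    @react.connect('number')
    def doubled(v):
        # Recomputed only when 'number' changes; the result is cached.
        return 2 * v

    number(3)  # 'doubled' updates to 6.0 and notifies its downstream signals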
In ``flexx.react`` signals are addressed using a string. This may seem unusual at first, but it allows easy binding of signals on classes, allows signal loops, and has other advantages that we'll discuss when we talk about dynamism. Signals ------- A signal can be created by decorating a function. In RP-speak, the function is "lifted" to a signal: .. code-block:: py # The function greet() is used to react to signal "name" @react.connect('name') def greet(n): print('hello %s!' % n) The example above looks quite similar to how some event-driven applications allow binding callbacks to events. There are, however, a few differences: a) The greet function has now become a signal object, which has an output of its own (although the output is None in this case, because the function does not return a value, more on that below); b) The function (which we'd call the "callback" in an event driven system) does not accept an event object, but a value that corresponds to the upstream signal value. One other advantage of an RP system is that signals can *connect to multiple upstream signals*: .. code-block:: py @react.connect('first_name', 'last_name') def greet(first, last): print('hello %s %s!' % (first, last)) This is a feature that saves a lot of overhead. For any "callback" that you define, you specify *exactly* what input signals there are, and it will always be up to date. Doing that in an event-driven system quickly results in a spaghetti of callbacks and boilerplate to keep track of state. The function of a signal gets called directly when any of the upstream signals (or the upstream-upstream signals) change. The return value of the function represents the output signal value, which can also be None. When the return value is ``undefined`` (from ``react.undefined`` or ``pyscript.undefined``), the value is ignored and the signal maintains its current value. Source and input signals ------------------------ Signals must start somewhere. The *source signal* has a ``_set()`` method that the programmer can use to set the value of the signal: .. code-block:: py @react.source def name(n): return n The function for this source signal is very simple. You usually want to do some input checking and/or normalization here. Especially if the input comes from the user, as is the case with the input signal. The *input signal* is a source signal that can be called with an argument to set its value: .. code-block:: py @react.input def name(n='john doe'): if not isinstance(n, str): raise ValueError('Name must be a string') return n.capitalize() # And later ... name('jane doe') You can also see how the default value of the function argument can be used to specify the initial signal value. Source and input signals generally do not have upstream signals, but they can have them. A complete example ------------------ .. code-block:: py @react.input def first_name(s='john'): return str(s) @react.input def last_name(s='doe'): return str(s) @react.connect('first_name', 'last_name') def full_name(first, last): return '%s %s' % (first, last) @react.connect('full_name') def greet(name): print('hello %s!' % name) Lazy signals ------------ In contrast to normal signals, a *lazy signal* does not update immediately when the upstream signals change. It is updated automatically (lazily) whenever its value is queried. Note that this has little effect when there is a normal signal downstream. Lazy signals can be convenient in a situation where values change rapidly, while the current value is only needed sparingly.
To create, use the ``lazy()`` decorator: .. code-block:: py @react.lazy('first_name', 'last_name') def full_name(first, last): return '%s %s' % (first, last) Caching ------- .. code-block:: py @react.input def data_select(id): return str(id) @react.input def data_clean(clean): return bool(clean) @react.connect('data_select') def data(id): open_connection(id) return get_data_from_the_web() # this may take a while @react.connect('data', 'data_clean') def show_data(data, clean): if clean: data = clean_func(data) plotter.show(data) This hypothetical example shows how caching helps keep apps efficient. The ``data`` signal will only update when ``data_select`` changes. When ``data_clean`` changes, the ``show_data`` signal updates, but it will use the cached value of the data. The HasSignals class -------------------- It is often convenient to create classes that have signals. To do so, inherit from the ``HasSignals`` class: .. code-block:: py class Person(react.HasSignals): def __init__(self, father): assert isinstance(father, Person) self.father = father react.HasSignals.__init__(self) @react.input def first_name(s): return s @react.connect('father.last_name') def last_name(s): return s @react.connect('first_name', 'last_name') def greet(first, last): print('hello %s %s!' % (first, last)) The above example shows how you can directly refer to signals on the object using their name, and even use dot notation to address the signal of an attribute of the object. It also shows that the signal functions do not have a ``self`` argument. They do not have to, but they can if they need access to the instance. Dynamism -------- With dynamism, you can refer to signals of signals, and have the signal connections be made automatically. Let's modify the last example a bit: .. code-block:: py class Person(react.HasSignals): def __init__(self, father): self.father(father) react.HasSignals.__init__(self) @react.input def father(f): assert isinstance(f, Person) return f @react.connect('father.last_name') def last_name(s): return s ... In this case, the last name of the father will change when either the father changes, or the father changes its name. Dynamism also supports star notation: .. code-block:: py class Person(react.HasSignals): @react.input def children(cc): assert isinstance(cc, tuple) assert all([isinstance(c, Person) for c in cc]) return cc @react.connect('children.*') def child_names(*names): return ', '.join(names) Signal history -------------- The signal object provides a bit more information than only its value. The most notable is the value of the signal before the last change. .. code-block:: py class Person(react.HasSignals): @react.connect('first_name') def react_to_name_change(self, new_name): old_name = self.first_name.last_value new_name = self.first_name.value # == new_name The signal value also holds information on value update times, but this is currently private. We'll have to see if this is reliable and convenient enough to make it public. Functional RP ------------- The "F" in FRP stands for functional. Currently, there is limited support for that, for example: .. code-block:: py filter = lambda x: x > 0 @react.connect(react.filter(filter, 'number')) def show_positive_numbers(v): print(v) This functionality is to be extended in the future. Some things just are events --------------------------- Many things can be described as changing signal values. Even "left_mouse_down" works pretty well. However, some things really *are* events, like key presses and timers.
How to handle these is still something we'd need to work out ... """ from .signals import SignalValueError, Signal, undefined from .signals import SourceSignal, InputSignal, LazySignal from .decorators import connect, source, input, lazy from .hassignals import HasSignals from .functional import map, filter, reduce, merge # -*- coding: utf-8 -*- from openerp.addons.payment.models.payment_acquirer import ValidationError from openerp.addons.payment.tests.common import PaymentAcquirerCommon from openerp.addons.payment_paypal.controllers.main import PaypalController from openerp.tools import mute_logger from lxml import objectify import urlparse class PaypalCommon(PaymentAcquirerCommon): def setUp(self): super(PaypalCommon, self).setUp() cr, uid = self.cr, self.uid self.base_url = self.registry('ir.config_parameter').get_param(cr, uid, 'web.base.url') # get the paypal account model, self.paypal_id = self.registry('ir.model.data').get_object_reference(cr, uid, 'payment_paypal', 'payment_acquirer_paypal') # tde+seller@openerp.com - tde+buyer@openerp.com - tde+buyer-it@openerp.com # some CC self.amex = (('378282246310005', '123'), ('371449635398431', '123')) self.amex_corporate = (('378734493671000', '123'),) self.australian_bankcard = (('5610591081018250', '123'),) self.dinersclub = (('30569309025904', '123'), ('38520000023237', '123')) self.discover = (('6011111111111117', '123'), ('6011000990139424', '123')) self.jcb = (('3530111333300000', '123'), ('3566002020360505', '123')) self.mastercard = (('5555555555554444', '123'), ('5105105105105100', '123')) self.visa = (('4111111111111111', '123'), ('4012888888881881', '123'), ('4222222222222', '123')) self.dankort_pbs = (('76009244561', '123'), ('5019717010103742', '123')) self.switch_polo = (('6331101999990016', '123'),) class PaypalServer2Server(PaypalCommon): def test_00_tx_management(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things paypal = self.payment_acquirer.browse(self.cr, self.uid, self.paypal_id, None) self.assertEqual(paypal.environment, 'test', 'test without test environment') res = self.payment_acquirer._paypal_s2s_get_access_token(cr, uid, [self.paypal_id], context=context) self.assertTrue(res[self.paypal_id] is not False, 'paypal: did not generate access token') tx_id = self.payment_transaction.s2s_create( cr, uid, { 'amount': 0.01, 'acquirer_id': self.paypal_id, 'currency_id': self.currency_euro_id, 'reference': 'test_reference', 'partner_id': self.buyer_id, }, { 'number': self.visa[0][0], 'cvc': self.visa[0][1], 'brand': 'visa', 'expiry_mm': 9, 'expiry_yy': 2015, }, context=context ) tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertTrue(tx.paypal_txn_id is not False, 'paypal: txn_id should have been set after s2s request') self.payment_transaction.write(cr, uid, tx_id, {'paypal_txn_id': False}, context=context) class PaypalForm(PaypalCommon): def test_10_paypal_form_render(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things self.payment_acquirer.write(cr, uid, self.paypal_id, {'fees_active': False}, context) paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context) self.assertEqual(paypal.environment, 'test', 'test without test environment') # ---------------------------------------- # Test: button direct rendering # ---------------------------------------- # render the button res = self.payment_acquirer.render( cr, uid, self.paypal_id, 'test_ref0', 0.01, self.currency_euro_id, partner_id=None,
partner_values=self.buyer_values, context=context) form_values = { 'cmd': '_xclick', 'business': 'tde+paypal-facilitator@openerp.com', 'item_name': 'test_ref0', 'item_number': 'test_ref0', 'first_name': 'Buyer', 'last_name': 'Norbert', 'amount': '0.01', 'currency_code': 'EUR', 'address1': 'Huge Street 2/543', 'city': 'Sin City', 'zip': '1000', 'country': 'Belgium', 'email': 'norbert.buyer@example.com', 'return': '%s' % urlparse.urljoin(self.base_url, PaypalController._return_url), 'notify_url': '%s' % urlparse.urljoin(self.base_url, PaypalController._notify_url), 'cancel_return': '%s' % urlparse.urljoin(self.base_url, PaypalController._cancel_url), } # check form result tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['submit']: continue self.assertEqual( form_input.get('value'), form_values[form_input.get('name')], 'paypal: wrong value for input %s: received %s instead of %s' % (form_input.get('name'), form_input.get('value'), form_values[form_input.get('name')]) ) def test_11_paypal_form_with_fees(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things paypal = self.payment_acquirer.browse(self.cr, self.uid, self.paypal_id, None) self.assertEqual(paypal.environment, 'test', 'test without test environment') # update acquirer: compute fees self.payment_acquirer.write(cr, uid, self.paypal_id, { 'fees_active': True, 'fees_dom_fixed': 1.0, 'fees_dom_var': 0.35, 'fees_int_fixed': 1.5, 'fees_int_var': 0.50, }, context) # render the button res = self.payment_acquirer.render( cr, uid, self.paypal_id, 'test_ref0', 12.50, self.currency_euro, partner_id=None, partner_values=self.buyer_values, context=context) # check form result handling_found = False tree = objectify.fromstring(res) self.assertEqual(tree.get('action'), 'https://www.sandbox.paypal.com/cgi-bin/webscr', 'paypal: wrong form POST url') for form_input in tree.input: if form_input.get('name') in ['handling']: handling_found = True self.assertEqual(form_input.get('value'), '1.57', 'paypal: wrong computed fees') self.assertTrue(handling_found, 'paypal: fees_active did not add handling input in rendered form') @mute_logger('openerp.addons.payment_paypal.models.paypal', 'ValidationError') def test_20_paypal_form_management(self): cr, uid, context = self.cr, self.uid, {} # be sure not to do stupid things paypal = self.payment_acquirer.browse(cr, uid, self.paypal_id, context) self.assertEqual(paypal.environment, 'test', 'test without test environment') # typical data posted by paypal after client has successfully paid paypal_post_data = { 'protection_eligibility': u'Ineligible', 'last_name': u'Poilu', 'txn_id': u'08D73520KX778924N', 'receiver_email': u'tde+paypal-facilitator@openerp.com', 'payment_status': u'Pending', 'payment_gross': u'', 'tax': u'0.00', 'residence_country': u'FR', 'address_state': u'Alsace', 'payer_status': u'verified', 'txn_type': u'web_accept', 'address_street': u'Av. 
de la Pelouse, 87648672 Mayet', 'handling_amount': u'0.00', 'payment_date': u'03:21:19 Nov 18, 2013 PST', 'first_name': u'Norbert', 'item_name': u'test_ref_2', 'address_country': u'France', 'charset': u'windows-1252', 'custom': u'', 'notify_version': u'3.7', 'address_name': u'Norbert Poilu', 'pending_reason': u'multi_currency', 'item_number': u'test_ref_2', 'receiver_id': u'DEG7Z7MYGT6QA', 'transaction_subject': u'', 'business': u'tde+paypal-facilitator@openerp.com', 'test_ipn': u'1', 'payer_id': u'VTDKRZQSAHYPS', 'verify_sign': u'An5ns1Kso7MWUdW4ErQKJJJ4qi4-AVoiUf-3478q3vrSmqh08IouiYpM', 'address_zip': u'75002', 'address_country_code': u'FR', 'address_city': u'Paris', 'address_status': u'unconfirmed', 'mc_currency': u'EUR', 'shipping': u'0.00', 'payer_email': u'tde+buyer@openerp.com', 'payment_type': u'instant', 'mc_gross': u'1.95', 'ipn_track_id': u'866df2ccd444b', 'quantity': u'1' } # should raise error about unknown tx with self.assertRaises(ValidationError): self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context) # create tx tx_id = self.payment_transaction.create( cr, uid, { 'amount': 1.95, 'acquirer_id': self.paypal_id, 'currency_id': self.currency_euro_id, 'reference': 'test_ref_2', 'partner_name': 'Norbert Buyer', 'partner_country_id': self.country_france_id, }, context=context ) # validate it self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context) # check tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'pending', 'paypal: wrong state after receiving a valid pending notification') self.assertEqual(tx.state_message, 'multi_currency', 'paypal: wrong state message after receiving a valid pending notification') self.assertEqual(tx.paypal_txn_id, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid pending notification') self.assertFalse(tx.date_validate, 'paypal: validation date should not be updated when receiving pending notification') # update tx self.payment_transaction.write(cr, uid, [tx_id], { 'state': 'draft', 'paypal_txn_id': False, }, context=context) # update notification from paypal paypal_post_data['payment_status'] = 'Completed' # validate it self.payment_transaction.form_feedback(cr, uid, paypal_post_data, 'paypal', context=context) # check tx = self.payment_transaction.browse(cr, uid, tx_id, context=context) self.assertEqual(tx.state, 'done', 'paypal: wrong state after receiving a valid completed notification') self.assertEqual(tx.paypal_txn_id, '08D73520KX778924N', 'paypal: wrong txn_id after receiving a valid completed notification') self.assertEqual(tx.date_validate, '2013-11-18 03:21:19', 'paypal: wrong validation date') # Licensed to the StackStorm, Inc ('StackStorm') under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License.
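# Illustrative aside (not part of the original test module): the PayPal tests
# above assert that a 'Pending' IPN payload maps to transaction state 'pending'
# (with 'pending_reason' kept as the state message) and a 'Completed' payload
# maps to 'done'. A minimal, hypothetical sketch of that mapping, independent
# of the Odoo models; classify_paypal_ipn is an invented helper name.
def classify_paypal_ipn(post_data):
    """Return a (state, message) pair for a PayPal IPN payload (sketch only)."""
    status = post_data.get('payment_status')
    if status == 'Completed':
        return ('done', None)
    if status == 'Pending':
        # PayPal explains the hold in 'pending_reason', e.g. 'multi_currency'
        return ('pending', post_data.get('pending_reason'))
    return ('error', 'unrecognized payment_status: %r' % status)

# quick self-check mirroring the assertions in the tests above
assert classify_paypal_ipn({'payment_status': 'Pending',
                            'pending_reason': 'multi_currency'}) == ('pending', 'multi_currency')
assert classify_paypal_ipn({'payment_status': 'Completed'}) == ('done', None)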
import logging import st2common.config as config from st2common.transport.bootstrap_utils import register_exchanges def _setup(): # 1. parse config args. config.parse_args() # 2. setup logging. logging.basicConfig(format='%(asctime)s %(levelname)s [-] %(message)s', level=logging.DEBUG) def main(): _setup() register_exchanges() # The script sets up exchanges in RabbitMQ. if __name__ == '__main__': main() from turtle import Turtle, colormode from random import randint import sys def randColor(): return randint(0,255) def drawTriangle(t,dist): t.fillcolor(randColor(),randColor(),randColor()) t.down() t.setheading(0) t.begin_fill() t.forward(dist) t.left(120) t.forward(dist) t.left(120) t.forward(dist) t.setheading(0) t.end_fill() t.up() def sierpinski(t,levels,size): if levels == 0: # Draw triangle drawTriangle(t,size) else: half = size/2 levels -= 1 # Recursive calls sierpinski(t,levels,half) t.setpos(t.xcor()+half,t.ycor()) sierpinski(t,levels,half) t.left(120) t.forward(half) t.setheading(0) sierpinski(t,levels,half) t.right(120) t.forward(half) t.setheading(0) def main(configuration): t = Turtle() t.speed(10) t.up() t.setpos(-configuration['size']/2,-configuration['size']/2) colormode(255) sierpinski(t,configuration['level'],configuration['size']) def start(): configuration = {'level': 2, 'size': 480} if len(sys.argv) >= 2 and sys.argv[1].isdigit(): configuration['level'] = int(sys.argv[1]) if len(sys.argv) == 3 and sys.argv[2].isdigit(): configuration['size'] = int(sys.argv[2]) main(configuration) raw_input("Press ENTER to continue") start() #!/usr/bin/env python # -*- coding: utf-8 -*- # # Copyright 2008-2009 Zuza Software Foundation # # This file is part of the Translate Toolkit. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, see <http://www.gnu.org/licenses/>.
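# Illustrative aside (not part of the original script): sierpinski() above
# makes three recursive calls per level, so a drawing at level n emits 3**n
# filled triangles, each of side size / 2**n. A quick sanity check of that
# count, assuming only the recursion structure shown above:
def triangles_drawn(levels):
    # mirrors sierpinski(): one triangle at the base, three subcalls otherwise
    if levels == 0:
        return 1
    return 3 * triangles_drawn(levels - 1)

assert triangles_drawn(0) == 1
assert triangles_drawn(2) == 9       # the script's default level draws 9 triangles
assert triangles_drawn(5) == 3 ** 5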
"""Tests for Qt Linguist storage class Reference implementation & tests: gitorious:qt5-tools/src/qttools/tests/auto/linguist/lconvert/data """ from lxml import etree from translate.misc.multistring import multistring from translate.storage import test_base, ts2 as ts from translate.storage.placeables import parse, xliff from translate.storage.placeables.lisa import xml_to_strelem TS_NUMERUS = """ Dialog2 %n files %n cars Age: %1 func3 """ xliffparsers = [] for attrname in dir(xliff): attr = getattr(xliff, attrname) if type(attr) is type and \ attrname not in ('XLIFFPlaceable') and \ hasattr(attr, 'parse') and \ attr.parse is not None: xliffparsers.append(attr.parse) def rich_parse(s): return parse(s, xliffparsers) class TestTSUnit(test_base.TestTranslationUnit): UnitClass = ts.tsunit class TestTSfile(test_base.TestTranslationStore): StoreClass = ts.tsfile def test_basic(self): tsfile = ts.tsfile() assert tsfile.units == [] tsfile.addsourceunit("Bla") assert len(tsfile.units) == 1 newfile = ts.tsfile.parsestring(str(tsfile)) print(str(tsfile)) assert len(newfile.units) == 1 assert newfile.units[0].source == "Bla" assert newfile.findunit("Bla").source == "Bla" assert newfile.findunit("dit") is None def test_source(self): tsfile = ts.tsfile() tsunit = tsfile.addsourceunit("Concept") tsunit.source = "Term" newfile = ts.tsfile.parsestring(str(tsfile)) print(str(tsfile)) assert newfile.findunit("Concept") is None assert newfile.findunit("Term") is not None def test_target(self): tsfile = ts.tsfile() tsunit = tsfile.addsourceunit("Concept") tsunit.target = "Konsep" newfile = ts.tsfile.parsestring(str(tsfile)) print(str(tsfile)) assert newfile.findunit("Concept").target == "Konsep" def test_plurals(self): """Test basic plurals""" tsfile = ts.tsfile() tsunit = tsfile.addsourceunit("File(s)") tsunit.target = [u"Leêr", u"Leêrs"] newfile = ts.tsfile.parsestring(str(tsfile)) print(str(tsfile)) checkunit = newfile.findunit("File(s)") assert checkunit.target == [u"Leêr", u"Leêrs"] assert checkunit.hasplural() def test_language(self): """Check that we can get and set language and sourcelanguage in the header""" tsstr = ''' ''' tsfile = ts.tsfile.parsestring(tsstr) assert tsfile.gettargetlanguage() == 'fr' assert tsfile.getsourcelanguage() == 'de' tsfile.settargetlanguage('pt_BR') assert 'pt_BR' in str(tsfile) assert tsfile.gettargetlanguage() == 'pt-br' # We convert en_US to en tsstr = ''' ''' tsfile = ts.tsfile.parsestring(tsstr) assert tsfile.getsourcelanguage() == 'en' def test_edit(self): """test editing works well""" tsstr = ''' MainWindow ObsoleteString Groepen SourceString TargetString ''' tsfile = ts.tsfile.parsestring(tsstr) tsfile.units[1].settarget('TestTarget') tsfile.units[1].markfuzzy(True) newtsstr = tsstr.decode('utf-8').replace( '>TargetString', ' type="unfinished">TestTarget' ).encode('utf-8') assert newtsstr == str(tsfile) def test_locations(self): """test that locations work well""" tsstr = ''' MainWindow Desktop Settings (Default) Asztali beállítások (Alapértelmezett) Choose style and palette based on your desktop settings. Stílus és paletta alapú kiválasztása az asztali beállításokban. 
''' tsfile = ts.tsfile.parsestring(tsstr) assert len(tsfile.units) == 2 assert tsfile.units[0].getlocations() == ['../tools/qtconfig/mainwindow.cpp:+202'] assert tsfile.units[1].getlocations() == ['+5'] def test_merge_with_fuzzies(self): """test that merge with fuzzy works well""" tsstr1 = ''' MainWindow Desktop Settings (Default) Asztali beállítások (Alapértelmezett) Choose style and palette based on your desktop settings. Stílus és paletta alapú kiválasztása az asztali beállításokban. ''' tsstr2 = ''' MainWindow Desktop Settings (Default) Choose style and palette based on your desktop settings. ''' tsfile = ts.tsfile.parsestring(tsstr1) tsfile2 = ts.tsfile.parsestring(tsstr2) assert len(tsfile.units) == 2 assert len(tsfile2.units) == 2 tsfile2.units[0].merge(tsfile.units[0]) # fuzzy tsfile2.units[1].merge(tsfile.units[1]) # not fuzzy assert tsfile2.units[0].isfuzzy() assert not tsfile2.units[1].isfuzzy() def test_getid(self): """test that getid works well""" tsfile = ts.tsfile.parsestring(TS_NUMERUS) assert tsfile.units[0].getid() == "Dialog2%n files" assert tsfile.units[1].getid() == "Dialog2\nthis_is_some_id%n cars" assert tsfile.units[3].getid() == "Dialog2\nthis_is_another_idfunc3" def test_backnforth(self): """test that ts files are read and output properly""" tsfile = ts.tsfile.parsestring(TS_NUMERUS) assert str(tsfile) == TS_NUMERUS __author__ = 'ejs' import errno import glob import logging import inspect import os import sys import platform def mkdirP(path): try: os.makedirs(path) except OSError as exc: # Python >2.5 if exc.errno == errno.EEXIST and os.path.isdir(path): pass else: raise def get_actual_filename(name): if not '\\' in name: return name dirs = name.split('\\') # disk letter test_name = [dirs[0].upper()] for d in dirs[1:]: test_name += ["%s[%s]" % (d[:-1], d[-1])] res = glob.glob('\\'.join(test_name)) if not res: #File not found return None return res[0] _mainScriptDir = None def getScriptDir(module=None, toParent=None): """ Find the directory where the main script is running From http://stackoverflow.com/questions/3718657/how-to-properly-determine-current-script-directory-in-python/22881871#22881871 :param follow_symlinks: :return: """ global _mainScriptDir if module and not _mainScriptDir: if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze path = os.path.abspath(sys.executable) else: path = inspect.getabsfile(module) if not toParent: toParent = "../.." path = os.path.join(path, toParent) # remove our package and module path = os.path.realpath(path) # Windows needs real case for e.g. model path lookups path = get_actual_filename(path) _mainScriptDir = os.path.dirname(path) # our package return _mainScriptDir _libScriptDir = None def getLibScriptDir(): """ Find the directory where the main script is running From http://stackoverflow.com/questions/3718657/how-to-properly-determine-current-script-directory-in-python/22881871#22881871 :param follow_symlinks: :return: """ global _libScriptDir if not _libScriptDir: if getattr(sys, 'frozen', False): # py2exe, PyInstaller, cx_Freeze path = os.path.abspath(sys.executable) else: path = inspect.getabsfile(sys.modules['utils.filesystem']) path = os.path.join(path, "../..") # remove our package and module path = os.path.realpath(path) # Windows needs real case for e.g. model path lookups path = get_actual_filename(path) #print "getLibScriptDir:",path _libScriptDir = os.path.dirname(path) # our package return _libScriptDir def getUserDataDir(): """ Get real user data folder under which the game data can be stored. 
:return: """ if platform.system() == 'Windows': # HOME is not trustworthy userhome = os.environ.get('USERPROFILE') if not userhome: userhome = os.path.expanduser('~') data_dir = os.path.join(userhome, "AppData", "Roaming") if not os.path.exists(data_dir): data_dir = os.path.join(userhome, "Documents") elif platform.system() == 'Linux': data_dir = os.path.expanduser("~/.config") elif platform.system() == 'Darwin': data_dir = os.path.expanduser("~/Library") else: data_dir = os.path.expanduser("~") return data_dir _tempDir = None def findDataFilename(name, extract=False, executable=False): """ Resolve a filename along Panda's model-path. :param name: :return: filename or None """ from panda3d.core import Filename, getModelPath from panda3d.core import VirtualFileSystem logging.debug("findDataFilename: "+ name +" on: \n" + str(getModelPath().getValue())) vfs = VirtualFileSystem.getGlobalPtr() fileName = Filename(name) vfile = vfs.findFile(fileName, getModelPath().getValue()) if not vfile: if extract and name.endswith(".exe"): fileName = Filename(name[:-4]) vfile = vfs.findFile(fileName, getModelPath().getValue()) if not vfile: return None fileName = vfile.getFilename() if extract: # see if the file is embedded in some virtual place OR has the wrong perms from panda3d.core import SubfileInfo info = SubfileInfo() needsCopy = not vfile.getSystemInfo(info) or info.getFilename() != fileName if not needsCopy: if executable: # see if on Linux or OSX and not executable try: stat = os.stat(fileName.toOsSpecific()) if (stat.st_mode & 0111) == 0: logging.error("Found %s locally, but not marked executable!", fileName) needsCopy = True except: needsCopy = True if needsCopy: # virtual file needs to be copied out global _tempDir if not _tempDir: import tempfile _tempDir = os.path.realpath(tempfile.mkdtemp()) #print "Temp dir:",_tempDir xpath = _tempDir + '/' + fileName.getBasename() xTarg = Filename.fromOsSpecific(xpath) # on Windows, case-sensitivity must be honored for the following to work xTarg.makeCanonical() print "extracting",fileName,"to",xTarg if not xTarg.exists(): if not vfs.copyFile(fileName, xTarg): raise IOError("extraction failed when copying " + str(fileName) + " to " + str(xTarg)) fileName = xTarg os.chmod(fileName.toOsSpecific(), 0777) return fileName def findDataFile(name, extract=False, executable=False): """ Resolve a filename along Panda's model-path. :param name: :return: path or None """ fileName = findDataFilename(name, extract, executable) if not fileName: return None return fileName.toOsSpecific() def toPanda(path): path = path.replace('\\', '/') # make Windows path look Unix-y for the VFS if len(path) > 3 and path[1] == ':' and path[2] == '/': path = '/' + path[0].lower() + path[2:] return path # Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Arithmetic Operations that don't fit into math_ops due to dependencies. 
To avoid circular dependencies, some math_ops should go here. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import re from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.python.framework import ops from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util.tf_export import tf_export # TODO(b/27419586) Change docstring for required dtype of x once int allowed @tf_export('lbeta') def lbeta(x, name=None): r"""Computes \\(ln(|Beta(x)|)\\), reducing along the last dimension. Given one-dimensional `z = [z_0,...,z_{K-1}]`, we define $$Beta(z) = \prod_j Gamma(z_j) / Gamma(\sum_j z_j)$$ And for `n + 1` dimensional `x` with shape `[N1, ..., Nn, K]`, we define $$lbeta(x)[i1, ..., in] = Log(|Beta(x[i1, ..., in, :])|)$$. In other words, the last dimension is treated as the `z` vector. Note that if `z = [u, v]`, then \\(Beta(z) = \int_0^1 t^{u-1} (1 - t)^{v-1} dt\\), which defines the traditional bivariate beta function. If the last dimension is empty, we follow the convention that the sum over the empty set is zero, and the product is one. Args: x: A rank `n + 1` `Tensor`, `n >= 0` with type `float`, or `double`. name: A name for the operation (optional). Returns: The logarithm of \\(|Beta(x)|\\) reducing along the last dimension. """ # In the event that the last dimension has zero entries, we return -inf. # This is consistent with a convention that the sum over the empty set is 0, and # the product is 1. # This is standard. See https://en.wikipedia.org/wiki/Empty_set. with ops.name_scope(name, 'lbeta', [x]): x = ops.convert_to_tensor(x, name='x') # Note reduce_sum([]) = 0. log_prod_gamma_x = math_ops.reduce_sum( math_ops.lgamma(x), reduction_indices=[-1]) # Note lgamma(0) = infinity, so if x = [] # log_gamma_sum_x = lgamma(0) = infinity, and # log_prod_gamma_x = lgamma(1) = 0, # so result = -infinity sum_x = math_ops.reduce_sum(x, axis=[-1]) log_gamma_sum_x = math_ops.lgamma(sum_x) result = log_prod_gamma_x - log_gamma_sum_x return result @tf_export('math.bessel_i0') def bessel_i0(x, name=None): """Computes the Bessel i0 function of `x` element-wise. Modified Bessel function of order 0. It is preferable to use the numerically stabler function `i0e(x)` instead. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`. @compatibility(scipy) Equivalent to scipy.special.i0 @end_compatibility """ with ops.name_scope(name, 'bessel_i0', [x]): return math_ops.exp(math_ops.abs(x)) * math_ops.bessel_i0e(x) @tf_export('math.bessel_i1') def bessel_i1(x, name=None): """Computes the Bessel i1 function of `x` element-wise. Modified Bessel function of order 1. It is preferable to use the numerically stabler function `i1e(x)` instead. Args: x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`, `float32`, `float64`. name: A name for the operation (optional). Returns: A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
@compatibility(scipy) Equivalent to scipy.special.i1 @end_compatibility """ with ops.name_scope(name, 'bessel_i1', [x]): return math_ops.exp(math_ops.abs(x)) * math_ops.bessel_i1e(x) @tf_export('einsum', 'linalg.einsum') def einsum(equation, *inputs, **kwargs): """A generalized contraction between tensors of arbitrary dimension. This function returns a tensor whose elements are defined by `equation`, which is written in a shorthand form inspired by the Einstein summation convention. As an example, consider multiplying two matrices A and B to form a matrix C. The elements of C are given by: ``` C[i,k] = sum_j A[i,j] * B[j,k] ``` The corresponding `equation` is: ``` ij,jk->ik ``` In general, the `equation` is obtained from the more familiar element-wise equation by 1. removing variable names, brackets, and commas, 2. replacing "*" with ",", 3. dropping summation signs, and 4. moving the output to the right, and replacing "=" with "->". Many common operations can be expressed in this way. For example: ```python # Matrix multiplication >>> einsum('ij,jk->ik', m0, m1) # output[i,k] = sum_j m0[i,j] * m1[j, k] # Dot product >>> einsum('i,i->', u, v) # output = sum_i u[i]*v[i] # Outer product >>> einsum('i,j->ij', u, v) # output[i,j] = u[i]*v[j] # Transpose >>> einsum('ij->ji', m) # output[j,i] = m[i,j] # Batch matrix multiplication >>> einsum('aij,ajk->aik', s, t) # out[a,i,k] = sum_j s[a,i,j] * t[a, j, k] ``` This function behaves like `numpy.einsum`, but does not support: * Ellipses (subscripts like `ij...,jk...->ik...`) * Subscripts where an axis appears more than once for a single input (e.g. `ijj,k->ik`). * Subscripts that are summed across multiple inputs (e.g., `ij,ij,jk->ik`). Args: equation: a `str` describing the contraction, in the same format as `numpy.einsum`. *inputs: the inputs to contract (each one a `Tensor`), whose shapes should be consistent with `equation`. name: A name for the operation (optional). Returns: The contracted `Tensor`, with shape determined by `equation`. Raises: ValueError: If - the format of `equation` is incorrect, - the number of inputs implied by `equation` does not match `len(inputs)`, - an axis appears in the output subscripts but not in any of the inputs, - the number of dimensions of an input differs from the number of indices in its subscript, or - the input shapes are inconsistent along a particular axis. """ equation = equation.replace(' ', '') name = kwargs.pop('name', None) if kwargs: raise TypeError('invalid keyword arguments for this function: ' + ', '.join( [format(key) for key in sorted(list(kwargs.keys()))])) with ops.name_scope(name, 'einsum', [equation, inputs]) as name: if '...' 
in equation: raise ValueError('Subscripts with ellipses are not yet supported.') match = re.match('^([a-zA-Z,]+)(->[a-zA-Z]*)?$', equation) if not match: raise ValueError('Indices have incorrect format: %s' % equation) inputs = list(inputs) input_axis_labels = match.group(1).split(',') if len(inputs) != len(input_axis_labels): raise ValueError('Got %d arguments for equation "%s", expecting %d' % (len(inputs), equation, len(input_axis_labels))) axis_labels = set(''.join(input_axis_labels)) if match.group(2): output_axis_labels = match.group(2)[2:] else: # infer the output subscripts if not given, assume alphabetical order indices = ''.join(sorted(axis_labels)) counts = {ax: 0 for ax in indices} for axes_ in input_axis_labels: for ax in axes_: counts[ax] += 1 output_axis_labels = ''.join( sorted(ax for ax in indices if counts[ax] == 1)) for a in axis_labels: input_count = sum(1 for s in input_axis_labels if a in s) if input_count > 2 and a not in output_axis_labels: logging.warn( 'Falling back to exponential-space implementation of einsum()' ' because index "%s" is summed over more than two inputs.', a) return _exponential_space_einsum(equation, *inputs) temp = inputs[0] temp_axis_labels = input_axis_labels[0] for i in xrange(len(inputs) - 1): axes_to_sum = ( set(temp_axis_labels) & set(input_axis_labels[i + 1]) - set(output_axis_labels)) temp, temp_axis_labels = _einsum_reduction( temp, temp_axis_labels, inputs[i + 1], input_axis_labels[i + 1], axes_to_sum) missing_indices = set(temp_axis_labels) - set(output_axis_labels) if missing_indices: reduction_indices = [ i for i, a in enumerate(temp_axis_labels) if a not in output_axis_labels ] temp = math_ops.reduce_sum(temp, reduction_indices=reduction_indices) temp_axis_labels = ''.join( a for a in temp_axis_labels if a in output_axis_labels) if sorted(temp_axis_labels) != sorted(output_axis_labels): raise ValueError('Invalid equation: %s' % equation) perm = [temp_axis_labels.index(a) for a in output_axis_labels] return _transpose_if_necessary(temp, perm) def _einsum_reduction(t0, t0_axis_labels, t1, t1_axis_labels, axes_to_sum): """Helper for einsum() that computes the result of a two-argument einsum(). Args: t0: a `Tensor` t0_axis_labels: a string of axis labels. This string's length must equal the rank of t0. t1: a `Tensor` t1_axis_labels: a string to axis labels. This string's length must equal the rank of t1. axes_to_sum: set of labels of axes to be summed over Returns: A `Tensor` whose elements are obtained by summing, over all axes in `axes_to_sum`, the corresponding elements of `t0` and `t1`. For example, if t0_axis_labels == 'abijk', t1_axis_labels == 'acjkl', and axes_to_sum == {j,k}, this will return a tensor x where out[a,b,c,i,l] = sum_j sum_k t0[a,b,i,j,k] * t1[a,c,j,k,l] Raises: ValueError: if the rank of `t0` does not match the length of `t0_axis_labels`, or that of `t1` does not match the length of `t1_axis_labels`. """ if len(t0_axis_labels) != len(t0.get_shape()): raise ValueError( 'Tensor t0 of rank %d does not match einsum reduction of length %d' % (len(t0.get_shape()), len(t0_axis_labels))) if len(t1_axis_labels) != len(t1.get_shape()): raise ValueError( 'Tensor t1 of rank %d does not match einsum reduction of length %d' % (len(t1.get_shape()), len(t1_axis_labels))) # This function computes the result of a two-argument einsum() using batch # matrix multiplication. This involves # 1. transposing t0 and t1 so that axes are in the correct order for # batch matrix multiplication, and # 2. 
reshaping t0 and t1 so that they are both of rank 3. # First, we divide axes into three groups: # * "preserved" axes are present in both inputs and the output # * "summed" axes are present in both inputs but not the output # * "broadcast" axes are present in exactly one input and the output # # As an example, if the einsum is abijk,acjkl->abcil, then "a" is a # preserved axis, "b" and "c" are broadcast axes, and "j" and "k" are # summed axes. assert all(a in t0_axis_labels and a in t1_axis_labels for a in axes_to_sum) preserved_axes = (set(t0_axis_labels) & set(t1_axis_labels)) - axes_to_sum broadcast_axes = {} for i, sym_list in enumerate([t0_axis_labels, t1_axis_labels]): broadcast_axes[i] = set(sym_list) - preserved_axes - axes_to_sum # Reorder the axes so that: # 1. preserved axes come first in both inputs # 2. in input 0, broadcast axes come next, followed by summed axes # 3. in input 1, summed axes come next, followed by broadcast axes def sort_key(input_index, a): if a in preserved_axes: return (-1, a) elif ((input_index == 0 and a in broadcast_axes[0]) or (input_index == 1 and a in axes_to_sum)): return (0, a) else: return (1, a) axis_labels = [t0_axis_labels, t1_axis_labels] sorted_axes = [ sorted(sym_list, key=lambda a: sort_key(i, a)) for i, sym_list in enumerate(axis_labels) ] inputs = [t0, t1] for i, axes_str in enumerate(axis_labels): perm = [axes_str.find(a) for a in sorted_axes[i]] inputs[i] = _transpose_if_necessary(inputs[i], perm) t0, t1 = inputs if not axes_to_sum: # In the special case where there are no axes to sum over, reduce to mul() # rather than to batch matrix multiplication. for _ in broadcast_axes[1]: t0 = array_ops.expand_dims(t0, -1) for _ in broadcast_axes[0]: t1 = array_ops.expand_dims(t1, len(preserved_axes)) product = math_ops.multiply(t0, t1) product_axes = sorted_axes[0] + sorted_axes[1][len(preserved_axes):] return product, ''.join(product_axes) else: # Reduce to matmul(). # Reshape both inputs so as to combine multiple broadcast axes # into a single axis, and combine multiple summed axes into a # single axis. t0_shape = _get_shape(t0) num_broadcast_elements_t0 = _total_size( t0_shape[len(preserved_axes):-len(axes_to_sum)]) num_summed_elements = _total_size(t0_shape[-len(axes_to_sum):]) new_shape = ( t0_shape[:len(preserved_axes)] + [num_broadcast_elements_t0, num_summed_elements]) t0 = _reshape_if_necessary(t0, new_shape) t1_shape = _get_shape(t1) num_broadcast_elements_t1 = _total_size( t1_shape[len(preserved_axes) + len(axes_to_sum):]) new_shape = ( t1_shape[:len(preserved_axes)] + [num_summed_elements, num_broadcast_elements_t1]) t1 = _reshape_if_necessary(t1, new_shape) product = math_ops.matmul(t0, t1) # Undo compaction of broadcast axes uncompacted_shape = ( t0_shape[:len(preserved_axes) + len(broadcast_axes[0])] + t1_shape[len(t1_shape) - len(broadcast_axes[1]):]) product = _reshape_if_necessary(product, uncompacted_shape) product_axes = ( sorted_axes[0][:len(preserved_axes) + len(broadcast_axes[0])] + sorted_axes[1][len(sorted_axes[1]) - len(broadcast_axes[1]):]) return product, ''.join(product_axes) def _transpose_if_necessary(tensor, perm): """Like transpose(), but avoids creating a new tensor if possible.""" if perm != range(len(perm)): return array_ops.transpose(tensor, perm=perm) else: return tensor def _reshape_if_necessary(tensor, new_shape): """Like reshape(), but avoids creating a new tensor if possible.""" # Accept None as an alias for -1 in new_shape. 
new_shape = tuple(-1 if x is None else x for x in new_shape) cur_shape = tuple(x.value for x in tensor.get_shape()) if (len(new_shape) == len(cur_shape) and all(d0 == d1 or d1 == -1 for d0, d1 in zip(cur_shape, new_shape))): return tensor else: return array_ops.reshape(tensor, new_shape) def _get_shape(tensor): """Like get_shape().as_list(), but explicitly queries the shape of a tensor if necessary to ensure that the returned value contains no unknown value.""" shape = tensor.get_shape().as_list() none_indices = [i for i, d in enumerate(shape) if d is None] if none_indices: # Query the shape if shape contains None values shape_tensor = array_ops.shape(tensor) for i in none_indices: shape[i] = shape_tensor[i] return shape def _total_size(shape_values): """Given list of tensor shape values, returns total size. If shape_values contains tensor values (which are results of array_ops.shape), then it returns a scalar tensor. If not, it returns an integer.""" result = 1 for val in shape_values: result *= val return result def _exponential_space_einsum(equation, *inputs): """Fallback implementation that supports summing an index over > 2 inputs.""" if '...' in equation: raise ValueError('Subscripts with ellipses are not yet supported.') match = re.match('^([a-zA-Z,]+)(->[a-zA-Z]*)?$', equation) if not match: raise ValueError('Indices have incorrect format: %s' % equation) inputs = list(inputs) idx_in = match.group(1).split(',') idx_all = set(''.join(idx_in)) indices = ''.join(sorted(idx_all)) if match.group(2): idx_out = match.group(2)[2:] else: # infer the output subscripts if not given, assume alphabetical order counts = {ax: 0 for ax in indices} for axes_ in idx_in: for ax in axes_: counts[ax] += 1 idx_out = ''.join(sorted(ax for ax in indices if counts[ax] == 1)) if len(idx_in) != len(inputs): raise ValueError('Expected %d inputs but got %d' % (len(idx_in), len(inputs))) missing_idx = set(idx_out).difference(idx_all) if missing_idx: raise ValueError('Unknown output axes: %s' % missing_idx) axis_order = {} for ax in indices: if ax not in idx_out: axis_order[ax] = len(axis_order) for ax in idx_out: axis_order[ax] = len(axis_order) # transpose inputs so axes are in order for i, (input_, axes_) in enumerate(zip(inputs, idx_in)): if input_.get_shape().ndims != len(axes_): raise ValueError( 'Input %d with axes %s has incorrect' \ ' number of dimensions (expected %d, got %d)' % ( i, axes_, len(axes_), input_.get_shape().ndims ) ) sorted_idx = sorted(axes_, key=axis_order.get) if len(set(axes_)) != len(axes_): raise ValueError( 'Subscript not supported: an axis appears more than once: %s' % axes_) if list(axes_) != sorted_idx: permuted = [axes_.find(ax) for ax in sorted_idx] inputs[i] = array_ops.transpose(input_, permuted) idx_in[i] = sorted_idx reduction_idx = [] shapes = [[dim if dim else -1 for dim in tensor.get_shape().as_list()] for tensor in inputs] # validate shapes for broadcasting for j, ax in enumerate(sorted(idx_all, key=axis_order.get)): dims = [] for i, idx in enumerate(idx_in): if ax not in idx: shapes[i].insert(j, 1) else: dim = shapes[i][j] if isinstance(dim, int) and dim > 1: dims.append(dim) if len(set(dims)) > 1: raise ValueError('Dimension mismatch on axis: %s' % ax) if ax not in idx_out: reduction_idx.append(j) # reshape, multiply expanded_inputs = [ array_ops.reshape(input_, shape) for input_, shape in zip(inputs, shapes) ] expanded_output = 1 for input_ in expanded_inputs: expanded_output *= input_ # contract return math_ops.reduce_sum(expanded_output, reduction_idx) import 
numpy as np # from numpy.random import randint my_list = [1,2,3,4,5,6] new_list = [[1,2,3], [4,5,6], [7,8,9]] # 1D array print('Casting a premade list into a 1D numpy array') print(np.array(my_list)) # 2D array, note the extra brackets being displayed print('\nCasting a list of lists into a 2D numpy array') print(np.array(new_list)) # similar to regular range function # (start, stop, step) print('\n np.arange to create a 1D array from (start, stop, step)') print(np.arange(0,10,2)) # returns evenly spaced points between (start, stop, num=50) # only a 1D array # example below returns 30 evenly spaced pts between 0 and 5 print('\n np.linspace to return evenly spaced arrays from (start, stop, num)') print(np.linspace(0,5,30)) # arrays of zeros and ones # 2D arrays as we're passing in tuples print('\n Zeros and Ones') print(np.zeros((3,3))) print() print(np.ones((3,3))) # identity matrix - for linear algebra problems # returns a 2D array with ones on the diagonal and zeros elsewhere # takes a single dimension argument, thus the example below returns a 7x7 array print('\n Identity Matrix') print(np.eye(7)) # random.rand # returns random values in a given shape, not ints # 1st example is 1D array # 2nd example is 2D array, note we don't have to pass in tuples like before print('\n random.rand as a 1D array') print(np.random.rand(5)) print('\n random.rand as a 2D array') print(np.random.rand(5,5)) # random.randn # returns sample from "Standard Normal"/ Gaussian distribution # for 2D-plus arrays there's no need to pass in tuples either print('\n Standard Normal/ Gaussian distribution in a 1D array') print(np.random.randn(7)) print('\n Same Gaussian except in a 2D array if 2 arguments were passed in') print(np.random.randn(4,4)) # random.randint # returns 1 random int if size is not specified # (low, high, size) print('\n random.randint to return n random ints from (low, high, size)') print(np.random.randint(0,10,5)) # reshaping an array # first build a 1D array using np.arange # then reshape and assign to a new variable # note that total size of new array must remain the same # if the original array was only 25 elements, we cannot reshape it into a 5x10 array print('\n array.reshape on an array created with np.arange(0, 25)') arr = np.arange(0,25) print(arr) arr2 = arr.reshape(5,5) print('\n Note reshaping does not alter the original array,\n so we assigned it to a new variable') print(arr2) # shape attribute print('\n the shape of the array is {}'.format(arr2.shape)) # finding max and min # finding position of the max and min # finding the type of the array with dtype attribute randr = np.random.randint(0,100,20) print('\n finding the max/min of a random array') print(randr) print('\nThe max is {} and min is {}'.format(randr.max(), randr.min())) print('The max of {} is located at position {}'.format(randr.max(), randr.argmax())) print('The min of {} is located at position {}'.format(randr.min(), randr.argmin())) print('\nThe type of the array is {}'.format(randr.dtype)) # _*_ coding: utf-8 _*_ types = ['string', 'str', 'list', 'dict', 'bool'] glsl_types = ['struct*', 'int*', 'float*', 'vec2', 'vec3', 'vec4', 'mat2', 'mat3', 'mat4'] glsl_xtypes = ['mat2x2', 'mat3x3', 'mat4x4'] ## others not supported in WebGLSL glsl_types.extend( glsl_xtypes ) glsl_aliases = ['floatPOINTER', 'intPOINTER', 'structPOINTER'] #types.extend( glsl_types ) #types.extend( glsl_aliases ) native_number_types = ['int', 'float', 'double'] ## float and double are the same simd_types = ['float32x4', 'int32x4'] ## dart vector_types = ['float32vec']
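# (added note) simd_types above mirrors Dart's SIMD vector types; together with
# float32vec they are folded into vector_types and then into the master `types`
# list just below, which is presumably what the translator checks identifiers
# against when deciding whether a word in a declaration is a type annotation.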
vector_types.extend( simd_types ) number_types = ['long'] ## requires https://github.com/dcodeIO/Long.js number_types.extend( native_number_types ) types.extend( number_types) types.extend( vector_types ) __whitespace = [' ', '\t'] GO_SPECIAL_CALLS = { 'go' : '__go__', 'spawn' : '__go__', 'channel' : '__go_make_chan__', 'go.channel' : '__go_make_chan__', 'go.array' : '__go__array__', 'go.make' : '__go_make__', 'go.addr' : '__go__addr__', 'go.func' : '__go__func__', } OPERATORS = { 'left' : { u'⟦' : '__getitem__', u'⟪' : '__getpeer__', u'⟅' : '__getserver__', u'⎨' : '__getclient__' }, 'right' : [u'⟧', u'⟫', u'⟆', u'⎬'], } def get_indent(s): indent = [] for char in s: if char in __whitespace: indent.append( char ) else: break return ''.join(indent) def transform_source( source, strip=False, allow_tabs_and_spaces=True ): output = [] output_post = None asm_block = False asm_block_indent = 0 indent_unit = '' # indent sensitive, for line in source.splitlines(): if line.strip().startswith('#'): continue if asm_block: dent = get_indent(line) if asm_block==True: asm_block = 'OK' asm_block_indent = len(dent) if len(dent) < asm_block_indent: asm_block = False asm_block_indent = 0 elif len(dent) > asm_block_indent: raise SyntaxError('invalid asm indentation level') else: assert len(dent)==asm_block_indent if line.strip(): output.append( '%s"%s"' %(dent,line.strip()) ) else: asm_block = False asm_block_indent = 0 continue a = [] hit_go_typedef = False hit_go_funcdef = False gotype = None isindef = False isinlet = False inline_wrap = False inline_ptr = False prevchar = None for i,char in enumerate(line): if isindef is False and len(a) and ''.join(a).strip().startswith('def '): isindef = True if isinlet is False and len(a) and ''.join(a).strip().startswith('let '): isinlet = True nextchar = None j = i+1 while j < len(line): nextchar = line[j] if nextchar.strip(): break j += 1 if prevchar=='=' and char in '&*~': inline_ptr = True a.append('__inline__["' + char) elif inline_ptr and char not in '&*~': inline_ptr = False a.append('"] << ') a.append( char ) elif char == '(' and nextchar in ('&','@'): inline_wrap = True a.append('(inline("') elif char in '),' and inline_wrap: inline_wrap = False for u,_ in enumerate(a): if _=='@': a[u] = 'ref ' if char == ')': a.append('"))') else: a.append('"),') elif not isindef and len(a) and char in OPERATORS['left'] and j==i+1: a.append( '<<__op_left__(u"%s")<<' %char) elif not isindef and len(a) and char in OPERATORS['right']: a.append('<<__op_right__(u"%s")' % char ) ## go array and map syntax ## #elif (not isindef and not isinlet) and len(a) and char==']' and j==i+1 and nextchar!=None and nextchar in '[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': elif not isindef and len(a) and char==']' and j==i+1 and nextchar!=None and nextchar in '[abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ': assert '[' in a hit_go_typedef = True gotype = [] restore = list(a) b = a.pop() while b != '[': gotype.append(b) b = a.pop() gotype.reverse() gotype = ''.join(gotype).strip() ## fixes spaces inside brackets `[ 1 ]string()` if not gotype: if nextchar=='[': a.append('__go__array__<<') else: a.append('__go__array__(') elif gotype.isdigit(): p = ''.join(a).split()[-1].strip() if p.startswith('[') or p.startswith('='): a.append('__go__arrayfixed__(%s,' %gotype) else: hit_go_typedef = False restore.append(char) a = restore elif ''.join(a[-3:])=='map' and gotype != 'func' and a[-4] in __whitespace+['=']: a.pop(); a.pop(); a.pop() a.append('__go__map__(%s,' %gotype) else: 
hit_go_typedef = False restore.append(char) a = restore elif hit_go_funcdef and char==')' and ')' in ''.join(a).split('func(')[-1] and not ''.join(a).strip().startswith('def '): hit_go_funcdef = False a.append('))<<') elif hit_go_typedef and char=='(': if ''.join(a).endswith('func'): hit_go_funcdef = True a.append( '(' ) else: a.append(')<<(') hit_go_typedef = False elif hit_go_typedef and char=='{': a.append(')<<{') hit_go_typedef = False elif hit_go_typedef and char==',': #a.append(', type=True),') ## this breaks function annotations that splits on ',' a.append('<` or `std::something` ct = c.split(':')[-1] if ('<' in ct and '>' in ct) or '::' in ct: c = c.replace(':', ',"') if '=' in c: c = c.replace('=', '", ') else: c += '"' else: c = c.replace(':', ',') if '=' in c: c = c.replace('=', ',') if mut: c += ',mutable=True)' else: c += ')' ## this conflicts with inline javascript and lua, ## TODO make the parser smarter, and skip quoted strings #if '= function(' in c: # k = '= function(' # a,b = c.split(k) # output.append( '@expression(%s)' %a.strip()) # c = 'def __NAMELESS__(' + b indent = [] for char in c: if char in __whitespace: indent.append( char ) else: break indent = ''.join(indent) if ' except ' in c and ':' in c: ## PEP 463 - exception expressions s = c.split(' except ') if len(s) == 2 and '=' in s[0] and ':' in s[1]: s0 = s[0].strip() output.append('%stry: %s' %(indent, s0) ) exception, default = s[1].split(':') output.append('%sexcept %s: %s=%s' %(indent, exception, s0.split('=')[0], default) ) c = '' if not allow_tabs_and_spaces: ## TODO fixme, this is not safe now because we do not skip quoted text indent = len(c) - len(c.lstrip()) if indent_unit == '' and indent: indent_unit = c[0] elif c: if indent and c[0] != indent_unit: raise TabError('inconsistent use of tabs and spaces in indentation in line:', str(i+1) + '\n'+ c) indent = indent_unit*indent if ' def(' in c or ' def (' in c: if ' def(' in c: a,b = c.split(' def(') else: a,b = c.split(' def (') if '=' in a: output.append( indent + '@expression(%s)' %a.split('=')[0]) c = indent + 'def __NAMELESS__(' + b if c.strip().startswith('def ') and '->' in c: ## python3 syntax c, rtype = c.split('->') c += ':' rtype = rtype.strip()[:-1].strip() if rtype.startswith('*'): rtype = '"%s"' %rtype elif rtype.startswith('['): rtype = '"%s"' %rtype if not strip: output.append( indent + '@returns(%s)' %rtype) if c.startswith('import '): if '-' in c: c = c.replace('-', '__DASH__') if '/' in c: c = c.replace('/', '__SLASH__') if '"' in c: c = c.replace('"', '') if ' new ' in c: c = c.replace(' new ', ' __new__>>') if '\tnew ' in c: c = c.replace('\tnew ', ' __new__>>') ## golang if c.strip().startswith('switch '): c = c.replace('switch ', 'with __switch__(').replace(':', '):') if c.strip().startswith('default:'): c = c.replace('default:', 'with __default__:') if c.strip().startswith('select:'): c = c.replace('select:', 'with __select__:') if c.strip().startswith('case ') and c.strip().endswith(':'): c = c.replace('case ', 'with __case__(').replace(':', '):') if '<-' in c: if '=' in c and c.index('=') < c.index('<-'): c = c.replace('<-', '__go__receive__<<') else: ## keeping `=` allows for compatible transform to stacklessPython API, ## this is not used now because it is not required by the Go backend. 
c = c.replace('<-', '= __go__send__<<') #c = c.replace('<-', '<<__go__send__<<') ## c++ `->` ## not required anymore because `.` always becomes `->` #if '->' in c: # a,b = c.split('->') # this_name = a.split()[-1].split('=')[-1].split(':')[-1].split(',')[-1] # method_name = b.split()[0].split('(')[0] # c = c.replace('->'+method_name, '.__leftarrow__.'+method_name) ## TODO should be rightarrow ## python3 annotations if 'def ' in c and c.count(':') > 1: #head, tail = c.split('(') head = c[ : c.index('(') ] tail = c[ c.index('(')+1 : ] args = [] #tail, tailend = tail.split(')') tailend = tail[ tail.rindex(')')+1 : ] tail = tail[ : tail.rindex(')') ] for x in tail.split(','): y = x if ':' in y: kw = None if '=' in y: y, kw = y.split('=') #arg, typedef = y.split(':') arg = y[ : y.index(':') ] typedef = y[ y.index(':')+1 : ] chan = False T = False if len(typedef.strip().split()) >= 2: parts = typedef.strip().split() if 'chan' in parts: ## go syntax chan = True else: ## rust or c++ syntax T = ' '.join(parts[:-1]) #typedef = typedef.strip().split()[-1] typedef = parts[-1] if '*' in arg: arg_name = arg.split('*')[-1] else: arg_name = arg if typedef.startswith('*'): typedef = '"%s"' %typedef.strip() elif typedef.startswith('[]'): #typedef = '"*%s"' %typedef.strip() ## the pointer hack should not be forced here for arrays typedef = '__arg_array__("%s")' %typedef.strip() ## this parses the go syntax and converts it for each backend elif typedef.startswith('map['): #typedef = '"*%s"' %typedef.strip() ## the pointer hack should not be forced here for maps typedef = '__arg_map__("%s")' %typedef.strip() ## this parses the go syntax and converts it for each backend elif typedef.startswith('func('): typedef = '"%s"' %typedef.strip() elif typedef.startswith('lambda('): typedef = '"%s"' %typedef.strip() elif '<' in typedef and '>' in typedef: ## rust and c++ template/generics syntax typedef = '"%s"' %typedef.strip() elif ':' in typedef and typedef.strip().startswith('[') and typedef.strip().endswith(']'): ## verilog [bit:index] syntax typedef = '"%s"' %typedef.strip() if not strip: if T: ## rust or c++ syntax output.append('%s@__typedef__(%s, %s, "%s")' %(indent, arg_name, typedef, T)) elif chan: output.append('%s@typedef_chan(%s=%s)' %(indent, arg_name, typedef)) else: output.append('%s@typedef(%s=%s)' %(indent, arg_name, typedef)) if kw: arg += '=' + kw args.append(arg) else: args.append(x) c = head +'(' + ','.join(args) + ')'+tailend ## restores to python2 syntax elif '::' in c and '<' in c and '>' in c and c.count('<')==c.count('>'): ## c++ syntax `('std::bla')(foo)` ## could auto quote here so `(std::)` becomes `('std::') c = c.replace(">`", ">')<<") left = c.index('::') while c[left]!='`': left -= 1 c = c[ :left-1 ] + " inline('" + c[left+1:] ## jquery ## ## TODO ensure this is not inside quoted text if '$(' in c: c = c.replace('$(', '__DOLLAR__(') if '$' in c and 'def ' in c: ## $ as function parameter c = c.replace('$', '__DOLLAR__') if '$.' 
in c: c = c.replace('$.', '__DOLLAR__.') if c.strip().startswith('nonlocal '): ## Python3 syntax c = c.replace('nonlocal ', 'global ') ## fake nonlocal with global if not c.startswith('except ') and ' as ' in c and (c.strip().startswith('return ') or '(' in c or '=' in c): c = c.replace(' as ', '<<__as__<<') if c.strip().startswith('with asm('): asm_block = True if strip and c.strip().startswith('with ('): c = c.split('with (')[0] + 'if True:' ## regular output output.append( c ) r = '\n'.join(output) return r ## deprecated #int a = 1 #float b = 1.1 #str c = "hi" #int d #int def xxx(): pass ## TODO deprecate #class A: # def __init__(self): # int self.x = 1 # []int self.y = []int() # class:ABS self.z = A() # []A self.z = A() # bool self.b = xxx() # *ABS self.z = A() # #[]*A self.z = A() ## this is ugly test = u''' if True: d = a[ 'somekey' ] except KeyError: 'mydefault' ## <- becomes __go__send__<int: return cb(3) def wrapper(a:int, c:chan int): result = longCalculation(a) c <- result switch a.f(): case 1: print(x) case 2: print(y) default: break select: case x = <- a: y += x case x = <- b: y += x def f(a:int, b:int, c:int) ->int: return a+b+c def f(a:int=100, b:int=100) ->int: return a+b def f(*args:int, **kwargs:int) ->int: return a+b a = []int(x for x in range(3)) y = go.make([]float64, 1000) def plot(id:string, latency:[]float64, xlabel:string, title:string ): pass def f( x:*ABC ) -> *XXX: pass def listpass( a:[]int ): pass def mappass( a:map[string]int ): return ConvertDataUnits[unit_type][unit][1][0] m = map[int]string{ a:'xxx' for a in range(10)} a = xxx[x][y] a = xxx⎨Z⎬ a = xxx ⎨Z⎬⎨zzzz⎬ functions = map[string]func(int)(int){} []int a = go( f() for f in funtions ) ## in go becomes: map[string]int{x,y,z} ## becomes: __go__map__(string, int) << {'x':x, 'y':y, 'z':z} a = map[string]int{ "x":x, "y":y, "z":z } def f(): return [[0]] print f()[0][0] ## in go becomes: []string{x,y,z} ## becomes: __go__array__(string) << (x,y,z) a = []string(x,y,z) ## in go becomes: [3]int{x,y,z} ## becomes: __go__arrayfixed__(3, string) << (x,y,z) a = [ 3 ]int(x,y,z) ## Rust ## f(inline('&mut *x')) f(&mut *x) ## f(inline('ref mut *x'), y.z()) f(@mut *x, y.z()) ## f(x << __as__ << uint) f(x as uint) ## __let__[x :" Vec<(uint, Y)> "]= range(0,1).map().collect() let x : Vec<(uint, Y)> = range(0,1).map().collect() let i i = &**x def f(a:&mut int) ->int: return a def f(): with asm( outputs=b, inputs=a, volatile=True ): movl %1, %%ebx; movl %%ebx, %0; return x let mut x : int = 1 let x : int def __init__(): let self.x : int = x let mut self.y : int = y def call_method( cb:lambda(int)(int) ) ->int: return cb(3) if self.__map[r][c] in (WALL,PERM_WALL): pass ## allow func to be used as a function name, because it is pretty commom and allowed by most backends. 
def func(x=None, callback=None): func( callback=xxx ) x.func( xx=yy ) let mut x = 0 def templated( x : Type ): pass def templated( x : namespace::Type ): pass c.x[0] = def(xx,yy) ->int: return xx+yy mdarray = [][]int() def F() ->[][]int: pass def f(): return A as B print `std::chrono::duration_cast`clock().count() with (some, stuff): pass def f(): let x : map[string]int = {} ''' ## function expressions, deprecated ## TODO: this would be nice to bring back with a proper parser #X.func( cb1=def (): # return 1, # cb2=def (x:int, y:string): # return 2 #) #a = { # 'cb1': def (x,y): # return x+y #} #def xxx(): # b = { # 'cb1': def (x,y): # return x+y, # 'cb2': def (x,y): # return x+y # } #A.do_something( x,y,z, callback=def cb(x): # return x+y #) #A.do_something( x,y,z, callback=def (x,y,z): # return x+y #) if __name__ == '__main__': out = transform_source(test) print(out) import ast print( ast.parse(out) ) # -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. import logging import werkzeug from odoo import http, _ from odoo.addons.auth_signup.models.res_users import SignupError from odoo.addons.web.controllers.main import ensure_db, Home from odoo.http import request _logger = logging.getLogger(__name__) class AuthSignupHome(Home): @http.route() def web_login(self, *args, **kw): ensure_db() response = super(AuthSignupHome, self).web_login(*args, **kw) response.qcontext.update(self.get_auth_signup_config()) if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'): # Redirect if already logged in and redirect param is present return http.redirect_with_hash(request.params.get('redirect')) return response @http.route('/web/signup', type='http', auth='public', website=True) def web_auth_signup(self, *args, **kw): qcontext = self.get_auth_signup_qcontext() if not qcontext.get('token') and not qcontext.get('signup_enabled'): raise werkzeug.exceptions.NotFound() if 'error' not in qcontext and request.httprequest.method == 'POST': try: self.do_signup(qcontext) return super(AuthSignupHome, self).web_login(*args, **kw) except (SignupError, AssertionError), e: if request.env["res.users"].sudo().search([("login", "=", qcontext.get("login"))]): qcontext["error"] = _("Another user is already registered using this email address.") else: _logger.error(e.message) qcontext['error'] = _("Could not create a new account.") return request.render('auth_signup.signup', qcontext) @http.route('/web/reset_password', type='http', auth='public', website=True) def web_auth_reset_password(self, *args, **kw): qcontext = self.get_auth_signup_qcontext() if not qcontext.get('token') and not qcontext.get('reset_password_enabled'): raise werkzeug.exceptions.NotFound() if 'error' not in qcontext and request.httprequest.method == 'POST': try: if qcontext.get('token'): self.do_signup(qcontext) return super(AuthSignupHome, self).web_login(*args, **kw) else: login = qcontext.get('login') assert login, "No login provided." 
request.env['res.users'].sudo().reset_password(login) qcontext['message'] = _("An email has been sent with credentials to reset your password") except SignupError: qcontext['error'] = _("Could not reset your password") _logger.exception('error when resetting password') except Exception, e: qcontext['error'] = e.message return request.render('auth_signup.reset_password', qcontext) def get_auth_signup_config(self): """retrieve the module config (which features are enabled) for the login page""" IrConfigParam = request.env['ir.config_parameter'] return { 'signup_enabled': IrConfigParam.sudo().get_param('auth_signup.allow_uninvited') == 'True', 'reset_password_enabled': IrConfigParam.sudo().get_param('auth_signup.reset_password') == 'True', } def get_auth_signup_qcontext(self): """ Shared helper returning the rendering context for signup and reset password """ qcontext = request.params.copy() qcontext.update(self.get_auth_signup_config()) if qcontext.get('token'): try: # retrieve the user info (name, login or email) corresponding to a signup token token_infos = request.env['res.partner'].sudo().signup_retrieve_info(qcontext.get('token')) for k, v in token_infos.items(): qcontext.setdefault(k, v) except: qcontext['error'] = _("Invalid signup token") qcontext['invalid_token'] = True return qcontext def do_signup(self, qcontext): """ Shared helper that creates a res.partner out of a token """ values = { key: qcontext.get(key) for key in ('login', 'name', 'password') } assert values.values(), "The form was not properly filled in." assert values.get('password') == qcontext.get('confirm_password'), "Passwords do not match; please retype them." supported_langs = [lang['code'] for lang in request.env['res.lang'].sudo().search_read([], ['code'])] if request.lang in supported_langs: values['lang'] = request.lang self._signup_with_values(qcontext.get('token'), values) request.env.cr.commit() def _signup_with_values(self, token, values): db, login, password = request.env['res.users'].sudo().signup(values, token) request.env.cr.commit() # as authenticate will use its own cursor we need to commit the current transaction uid = request.session.authenticate(db, login, password) if not uid: raise SignupError(_('Authentication Failed.')) # -*- coding: utf-8 -*- import terrariumLogging logger = terrariumLogging.logging.getLogger(__name__) import sqlite3 import time import copy import os from terrariumUtils import terrariumUtils class terrariumCollector(object): DATABASE = 'history.db' # Store data every Xth minute. Except switches and doors STORE_MODULO = 1 * 60 def __init__(self,versionid): logger.info('Setting up collector database %s' % (terrariumCollector.DATABASE,)) self.__recovery = False self.__connect() self.__create_database_structure() self.__upgrade(int(versionid.replace('.',''))) logger.info('TerrariumPI Collecter is ready') def __connect(self): self.db = sqlite3.connect(terrariumCollector.DATABASE) # https://www.whoishostingthis.com/compare/sqlite/optimize/ with self.db as db: cur = db.cursor() cur.execute('PRAGMA journal_mode = MEMORY') cur.execute('PRAGMA temp_store = MEMORY') # Line below is not safe for a Pi. As this can/will corrupt the database when the Pi crashes.... 
# cur.execute('PRAGMA synchronous = OFF') self.db.row_factory = sqlite3.Row logger.info('Database connection created to database %s' % (terrariumCollector.DATABASE,)) def __create_database_structure(self): with self.db as db: cur = db.cursor() cur.execute('''CREATE TABLE IF NOT EXISTS sensor_data (id VARCHAR(50), type VARCHAR(15), timestamp INTEGER(4), current FLOAT(4), limit_min FLOAT(4), limit_max FLOAT(4), alarm_min FLOAT(4), alarm_max FLOAT(4), alarm INTEGER(1))''') cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS sensor_data_unique ON sensor_data(id,type,timestamp ASC)') cur.execute('CREATE INDEX IF NOT EXISTS sensor_data_timestamp ON sensor_data(timestamp ASC)') cur.execute('CREATE INDEX IF NOT EXISTS sensor_data_avg ON sensor_data(type,timestamp ASC)') cur.execute('CREATE INDEX IF NOT EXISTS sensor_data_id ON sensor_data(id,timestamp ASC)') cur.execute('''CREATE TABLE IF NOT EXISTS switch_data (id VARCHAR(50), timestamp INTEGER(4), state INTERGER(1), power_wattage FLOAT(2), water_flow FLOAT(2))''') cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS switch_data_unique ON switch_data(id,timestamp ASC)') cur.execute('CREATE INDEX IF NOT EXISTS switch_data_timestamp ON switch_data(timestamp ASC)') cur.execute('CREATE INDEX IF NOT EXISTS switch_data_id ON switch_data(id,timestamp ASC)') cur.execute('''CREATE TABLE IF NOT EXISTS door_data (id INTEGER(4), timestamp INTEGER(4), state TEXT CHECK( state IN ('open','closed') ) NOT NULL DEFAULT 'closed')''') cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS door_data_unique ON door_data(id,timestamp ASC)') cur.execute('CREATE INDEX IF NOT EXISTS door_data_timestamp ON door_data(timestamp ASC)') cur.execute('CREATE INDEX IF NOT EXISTS door_data_id ON door_data(id,timestamp ASC)') cur.execute('''CREATE TABLE IF NOT EXISTS weather_data (timestamp INTEGER(4), wind_speed FLOAT(4), temperature FLOAT(4), pressure FLOAT(4), wind_direction VARCHAR(50), weather VARCHAR(50), icon VARCHAR(50))''') cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS weather_data_unique ON weather_data(timestamp ASC)') cur.execute('''CREATE TABLE IF NOT EXISTS system_data (timestamp INTEGER(4), load_load1 FLOAT(4), load_load5 FLOAT(4), load_load15 FLOAT(4), uptime INTEGER(4), temperature FLOAT(4), cores VARCHAR(25), memory_total INTEGER(6), memory_used INTEGER(6), memory_free INTEGER(6), disk_total INTEGER(6), disk_used INTEGER(6), disk_free INTEGER(6))''') cur.execute('CREATE UNIQUE INDEX IF NOT EXISTS system_data_unique ON system_data(timestamp ASC)') db.commit() def __upgrade(self,to_version): # Set minimal version to 3.0.0 current_version = 300 table_upgrades = {'310' : ['ALTER TABLE system_data ADD COLUMN disk_total INTEGER(6)', 'ALTER TABLE system_data ADD COLUMN disk_used INTEGER(6)', 'ALTER TABLE system_data ADD COLUMN disk_free INTEGER(6)'], '380' : ['DROP INDEX IF EXISTS sensor_data_type', 'CREATE INDEX IF NOT EXISTS sensor_data_avg ON sensor_data (type, timestamp ASC)', 'DROP INDEX IF EXISTS sensor_data_id', 'CREATE INDEX IF NOT EXISTS sensor_data_id ON sensor_data (id, timestamp ASC)', 'DROP INDEX IF EXISTS switch_data_id', 'CREATE INDEX IF NOT EXISTS switch_data_id ON switch_data (id, timestamp ASC)', 'DROP INDEX IF EXISTS door_data_id', 'CREATE INDEX IF NOT EXISTS door_data_id ON door_data (id, timestamp ASC)']} try: with open('.collector.update.{}.sql'.format('393'),'r') as sql_file: table_upgrades['393'] = [line.strip() for line in sql_file.readlines()] os.remove('.collector.update.{}.sql'.format('393')) logger.warning('There are {} sensors that have an updated ID and 
  def __upgrade(self,to_version):
    # Set minimal version to 3.0.0
    current_version = 300
    table_upgrades = {'310' : ['ALTER TABLE system_data ADD COLUMN disk_total INTEGER(6)',
                               'ALTER TABLE system_data ADD COLUMN disk_used INTEGER(6)',
                               'ALTER TABLE system_data ADD COLUMN disk_free INTEGER(6)'],
                      '380' : ['DROP INDEX IF EXISTS sensor_data_type',
                               'CREATE INDEX IF NOT EXISTS sensor_data_avg ON sensor_data (type, timestamp ASC)',
                               'DROP INDEX IF EXISTS sensor_data_id',
                               'CREATE INDEX IF NOT EXISTS sensor_data_id ON sensor_data (id, timestamp ASC)',
                               'DROP INDEX IF EXISTS switch_data_id',
                               'CREATE INDEX IF NOT EXISTS switch_data_id ON switch_data (id, timestamp ASC)',
                               'DROP INDEX IF EXISTS door_data_id',
                               'CREATE INDEX IF NOT EXISTS door_data_id ON door_data (id, timestamp ASC)']}

    try:
      with open('.collector.update.{}.sql'.format('393'),'r') as sql_file:
        table_upgrades['393'] = [line.strip() for line in sql_file.readlines()]
      os.remove('.collector.update.{}.sql'.format('393'))
      logger.warning('There are {} sensors that have an updated ID and need to be renamed in the database. This can take a lot of time! Please wait...'.format(len(table_upgrades['393'])/2))
    except IOError:
      # No updates... just ignore
      pass

    with self.db as db:
      cur = db.cursor()
      db_version = int(cur.execute('PRAGMA user_version').fetchall()[0][0])
      if db_version > current_version:
        current_version = db_version

    if current_version == to_version:
      logger.info('Collector database is up to date')
    elif current_version < to_version:
      logger.info('Collector database is out of date. Running updates from %s to %s' % (current_version,to_version))
      # Execute updates
      with self.db as db:
        cur = db.cursor()
        # Sort numerically so upgrades always run in version order (plain dict
        # iteration order is not guaranteed here)
        for update_version in sorted(table_upgrades, key=int):
          if current_version < int(update_version) <= to_version:
            # Execute all updates between the versions
            for sql_upgrade in table_upgrades[update_version]:
              try:
                cur.execute(sql_upgrade)
                logger.info('Collector database upgrade for version %s succeeded! %s' % (update_version,sql_upgrade))
              except Exception as ex:
                if 'duplicate column name' not in str(ex):
                  logger.error('Error updating collector database. Please contact support. Error message: %s' % (ex,))

            if '380' == update_version:
              self.__upgrade_to_380()

        db.commit()
        if int(to_version) % 10 == 0:
          logger.warning('Cleaning up disk space. This will take a couple of minutes depending on the database size and sd card disk speed.')
          filesize = os.path.getsize(terrariumCollector.DATABASE)
          speed = 2 # MBps
          duration = filesize / 1024.0 / 1024.0 / speed
          logger.warning('Current database is {} in size and with a speed of {}MBps it will take {} to complete'.format(terrariumUtils.format_filesize(filesize),speed,terrariumUtils.format_uptime(duration)))
          cur.execute('VACUUM')

        cur.execute('PRAGMA user_version = ' + str(to_version))
        logger.info('Updated collector database. Set version to: %s' % (to_version,))
        db.commit()

  def __upgrade_to_380(self):
    # This update removes 'duplicate' records that were added for better graphing...
    # Deduplication is now done when the data is collected.
    tables = ['door_data','switch_data']
    with self.db as db:
      for table in tables:
        cur = db.cursor()
        data = cur.execute('SELECT id, timestamp, state FROM ' + table + ' ORDER BY id ASC, timestamp ASC')
        data = data.fetchall()
        prev_state = None
        prev_id = None
        for row in data:
          if prev_id is None:
            prev_id = row['id']
          elif prev_id != row['id']:
            prev_id = row['id']
            prev_state = None

          if prev_state is None:
            prev_state = row['state']
            continue

          if row['state'] == prev_state:
            cur.execute('DELETE FROM ' + table + ' WHERE id = ? AND timestamp = ? AND state = ?', (row['id'],row['timestamp'],row['state']))

          prev_state = row['state']
          prev_id = row['id']

      db.commit()
    logger.info('Collector database upgrade for version 3.8.0 succeeded! Removed duplicate records')
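  # Illustrative sketch (not part of the collector): the version bookkeeping
  # above relies on SQLite's per-database 'user_version' pragma, which defaults
  # to 0 and persists whatever integer is stored in it:
  #
  #   import sqlite3
  #   db = sqlite3.connect(':memory:')
  #   assert db.execute('PRAGMA user_version').fetchone()[0] == 0
  #   db.execute('PRAGMA user_version = 380')   # e.g. version 3.8.0 encoded as 380
  #   assert db.execute('PRAGMA user_version').fetchone()[0] == 380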
  def __recover(self):
    starttime = time.time()
    # Based on: http://www.dosomethinghere.com/2013/02/20/fixing-the-sqlite-error-the-database-disk-image-is-malformed/
    # Enable recovery status
    self.__recovery = True
    logger.warn('TerrariumPI Collector recovery mode is starting! %s', self.__recovery)
    # Create empty sql dump variable
    sqldump = ''
    lines = 0
    with open('.recovery.sql', 'w') as f:
      # Dump SQL data line for line
      for line in self.db.iterdump():
        lines += 1
        sqldump += line + "\n"
        f.write('%s\n' % line)

    logger.warn('TerrariumPI Collector recovery mode created SQL dump of %s lines and %s bytes!', lines, len(sqldump))
    # Delete broken db
    os.remove(terrariumCollector.DATABASE)
    logger.warn('TerrariumPI Collector recovery mode deleted faulty database from disk %s', terrariumCollector.DATABASE)
    # Reconnect will recreate the db
    logger.warn('TerrariumPI Collector recovery mode starts reconnecting database to create a new clean database at %s', terrariumCollector.DATABASE)
    self.__connect()
    self.__create_database_structure()
    cur = self.db.cursor()
    # Load the SQL data back to db
    cur.executescript(sqldump)
    logger.warn('TerrariumPI Collector recovery mode restored the old data in a new database. %s', terrariumCollector.DATABASE)
    # Return to normal mode
    self.__recovery = False
    logger.warn('TerrariumPI Collector recovery mode is finished in %s seconds!', time.time()-starttime)

  def __log_data(self,type,id,newdata):
    timer = time.time()
    if self.__recovery:
      logger.warn('TerrariumPI Collector is in recovery mode. Cannot store new logging data!')
      return

    now = int(time.time())
    if type not in ['switches','door']:
      now -= (now % terrariumCollector.STORE_MODULO)

    try:
      with self.db as db:
        cur = db.cursor()

        if type in ['humidity','moisture','temperature','distance','ph','conductivity','light','uva','uvb','uvi','fertility','co2','volume']:
          cur.execute('REPLACE INTO sensor_data (id, type, timestamp, current, limit_min, limit_max, alarm_min, alarm_max, alarm) VALUES (?,?,?,?,?,?,?,?,?)',
                      (id, type, now, newdata['current'], newdata['limit_min'], newdata['limit_max'], newdata['alarm_min'], newdata['alarm_max'], newdata['alarm']))

        if type in ['weather']:
          cur.execute('REPLACE INTO weather_data (timestamp, wind_speed, temperature, pressure, wind_direction, weather, icon) VALUES (?,?,?,?,?,?,?)',
                      (now, newdata['wind_speed'], newdata['temperature'], newdata['pressure'], newdata['wind_direction'], newdata['weather'], newdata['icon']))

        if type in ['system']:
          cur.execute('REPLACE INTO system_data (timestamp, load_load1, load_load5, load_load15, uptime, temperature, cores, memory_total, memory_used, memory_free, disk_total, disk_used, disk_free) VALUES (?,?,?,?,?,?,?,?,?,?,?,?,?)',
                      (now, newdata['load']['load1'], newdata['load']['load5'], newdata['load']['load15'], newdata['uptime'], newdata['temperature'], newdata['cores'], newdata['memory']['total'], newdata['memory']['used'], newdata['memory']['free'], newdata['disk']['total'], newdata['disk']['used'], newdata['disk']['free']))

        if type in ['switches']:
          if 'time' in newdata:
            now = newdata['time']
          cur.execute('REPLACE INTO switch_data (id, timestamp, state, power_wattage, water_flow) VALUES (?,?,?,?,?)',
                      (id, now, newdata['state'], newdata['current_power_wattage'], newdata['current_water_flow']))

        if type in ['door']:
          cur.execute('REPLACE INTO door_data (id, timestamp, state) VALUES (?,?,?)',
                      (id, now, newdata))

        db.commit()
    except sqlite3.DatabaseError as ex:
      logger.error('TerrariumPI Collector exception! %s', ex)
      if 'database disk image is malformed' == str(ex):
        self.__recover()

    logger.debug('Timing: updating %s data in %s seconds.' % (type,time.time()-timer))
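  # Illustrative note (not part of the collector): for non switch/door data the
  # 'now -= (now % STORE_MODULO)' line above buckets timestamps into one slot
  # per minute, so combined with the UNIQUE indexes each sensor keeps at most
  # one row per minute:
  #
  #   STORE_MODULO = 60
  #   now = 1587400123
  #   assert now - (now % STORE_MODULO) == 1587400080   # both 1587400123 and
  #   now = 1587400139                                  # 1587400139 land in the
  #   assert now - (now % STORE_MODULO) == 1587400080   # same 60-second bucket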
  def stop(self):
    self.db.close()
    logger.info('Shutdown data collector')

  def get_total_power_water_usage(self):
    timer = time.time()
    totals = {'power_wattage' : {'duration' : 0 , 'wattage' : 0.0},
              'water_flow'    : {'duration' : 0 , 'water'   : 0.0}}

    sql = '''SELECT SUM(total_wattage) AS Watt, SUM(total_water) AS Water, MAX(timestamp2)-MIN(timestamp) AS TotalTime
             FROM (
               SELECT t1.timestamp as timestamp,
                      t2.timestamp as timestamp2,
                      t2.timestamp-t1.timestamp AS duration_in_seconds,
                      (t2.timestamp-t1.timestamp) * t1.power_wattage AS total_wattage,
                      ((t2.timestamp-t1.timestamp) / 60.0) * t1.water_flow AS total_water
               FROM switch_data AS t1
                 LEFT JOIN switch_data AS t2
                   ON t2.id = t1.id
                  AND t2.timestamp = (SELECT MIN(timestamp) FROM switch_data WHERE timestamp > t1.timestamp AND id = t1.id)
               WHERE t1.state > 0)'''

    with self.db as db:
      cur = db.cursor()
      cur.execute(sql)
      row = cur.fetchone()
      if row['TotalTime'] is not None and row['Watt'] is not None:
        totals = {'power_wattage' : {'duration' : int(row['TotalTime']) , 'wattage' : float(row['Watt'])},
                  'water_flow'    : {'duration' : int(row['TotalTime']) , 'water'   : float(row['Water'])}}

    logger.debug('Timing: Total power and water usage calculation done in %s seconds.' % ((time.time() - timer),))
    return totals

  def log_switch_data(self,data):
    if data['hardwaretype'] not in ['pwm-dimmer','remote-dimmer','dc-dimmer']:
      # Store normal switches with value 100 indicating full power (aka no dimming)
      data['state'] = (100 if data['state'] == 1 else 0)

    self.__log_data('switches',data['id'],data)

  def log_door_data(self,data):
    self.__log_data('door',data['id'], data['state'])

  def log_weather_data(self,data):
    self.__log_data('weather',None,data)

  def log_sensor_data(self,data):
    self.__log_data(data['type'],data['id'],data)

  def log_system_data(self, data):
    self.__log_data('system',None,data)
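  # Illustrative note (not part of the collector): the SQL above integrates
  # power and water usage over time. Each on-interval contributes
  # duration (s) * wattage (W) in watt-seconds, and (duration / 60.0) * flow
  # (L/min) in liters. For example, a 50W switch that was on for 120 seconds
  # with a 2 L/min water flow yields:
  #
  #   duration, wattage, flow = 120, 50.0, 2.0
  #   assert duration * wattage == 6000.0       # watt-seconds (= 6 kJ)
  #   assert (duration / 60.0) * flow == 4.0    # liters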
  def get_history(self, parameters = None, starttime = None, stoptime = None, exclude_ids = None):
    # Default return object
    timer = time.time()
    history = {}
    periods = {'day'   : 1 * 24,
               'week'  : 7 * 24,
               'month' : 30 * 24,
               'year'  : 365 * 24,
               'all'   : 3650 * 24}
    modulo = terrariumCollector.STORE_MODULO

    # Work on a copy so we never mutate the caller's list (or a shared mutable default)
    parameters = list(parameters or [])
    logtype = parameters[0]
    del parameters[0]

    # Define start time
    if starttime is None:
      starttime = int(time.time())

    # Define stop time
    if stoptime is None:
      stoptime = starttime - (24 * 60 * 60)

    if len(parameters) > 0 and parameters[-1] in periods:
      stoptime = starttime - periods[parameters[-1]] * 60 * 60
      modulo = (periods[parameters[-1]] / 24) * terrariumCollector.STORE_MODULO
      del parameters[-1]

    sql = ''
    filters = (stoptime,starttime,)
    if logtype == 'sensors':
      fields = { 'current' : [], 'alarm_min' : [], 'alarm_max' : [] , 'limit_min' : [], 'limit_max' : []}
      sql = 'SELECT id, type, timestamp,' + ', '.join(list(fields.keys())) + ' FROM sensor_data WHERE timestamp >= ? AND timestamp <= ?'

      if len(parameters) > 0 and parameters[0] == 'average':
        sql = 'SELECT "average" AS id, type, timestamp'
        for field in fields:
          sql = sql + ', AVG(' + field + ') as ' + field
        sql = sql + ' FROM sensor_data WHERE timestamp >= ? AND timestamp <= ?'

        if exclude_ids is not None:
          sql = sql + ' AND sensor_data.id NOT IN (\'' + '\',\''.join(exclude_ids) + '\')'

        if len(parameters) == 2:
          sql = sql + ' AND type = ?'
          filters = (stoptime,starttime,parameters[1],)

        sql = sql + ' GROUP BY type, timestamp'

      elif len(parameters) == 2 and parameters[0] in ['temperature','humidity','distance','ph','conductivity','light','uva','uvb','uvi','fertility']:
        sql = sql + ' AND type = ? AND id = ?'
        filters = (stoptime,starttime,parameters[0],parameters[1],)

      elif len(parameters) == 1 and parameters[0] in ['temperature','humidity','distance','ph','conductivity','light','uva','uvb','uvi','fertility']:
        sql = sql + ' AND type = ?'
        filters = (stoptime,starttime,parameters[0],)

      elif len(parameters) == 1:
        sql = sql + ' AND id = ?'
        filters = (stoptime,starttime,parameters[0],)

    elif logtype == 'switches':
      fields = { 'power_wattage' : [], 'water_flow' : [] }
      sql = '''SELECT id, "switches" AS type, timestamp, timestamp2, state, ''' + ', '.join(list(fields.keys())) + ''' FROM (
                 SELECT t1.id AS id,
                        t1.timestamp AS timestamp,
                        IFNULL(t2.timestamp, ''' + str(starttime) + ''') as timestamp2,
                        t1.power_wattage AS power_wattage,
                        t1.water_flow AS water_flow,
                        t1.state AS state
                 FROM switch_data AS t1
                   LEFT JOIN switch_data AS t2
                     ON t2.id = t1.id
                    AND t2.timestamp = (SELECT MIN(timestamp) FROM switch_data WHERE switch_data.timestamp > t1.timestamp AND switch_data.id = t1.id)
               )
               WHERE timestamp2 > IFNULL((SELECT MAX(timestamp) AS timelimit FROM switch_data AS ttable WHERE ttable.id = id AND ttable.timestamp < ?),0)
                 AND timestamp <= ?'''

      if len(parameters) > 0 and parameters[0] is not None:
        sql = sql + ' AND id = ?'
        filters = (stoptime,starttime,parameters[0],)

    elif logtype == 'doors':
      fields = {'state' : []}
      sql = '''SELECT id, "doors" AS type, timestamp, timestamp2, (CASE WHEN state == 'open' THEN 1 ELSE 0 END) AS state FROM (
                 SELECT t1.id AS id,
                        t1.timestamp AS timestamp,
                        IFNULL(t2.timestamp, ''' + str(starttime) + ''') as timestamp2,
                        t1.state AS state
                 FROM door_data AS t1
                   LEFT JOIN door_data AS t2
                     ON t2.id = t1.id
                    AND t2.timestamp = (SELECT MIN(timestamp) FROM door_data WHERE door_data.timestamp > t1.timestamp AND door_data.id = t1.id)
               )
               WHERE timestamp2 > IFNULL((SELECT MAX(timestamp) AS timelimit FROM door_data AS ttable WHERE ttable.id = id AND ttable.timestamp < ?),0)
                 AND timestamp <= ?'''

      if len(parameters) > 0 and parameters[0] is not None:
        sql = sql + ' AND id = ?'
        filters = (stoptime,starttime,parameters[0],)

    elif logtype == 'weather':
      # NOTE: the weather_data table created above has no 'rain' column, so it is not selected here
      fields = { 'wind_speed' : [], 'temperature' : [], 'pressure' : [] , 'wind_direction' : [], 'weather' : [], 'icon' : []}
      sql = 'SELECT "city" AS id, "weather" AS type, timestamp, ' + ', '.join(list(fields.keys())) + ' FROM weather_data WHERE timestamp >= ? AND timestamp <= ?'

    elif logtype == 'system':
      fields = ['load_load1', 'load_load5', 'load_load15', 'uptime', 'temperature', 'cores', 'memory_total', 'memory_used', 'memory_free', 'disk_total', 'disk_used', 'disk_free']
      if len(parameters) > 0 and parameters[0] == 'load':
        fields = ['load_load1', 'load_load5', 'load_load15']
      elif len(parameters) > 0 and parameters[0] == 'cores':
        fields = ['cores']
      elif len(parameters) > 0 and parameters[0] == 'uptime':
        fields = ['uptime']
      elif len(parameters) > 0 and parameters[0] == 'temperature':
        fields = ['temperature']
      elif len(parameters) > 0 and parameters[0] == 'memory':
        fields = ['memory_total', 'memory_used', 'memory_free']
      elif len(parameters) > 0 and parameters[0] == 'disk':
        fields = ['disk_total', 'disk_used', 'disk_free']

      sql = 'SELECT "system" AS type, timestamp, ' + ', '.join(fields) + ' FROM system_data WHERE timestamp >= ? AND timestamp <= ?'

    sql = sql + ' ORDER BY timestamp ASC, type ASC' + (', id ASC' if logtype != 'system' else '')

    if not self.__recovery:
      try:
        with self.db as db:
          cur = db.cursor()
          for row in cur.execute(sql, filters):
            if row['type'] not in history:
              history[row['type']] = {}

            if logtype == 'system':
              for field in fields:
                system_parts = field.split('_')
                if system_parts[0] not in history[row['type']]:
                  history[row['type']][system_parts[0]] = {} if len(system_parts) == 2 else []

                if len(system_parts) == 2:
                  if system_parts[1] not in history[row['type']][system_parts[0]]:
                    history[row['type']][system_parts[0]][system_parts[1]] = []
                  history[row['type']][system_parts[0]][system_parts[1]].append([row['timestamp'] * 1000,row[field]])
                else:
                  history[row['type']][system_parts[0]].append([row['timestamp'] * 1000,row[field]])

            else:
              if row['id'] not in history[row['type']]:
                history[row['type']][row['id']] = copy.deepcopy(fields)
                if row['type'] in ['switches','doors']:
                  history[row['type']][row['id']]['totals'] = {'duration' : 0, 'power_wattage' : 0, 'water_flow' : 0}

              if row['type'] in ['switches','doors'] and row['state'] > 0 and row['timestamp2'] is not None and '' != row['timestamp2']:
                # Update totals data
                duration = float(row['timestamp2'] - (row['timestamp'] if row['timestamp'] >= stoptime else stoptime))
                history[row['type']][row['id']]['totals']['duration'] += duration
                if 'switches' == row['type']:
                  history[row['type']][row['id']]['totals']['power_wattage'] += duration * float(row['power_wattage'])
                  # Divide by 60 to get liters of water used, as the flow is in liters per minute and the duration in seconds
                  history[row['type']][row['id']]['totals']['water_flow'] += (duration / 60.0) * float(row['water_flow'])

              for field in fields:
                history[row['type']][row['id']][field].append([ (row['timestamp'] if row['timestamp'] >= stoptime else stoptime) * 1000,row[field]])
                if row['type'] in ['switches','doors'] and row['timestamp2'] is not None and '' != row['timestamp2']:
                  # Add extra point for nicer graphing of doors and power switches
                  history[row['type']][row['id']][field].append([row['timestamp2'] * 1000,row[field]])

        logger.debug('Timing: history %s query: %s seconds' % (logtype,time.time()-timer))
      except sqlite3.DatabaseError as ex:
        logger.error('TerrariumPI Collector exception! %s', ex)
        if 'database disk image is malformed' == str(ex):
          self.__recover()

    # In order to get nicer graphs, we add a start and end point based on the selected time range if needed
    if logtype in ['switches','doors'] and logtype not in history and len(parameters) > 0:
      # Create an 'empty' history array if a single id is requested
      history[logtype] = {}
      history[logtype][parameters[0]] = copy.deepcopy(fields)
      for field in fields:
        history[logtype][parameters[0]][field].append([stoptime * 1000,0])
        history[logtype][parameters[0]][field].append([starttime * 1000,0])

    return history
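# Illustrative sketch (not part of the collector): how a caller might query the
# history API above. The sensor/switch ids and version string are made up for
# the example:
#
#   collector = terrariumCollector('3.9.3')
#   # Average temperature across all sensors over the last week:
#   data = collector.get_history(['sensors', 'average', 'temperature', 'week'])
#   # Raw data for one switch (id 'sw1') over the default last 24 hours:
#   data = collector.get_history(['switches', 'sw1'])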
# Copyright (c) 2012 Rackspace Hosting
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
""" Cells RPC Communication Driver """ from oslo_config import cfg import oslo_messaging as messaging from nova.cells import driver from nova import rpc cell_rpc_driver_opts = [ cfg.StrOpt('rpc_driver_queue_base', default='cells.intercell', help="Base queue name to use when communicating between " "cells. Various topics by message type will be " "appended to this.")] CONF = cfg.CONF CONF.register_opts(cell_rpc_driver_opts, group='cells') CONF.import_opt('call_timeout', 'nova.cells.opts', group='cells') rpcapi_cap_opt = cfg.StrOpt('intercell', help='Set a version cap for messages sent between cells services') CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') class CellsRPCDriver(driver.BaseCellsDriver): """Driver for cell<->cell communication via RPC. This is used to setup the RPC consumers as well as to send a message to another cell. One instance of this class will be created for every neighbor cell that we find in the DB and it will be associated with the cell in its CellState. One instance is also created by the cells manager for setting up the consumers. """ def __init__(self, *args, **kwargs): super(CellsRPCDriver, self).__init__(*args, **kwargs) self.rpc_servers = [] self.intercell_rpcapi = InterCellRPCAPI() def start_servers(self, msg_runner): """Start RPC servers. Start up 2 separate servers for handling inter-cell communication via RPC. Both handle the same types of messages, but requests/replies are separated to solve potential deadlocks. (If we used the same queue for both, it's possible to exhaust the RPC thread pool while we wait for replies.. such that we'd never consume a reply.) """ topic_base = CONF.cells.rpc_driver_queue_base proxy_manager = InterCellRPCDispatcher(msg_runner) for msg_type in msg_runner.get_message_types(): target = messaging.Target(topic='%s.%s' % (topic_base, msg_type), server=CONF.host) # NOTE(comstud): We do not need to use the object serializer # on this because object serialization is taken care for us in # the nova.cells.messaging module. server = rpc.get_server(target, endpoints=[proxy_manager]) server.start() self.rpc_servers.append(server) def stop_servers(self): """Stop RPC servers. NOTE: Currently there's no hooks when stopping services to have managers cleanup, so this is not currently called. """ for server in self.rpc_servers: server.stop() def send_message_to_cell(self, cell_state, message): """Use the IntercellRPCAPI to send a message to a cell.""" self.intercell_rpcapi.send_message_to_cell(cell_state, message) class InterCellRPCAPI(object): """Client side of the Cell<->Cell RPC API. The CellsRPCDriver uses this to make calls to another cell. API version history: 1.0 - Initial version. ... Grizzly supports message version 1.0. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 1.0. 
""" VERSION_ALIASES = { 'grizzly': '1.0', } def __init__(self): super(InterCellRPCAPI, self).__init__() self.version_cap = ( self.VERSION_ALIASES.get(CONF.upgrade_levels.intercell, CONF.upgrade_levels.intercell)) self.transports = {} def _get_client(self, next_hop, topic): """Turn the DB information for a cell into a messaging.RPCClient.""" transport = self._get_transport(next_hop) target = messaging.Target(topic=topic, version='1.0') serializer = rpc.RequestContextSerializer(None) return messaging.RPCClient(transport, target, version_cap=self.version_cap, serializer=serializer) def _get_transport(self, next_hop): """NOTE(belliott) Each Transport object contains connection pool state. Maintain references to them to avoid continual reconnects to the message broker. """ transport_url = next_hop.db_info['transport_url'] if transport_url not in self.transports: transport = messaging.get_transport(cfg.CONF, transport_url, rpc.TRANSPORT_ALIASES) self.transports[transport_url] = transport else: transport = self.transports[transport_url] return transport def send_message_to_cell(self, cell_state, message): """Send a message to another cell by JSON-ifying the message and making an RPC cast to 'process_message'. If the message says to fanout, do it. The topic that is used will be 'CONF.rpc_driver_queue_base.'. """ topic_base = CONF.cells.rpc_driver_queue_base topic = '%s.%s' % (topic_base, message.message_type) cctxt = self._get_client(cell_state, topic) if message.fanout: cctxt = cctxt.prepare(fanout=message.fanout) return cctxt.cast(message.ctxt, 'process_message', message=message.to_json()) class InterCellRPCDispatcher(object): """RPC Dispatcher to handle messages received from other cells. All messages received here have come from a sibling cell. Depending on the ultimate target and type of message, we may process the message in this cell, relay the message to another sibling cell, or both. This logic is defined by the message class in the nova.cells.messaging module. """ target = messaging.Target(version='1.0') def __init__(self, msg_runner): """Init the Intercell RPC Dispatcher.""" self.msg_runner = msg_runner def process_message(self, _ctxt, message): """We received a message from another cell. Use the MessageRunner to turn this from JSON back into an instance of the correct Message class. Then process it! """ message = self.msg_runner.message_from_json(message) message.process() # -*- encoding: utf-8 -*- ############################################################################## # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published # by the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see http://www.gnu.org/licenses/. # ############################################################################## { 'name': 'MRP Production add middle stuff lot', 'version': "1.0", "category": "Manufacturing", "author": "OdooMRP team," "AvanzOSC," "Serv. Tecnol. Avanzados - Pedro M. Baeza", 'contributors': ["Daniel Campos ", "Pedro M. 
Baeza ", "Ana Juaristi "], 'website': "http://www.odoomrp.com", 'depends': ["mrp_lot_reserve", "mrp_production_add_middle_stuff"], 'data': ["wizard/addition_wizard_view.xml"], 'installable': True, 'auto_install': True, } from __future__ import print_function, division from time import time import argparse import numpy as np from sklearn.dummy import DummyClassifier from sklearn.datasets import fetch_20newsgroups_vectorized from sklearn.metrics import accuracy_score from sklearn.utils.validation import check_array from sklearn.ensemble import RandomForestClassifier from sklearn.ensemble import ExtraTreesClassifier from sklearn.ensemble import AdaBoostClassifier from sklearn.linear_model import LogisticRegression from sklearn.naive_bayes import MultinomialNB ESTIMATORS = { "dummy": DummyClassifier(), "random_forest": RandomForestClassifier(n_estimators=100, max_features="sqrt", min_samples_split=10), "extra_trees": ExtraTreesClassifier(n_estimators=100, max_features="sqrt", min_samples_split=10), "logistic_regression": LogisticRegression(), "naive_bayes": MultinomialNB(), "adaboost": AdaBoostClassifier(n_estimators=10), } ############################################################################### # Data if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('-e', '--estimators', nargs="+", required=True, choices=ESTIMATORS) args = vars(parser.parse_args()) data_train = fetch_20newsgroups_vectorized(subset="train") data_test = fetch_20newsgroups_vectorized(subset="test") X_train = check_array(data_train.data, dtype=np.float32, accept_sparse="csc") X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr") y_train = data_train.target y_test = data_test.target print("20 newsgroups") print("=============") print("X_train.shape = {0}".format(X_train.shape)) print("X_train.format = {0}".format(X_train.format)) print("X_train.dtype = {0}".format(X_train.dtype)) print("X_train density = {0}" "".format(X_train.nnz / np.product(X_train.shape))) print("y_train {0}".format(y_train.shape)) print("X_test {0}".format(X_test.shape)) print("X_test.format = {0}".format(X_test.format)) print("X_test.dtype = {0}".format(X_test.dtype)) print("y_test {0}".format(y_test.shape)) print() print("Classifier Training") print("===================") accuracy, train_time, test_time = {}, {}, {} for name in sorted(args["estimators"]): clf = ESTIMATORS[name] try: clf.set_params(random_state=0) except (TypeError, ValueError): pass print("Training %s ... " % name, end="") t0 = time() clf.fit(X_train, y_train) train_time[name] = time() - t0 t0 = time() y_pred = clf.predict(X_test) test_time[name] = time() - t0 accuracy[name] = accuracy_score(y_test, y_pred) print("done") print() print("Classification performance:") print("===========================") print() print("%s %s %s %s" % ("Classifier ", "train-time", "test-time", "Accuracy")) print("-" * 44) for name in sorted(accuracy, key=accuracy.get): print("%s %s %s %s" % (name.ljust(16), ("%.4fs" % train_time[name]).center(10), ("%.4fs" % test_time[name]).center(10), ("%.4f" % accuracy[name]).center(10))) print() """ Abstract base class for the various polynomial Classes. The ABCPolyBase class provides the methods needed to implement the common API for the various polynomial classes. It operates as a mixin, but uses the abc module from the stdlib, hence it is only available for Python >= 2.6. 
""" from __future__ import division, absolute_import, print_function from abc import ABCMeta, abstractmethod, abstractproperty from numbers import Number import numpy as np from . import polyutils as pu __all__ = ['ABCPolyBase'] class ABCPolyBase(object): """An abstract base class for series classes. ABCPolyBase provides the standard Python numerical methods '+', '-', '*', '//', '%', 'divmod', '**', and '()' along with the methods listed below. .. versionadded:: 1.9.0 Parameters ---------- coef : array_like Series coefficients in order of increasing degree, i.e., ``(1, 2, 3)`` gives ``1*P_0(x) + 2*P_1(x) + 3*P_2(x)``, where ``P_i`` is the basis polynomials of degree ``i``. domain : (2,) array_like, optional Domain to use. The interval ``[domain[0], domain[1]]`` is mapped to the interval ``[window[0], window[1]]`` by shifting and scaling. The default value is the derived class domain. window : (2,) array_like, optional Window, see domain for its use. The default value is the derived class window. Attributes ---------- coef : (N,) ndarray Series coefficients in order of increasing degree. domain : (2,) ndarray Domain that is mapped to window. window : (2,) ndarray Window that domain is mapped to. Class Attributes ---------------- maxpower : int Maximum power allowed, i.e., the largest number ``n`` such that ``p(x)**n`` is allowed. This is to limit runaway polynomial size. domain : (2,) ndarray Default domain of the class. window : (2,) ndarray Default window of the class. """ __metaclass__ = ABCMeta # Not hashable __hash__ = None # Don't let participate in array operations. Value doesn't matter. __array_priority__ = 1000 # Limit runaway size. T_n^m has degree n*m maxpower = 100 @abstractproperty def domain(self): pass @abstractproperty def window(self): pass @abstractproperty def nickname(self): pass @abstractmethod def _add(self): pass @abstractmethod def _sub(self): pass @abstractmethod def _mul(self): pass @abstractmethod def _div(self): pass @abstractmethod def _pow(self): pass @abstractmethod def _val(self): pass @abstractmethod def _int(self): pass @abstractmethod def _der(self): pass @abstractmethod def _fit(self): pass @abstractmethod def _line(self): pass @abstractmethod def _roots(self): pass @abstractmethod def _fromroots(self): pass def has_samecoef(self, other): """Check if coefficients match. .. versionadded:: 1.6.0 Parameters ---------- other : class instance The other class must have the ``coef`` attribute. Returns ------- bool : boolean True if the coefficients are the same, False otherwise. """ if len(self.coef) != len(other.coef): return False elif not np.all(self.coef == other.coef): return False else: return True def has_samedomain(self, other): """Check if domains match. .. versionadded:: 1.6.0 Parameters ---------- other : class instance The other class must have the ``domain`` attribute. Returns ------- bool : boolean True if the domains are the same, False otherwise. """ return np.all(self.domain == other.domain) def has_samewindow(self, other): """Check if windows match. .. versionadded:: 1.6.0 Parameters ---------- other : class instance The other class must have the ``window`` attribute. Returns ------- bool : boolean True if the windows are the same, False otherwise. """ return np.all(self.window == other.window) def has_sametype(self, other): """Check if types match. .. versionadded:: 1.7.0 Parameters ---------- other : object Class instance. 
    def has_sametype(self, other):
        """Check if types match.

        .. versionadded:: 1.7.0

        Parameters
        ----------
        other : object
            Class instance.

        Returns
        -------
        bool : boolean
            True if other is same class as self

        """
        return isinstance(other, self.__class__)

    def _get_coefficients(self, other):
        """Interpret other as polynomial coefficients.

        The `other` argument is checked to see if it is of the same
        class as self with identical domain and window. If so, return
        its coefficients, otherwise return `other`.

        .. versionadded:: 1.9.0

        Parameters
        ----------
        other : anything
            Object to be checked.

        Returns
        -------
        coef
            The coefficients of `other` if it is a compatible instance
            of ABCPolyBase, otherwise `other`.

        Raises
        ------
        TypeError
            When `other` is an incompatible instance of ABCPolyBase.

        """
        if isinstance(other, ABCPolyBase):
            if not isinstance(other, self.__class__):
                raise TypeError("Polynomial types differ")
            elif not np.all(self.domain == other.domain):
                raise TypeError("Domains differ")
            elif not np.all(self.window == other.window):
                raise TypeError("Windows differ")
            return other.coef
        return other

    def __init__(self, coef, domain=None, window=None):
        [coef] = pu.as_series([coef], trim=False)
        self.coef = coef

        if domain is not None:
            [domain] = pu.as_series([domain], trim=False)
            if len(domain) != 2:
                raise ValueError("Domain has wrong number of elements.")
            self.domain = domain

        if window is not None:
            [window] = pu.as_series([window], trim=False)
            if len(window) != 2:
                raise ValueError("Window has wrong number of elements.")
            self.window = window

    def __repr__(self):
        format = "%s(%s, %s, %s)"
        coef = repr(self.coef)[6:-1]
        domain = repr(self.domain)[6:-1]
        window = repr(self.window)[6:-1]
        name = self.__class__.__name__
        return format % (name, coef, domain, window)

    def __str__(self):
        format = "%s(%s)"
        coef = str(self.coef)
        name = self.nickname
        return format % (name, coef)

    # Pickle and copy

    def __getstate__(self):
        ret = self.__dict__.copy()
        ret['coef'] = self.coef.copy()
        ret['domain'] = self.domain.copy()
        ret['window'] = self.window.copy()
        return ret

    def __setstate__(self, dict):
        self.__dict__ = dict

    # Call

    def __call__(self, arg):
        off, scl = pu.mapparms(self.domain, self.window)
        arg = off + scl*arg
        return self._val(arg, self.coef)

    def __iter__(self):
        return iter(self.coef)

    def __len__(self):
        return len(self.coef)

    # Numeric properties.

    def __neg__(self):
        return self.__class__(-self.coef, self.domain, self.window)

    def __pos__(self):
        return self

    def __add__(self, other):
        try:
            othercoef = self._get_coefficients(other)
            coef = self._add(self.coef, othercoef)
        except TypeError as e:
            raise e
        except Exception:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)

    def __sub__(self, other):
        try:
            othercoef = self._get_coefficients(other)
            coef = self._sub(self.coef, othercoef)
        except TypeError as e:
            raise e
        except Exception:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)

    def __mul__(self, other):
        try:
            othercoef = self._get_coefficients(other)
            coef = self._mul(self.coef, othercoef)
        except TypeError as e:
            raise e
        except Exception:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)
    def __div__(self, other):
        # set to __floordiv__, /, for now.
        return self.__floordiv__(other)

    def __truediv__(self, other):
        # there is no true divide if the rhs is not a Number, although it
        # could return the first n elements of an infinite series.
        # It is hard to see where n would come from, though.
        if not isinstance(other, Number) or isinstance(other, bool):
            form = "unsupported types for true division: '%s', '%s'"
            raise TypeError(form % (type(self), type(other)))
        return self.__floordiv__(other)

    def __floordiv__(self, other):
        res = self.__divmod__(other)
        if res is NotImplemented:
            return res
        return res[0]

    def __mod__(self, other):
        res = self.__divmod__(other)
        if res is NotImplemented:
            return res
        return res[1]

    def __divmod__(self, other):
        try:
            othercoef = self._get_coefficients(other)
            quo, rem = self._div(self.coef, othercoef)
        except (TypeError, ZeroDivisionError) as e:
            raise e
        except Exception:
            return NotImplemented
        quo = self.__class__(quo, self.domain, self.window)
        rem = self.__class__(rem, self.domain, self.window)
        return quo, rem

    def __pow__(self, other):
        coef = self._pow(self.coef, other, maxpower=self.maxpower)
        res = self.__class__(coef, self.domain, self.window)
        return res

    def __radd__(self, other):
        try:
            coef = self._add(other, self.coef)
        except Exception:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)

    def __rsub__(self, other):
        try:
            coef = self._sub(other, self.coef)
        except Exception:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)

    def __rmul__(self, other):
        try:
            coef = self._mul(other, self.coef)
        except Exception:
            return NotImplemented
        return self.__class__(coef, self.domain, self.window)

    def __rdiv__(self, other):
        # set to __rfloordiv__, /, for now.
        return self.__rfloordiv__(other)

    def __rtruediv__(self, other):
        # An instance of ABCPolyBase is not considered a
        # Number.
        return NotImplemented

    def __rfloordiv__(self, other):
        res = self.__rdivmod__(other)
        if res is NotImplemented:
            return res
        return res[0]

    def __rmod__(self, other):
        res = self.__rdivmod__(other)
        if res is NotImplemented:
            return res
        return res[1]

    def __rdivmod__(self, other):
        try:
            quo, rem = self._div(other, self.coef)
        except ZeroDivisionError as e:
            raise e
        except Exception:
            return NotImplemented
        quo = self.__class__(quo, self.domain, self.window)
        rem = self.__class__(rem, self.domain, self.window)
        return quo, rem

    # Enhance me
    # some augmented arithmetic operations could be added here

    def __eq__(self, other):
        res = (isinstance(other, self.__class__) and
               np.all(self.domain == other.domain) and
               np.all(self.window == other.window) and
               (self.coef.shape == other.coef.shape) and
               np.all(self.coef == other.coef))
        return res

    def __ne__(self, other):
        return not self.__eq__(other)

    #
    # Extra methods.
    #

    def copy(self):
        """Return a copy.

        Returns
        -------
        new_series : series
            Copy of self.

        """
        return self.__class__(self.coef, self.domain, self.window)

    def degree(self):
        """The degree of the series.

        .. versionadded:: 1.5.0

        Returns
        -------
        degree : int
            Degree of the series, one less than the number of coefficients.

        """
        return len(self) - 1
""" return self.truncate(deg + 1) def trim(self, tol=0): """Remove trailing coefficients Remove trailing coefficients until a coefficient is reached whose absolute value greater than `tol` or the beginning of the series is reached. If all the coefficients would be removed the series is set to ``[0]``. A new series instance is returned with the new coefficients. The current instance remains unchanged. Parameters ---------- tol : non-negative number. All trailing coefficients less than `tol` will be removed. Returns ------- new_series : series Contains the new set of coefficients. """ coef = pu.trimcoef(self.coef, tol) return self.__class__(coef, self.domain, self.window) def truncate(self, size): """Truncate series to length `size`. Reduce the series to length `size` by discarding the high degree terms. The value of `size` must be a positive integer. This can be useful in least squares where the coefficients of the high degree terms may be very small. Parameters ---------- size : positive int The series is reduced to length `size` by discarding the high degree terms. The value of `size` must be a positive integer. Returns ------- new_series : series New instance of series with truncated coefficients. """ isize = int(size) if isize != size or isize < 1: raise ValueError("size must be a positive integer") if isize >= len(self.coef): coef = self.coef else: coef = self.coef[:isize] return self.__class__(coef, self.domain, self.window) def convert(self, domain=None, kind=None, window=None): """Convert series to a different kind and/or domain and/or window. Parameters ---------- domain : array_like, optional The domain of the converted series. If the value is None, the default domain of `kind` is used. kind : class, optional The polynomial series type class to which the current instance should be converted. If kind is None, then the class of the current instance is used. window : array_like, optional The window of the converted series. If the value is None, the default window of `kind` is used. Returns ------- new_series : series The returned class can be of different type than the current instance and/or have a different domain and/or different window. Notes ----- Conversion between domains and class types can result in numerically ill defined series. Examples -------- """ if kind is None: kind = self.__class__ if domain is None: domain = kind.domain if window is None: window = kind.window return self(kind.identity(domain, window=window)) def mapparms(self): """Return the mapping parameters. The returned values define a linear map ``off + scl*x`` that is applied to the input arguments before the series is evaluated. The map depends on the ``domain`` and ``window``; if the current ``domain`` is equal to the ``window`` the resulting map is the identity. If the coefficients of the series instance are to be used by themselves outside this class, then the linear function must be substituted for the ``x`` in the standard representation of the base polynomials. Returns ------- off, scl : float or complex The mapping function is defined by ``off + scl*x``. Notes ----- If the current domain is the interval ``[l1, r1]`` and the window is ``[l2, r2]``, then the linear mapping function ``L`` is defined by the equations:: L(l1) = l2 L(r1) = r2 """ return pu.mapparms(self.domain, self.window) def integ(self, m=1, k=[], lbnd=None): """Integrate. Return a series instance that is the definite integral of the current series. Parameters ---------- m : non-negative int The number of integrations to perform. 
    def integ(self, m=1, k=[], lbnd=None):
        """Integrate.

        Return a series instance that is the definite integral of the
        current series.

        Parameters
        ----------
        m : non-negative int
            The number of integrations to perform.
        k : array_like
            Integration constants. The first constant is applied to the
            first integration, the second to the second, and so on. The
            list of values must be less than or equal to `m` in length
            and any missing values are set to zero.
        lbnd : Scalar
            The lower bound of the definite integral.

        Returns
        -------
        new_series : series
            A new series representing the integral. The domain is the same
            as the domain of the integrated series.

        """
        off, scl = self.mapparms()
        if lbnd is None:
            lbnd = 0
        else:
            lbnd = off + scl*lbnd
        coef = self._int(self.coef, m, k, lbnd, 1./scl)
        return self.__class__(coef, self.domain, self.window)

    def deriv(self, m=1):
        """Differentiate.

        Return a series instance that is the derivative of the current
        series.

        Parameters
        ----------
        m : non-negative int
            Find the derivative of order `m`.

        Returns
        -------
        new_series : series
            A new series representing the derivative. The domain is the same
            as the domain of the differentiated series.

        """
        off, scl = self.mapparms()
        coef = self._der(self.coef, m, scl)
        return self.__class__(coef, self.domain, self.window)

    def roots(self):
        """Return the roots of the series polynomial.

        Compute the roots for the series. Note that the accuracy of the
        roots decreases the further outside the domain they lie.

        Returns
        -------
        roots : ndarray
            Array containing the roots of the series.

        """
        roots = self._roots(self.coef)
        return pu.mapdomain(roots, self.window, self.domain)

    def linspace(self, n=100, domain=None):
        """Return x, y values at equally spaced points in domain.

        Returns the x, y values at `n` linearly spaced points across the
        domain. Here y is the value of the polynomial at the points x. By
        default the domain is the same as that of the series instance.
        This method is intended mostly as a plotting aid.

        .. versionadded:: 1.5.0

        Parameters
        ----------
        n : int, optional
            Number of point pairs to return. The default value is 100.
        domain : {None, array_like}, optional
            If not None, the specified domain is used instead of that of
            the calling instance. It should be of the form ``[beg, end]``.
            The default is None, in which case the class domain is used.

        Returns
        -------
        x, y : ndarray
            x is equal to linspace(self.domain[0], self.domain[1], n) and
            y is the series evaluated at each element of x.

        """
        if domain is None:
            domain = self.domain
        x = np.linspace(domain[0], domain[1], n)
        y = self(x)
        return x, y

    @classmethod
    def fit(cls, x, y, deg, domain=None, rcond=None, full=False, w=None,
            window=None):
        """Least squares fit to data.

        Return a series instance that is the least squares fit to the data
        `y` sampled at `x`. The domain of the returned instance can be
        specified and this will often result in a superior fit with less
        chance of ill conditioning.

        Parameters
        ----------
        x : array_like, shape (M,)
            x-coordinates of the M sample points ``(x[i], y[i])``.
        y : array_like, shape (M,) or (M, K)
            y-coordinates of the sample points. Several data sets of sample
            points sharing the same x-coordinates can be fitted at once by
            passing in a 2D-array that contains one dataset per column.
        deg : int
            Degree of the fitting polynomial.
        domain : {None, [beg, end], []}, optional
            Domain to use for the returned series. If ``None``, then a
            minimal domain that covers the points `x` is chosen. If