def NotifyFlyingFE(self, proto, port, domain, be=None):
self.Notify(('Flying: %s://%s%s/'
) % (proto, domain, port and ':'+port or ''),
prefix='~<>', color=self.CYAN)
def StartListingBackEnds(self): pass
def EndListingBackEnds(self): pass
def NotifyBE(self, bid, be, has_ssl, dpaths,
is_builtin=False, fingerprint=None):
domain, port, proto = be[BE_DOMAIN], be[BE_PORT], be[BE_PROTO]
prox = (proto == 'raw') and ' (HTTP proxied)' or ''
if proto == 'raw' and port in ('22', 22): proto = 'ssh'
if has_ssl and proto == 'http':
proto = 'https'
url = '%s://%s%s' % (proto, domain, port and (':%s' % port) or '')
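# Only backends whose status includes the OK bit are reported below: shown as
# 'Trying' (yellow) while any error bit is also set, 'Flying' (cyan) once healthy.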
if be[BE_STATUS] == BE_STATUS_UNKNOWN: return
if be[BE_STATUS] & BE_STATUS_OK:
if be[BE_STATUS] & BE_STATUS_ERR_ANY:
status = 'Trying'
color = self.YELLOW
prefix = ' '
else:
status = 'Flying'
color = self.CYAN
prefix = '~<>'
else:
return
if is_builtin:
backend = 'builtin HTTPD'
else:
backend = '%s:%s' % (be[BE_BHOST], be[BE_BPORT])
self.Notify(('%s %s as %s/%s'
) % (status, backend, url, prox),
prefix=prefix, color=color)
if status == 'Flying':
for dp in sorted(dpaths.keys()):
self.Notify(' - %s%s' % (url, dp), color=self.BLUE)
if fingerprint and proto.startswith('https'):
self.Notify(' - Fingerprint=%s' % fingerprint,
color=self.WHITE)
self.Notify((' IMPORTANT: For maximum security, use a secure channel'
' to inform your'),
color=self.YELLOW)
self.Notify(' guests what fingerprint to expect.',
color=self.YELLOW)
def Status(self, tag, message=None, color=None): pass
def ExplainError(self, error, title, subject=None):
if error == 'pleaselogin':
self.Tell([title, '', 'You already have an account. Log in to continue.'
], error=True)
elif error == 'email':
self.Tell([title, '', 'Invalid e-mail address. Please try again?'
], error=True)
elif error == 'honey':
self.Tell([title, '', 'Hmm. Somehow, you triggered the spam-filter.'
], error=True)
elif error in ('domaintaken', 'domain', 'subdomain'):
self.Tell([title, '',
'Sorry, that domain (%s) is unavailable.' % subject,
'',
'If you registered it already, perhaps you need to log on with',
'a different e-mail address?',
''
], error=True)
elif error == 'checkfailed':
self.Tell([title, '',
'That domain (%s) is not correctly set up.' % subject
], error=True)
elif error == 'network':
self.Tell([title, '',
'There was a problem communicating with %s.' % subject, '',
'Please verify that you have a working'
' Internet connection and try again!'
], error=True)
else:
self.Tell([title, 'Error code: %s' % error, 'Try again later?'
], error=True)
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
import mock
from twisted.trial import unittest
from twisted.internet import defer, reactor
from twisted.python import failure
from buildbot.test.util import compat
from buildbot.test.fake import fakedb, fakemaster
from buildbot.process import buildrequestdistributor
from buildbot.util import epoch2datetime
from buildbot.util.eventual import fireEventually
from buildbot.db import buildrequests
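# Helper used with mock.patch('random.choice', nth_slave(n)) in the tests below:
# returns a chooser that sorts the candidate slaves by name and picks the n-th
# one, making slave selection deterministic instead of random.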
def nth_slave(n):
def pick_nth_by_name(lst):
slaves = lst[:]
slaves.sort(cmp=lambda a,b: cmp(a.name, b.name))
return slaves[n]
return pick_nth_by_name
class SkipSlavesThatCantGetLock(buildrequestdistributor.BasicBuildChooser):
"""This class disables the 'rejectedSlaves' feature"""
def __init__(self, *args, **kwargs):
buildrequestdistributor.BasicBuildChooser.__init__(self, *args, **kwargs)
self.rejectedSlaves = None # disable this feature
class Test(unittest.TestCase):
def setUp(self):
self.botmaster = mock.Mock(name='botmaster')
self.botmaster.builders = {}
def prioritizeBuilders(master, builders):
# simple sort-by-name by default
return sorted(builders, lambda b1,b2 : cmp(b1.name, b2.name))
self.master = self.botmaster.master = mock.Mock(name='master')
self.master.config.prioritizeBuilders = prioritizeBuilders
self.master.db = fakedb.FakeDBConnector(self)
self.brd = buildrequestdistributor.BuildRequestDistributor(self.botmaster)
self.brd.startService()
# TODO: this is a terrible way to detect the "end" of the test -
# it regularly completes too early after a simple modification of
# a test. Is there a better way?
self.quiet_deferred = defer.Deferred()
def _quiet():
if self.quiet_deferred:
d, self.quiet_deferred = self.quiet_deferred, None
d.callback(None)
else:
self.fail("loop has already gone quiet once")
self.brd._quiet = _quiet
self.builders = {}
def tearDown(self):
if self.brd.running:
return self.brd.stopService()
def checkAllCleanedUp(self):
# check that the BRD didn't end with a stuck lock or in the 'active' state
# (which would mean it ended without unwinding correctly)
self.assertEqual(self.brd.pending_builders_lock.locked, False)
self.assertEqual(self.brd.activity_lock.locked, False)
self.assertEqual(self.brd.active, False)
def useMock_maybeStartBuildsOnBuilder(self):
# sets up a mock "maybeStartBuildsOnBuilder" so we can track
# how the method gets invoked
# keep track of the calls to brd.maybeStartBuildsOnBuilder
self.maybeStartBuildsOnBuilder_calls = []
def maybeStartBuildsOnBuilder(bldr):
self.assertIdentical(self.builders[bldr.name], bldr)
self.maybeStartBuildsOnBuilder_calls.append(bldr.name)
return fireEventually()
self.brd._maybeStartBuildsOnBuilder = maybeStartBuildsOnBuilder
def addBuilders(self, names):
self.startedBuilds = []
for name in names:
bldr = mock.Mock(name=name)
bldr.name = name
self.botmaster.builders[name] = bldr
self.builders[name] = bldr
def maybeStartBuild(*args):
self.startedBuilds.append((name, args))
d = defer.Deferred()
reactor.callLater(0, d.callback, None)
return d
bldr.maybeStartBuild = maybeStartBuild
bldr.canStartWithSlavebuilder = lambda _: True
bldr.slaves = []
bldr.getAvailableSlaves = lambda : [ s for s in bldr.slaves if s.isAvailable ]
def removeBuilder(self, name):
del self.builders[name]
del self.botmaster.builders[name]
# tests
def test_maybeStartBuildsOn_simple(self):
self.useMock_maybeStartBuildsOnBuilder()
self.addBuilders(['bldr1'])
self.brd.maybeStartBuildsOn(['bldr1'])
def check(_):
self.assertEqual(self.maybeStartBuildsOnBuilder_calls, ['bldr1'])
self.checkAllCleanedUp()
self.quiet_deferred.addCallback(check)
return self.quiet_deferred
def test_maybeStartBuildsOn_parallel(self):
# test 15 "parallel" invocations of maybeStartBuildsOn, with a
# _sortBuilders that takes a while. This is a regression test for bug
# #1979.
builders = ['bldr%02d' % i for i in xrange(15) ]
def slow_sorter(master, bldrs):
bldrs.sort(lambda b1, b2 : cmp(b1.name, b2.name))
d = defer.Deferred()
reactor.callLater(0, d.callback, bldrs)
def done(_):
return _
d.addCallback(done)
return d
self.master.config.prioritizeBuilders = slow_sorter
self.useMock_maybeStartBuildsOnBuilder()
self.addBuilders(builders)
for bldr in builders:
self.brd.maybeStartBuildsOn([bldr])
def check(_):
self.assertEqual(self.maybeStartBuildsOnBuilder_calls, builders)
self.checkAllCleanedUp()
self.quiet_deferred.addCallback(check)
return self.quiet_deferred
@compat.usesFlushLoggedErrors
def test_maybeStartBuildsOn_exception(self):
self.addBuilders(['bldr1'])
def _maybeStartBuildsOnBuilder(n):
# fail slowly, so that the activity loop doesn't go quiet too soon
d = defer.Deferred()
reactor.callLater(0,
d.errback, failure.Failure(RuntimeError("oh noes")))
return d
self.brd._maybeStartBuildsOnBuilder = _maybeStartBuildsOnBuilder
self.brd.maybeStartBuildsOn(['bldr1'])
def check(_):
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
self.checkAllCleanedUp()
self.quiet_deferred.addCallback(check)
return self.quiet_deferred
def test_maybeStartBuildsOn_collapsing(self):
self.useMock_maybeStartBuildsOnBuilder()
self.addBuilders(['bldr1', 'bldr2', 'bldr3'])
self.brd.maybeStartBuildsOn(['bldr3'])
self.brd.maybeStartBuildsOn(['bldr2', 'bldr1'])
self.brd.maybeStartBuildsOn(['bldr4']) # should be ignored
self.brd.maybeStartBuildsOn(['bldr2']) # already queued - ignored
self.brd.maybeStartBuildsOn(['bldr3', 'bldr2'])
def check(_):
# bldr3 gets invoked twice, since it's considered to have started
# already when the first call to maybeStartBuildsOn returns
self.assertEqual(self.maybeStartBuildsOnBuilder_calls,
['bldr3', 'bldr1', 'bldr2', 'bldr3'])
self.checkAllCleanedUp()
self.quiet_deferred.addCallback(check)
return self.quiet_deferred
def test_maybeStartBuildsOn_builders_missing(self):
self.useMock_maybeStartBuildsOnBuilder()
self.addBuilders(['bldr1', 'bldr2', 'bldr3'])
self.brd.maybeStartBuildsOn(['bldr1', 'bldr2', 'bldr3'])
# bldr1 has already been queued to run, so surreptitiously remove the other
# two - nothing should crash, but the removed builders should not run
self.removeBuilder('bldr2')
self.removeBuilder('bldr3')
def check(_):
self.assertEqual(self.maybeStartBuildsOnBuilder_calls, ['bldr1'])
self.checkAllCleanedUp()
self.quiet_deferred.addCallback(check)
return self.quiet_deferred
def do_test_sortBuilders(self, prioritizeBuilders, oldestRequestTimes,
expected, returnDeferred=False):
self.useMock_maybeStartBuildsOnBuilder()
self.addBuilders(oldestRequestTimes.keys())
self.master.config.prioritizeBuilders = prioritizeBuilders
def mklambda(t): # work around variable-binding issues
if returnDeferred:
return lambda : defer.succeed(t)
else:
return lambda : t
for n, t in oldestRequestTimes.iteritems():
if t is not None:
t = epoch2datetime(t)
self.builders[n].getOldestRequestTime = mklambda(t)
d = self.brd._sortBuilders(oldestRequestTimes.keys())
def check(result):
self.assertEqual(result, expected)
self.checkAllCleanedUp()
d.addCallback(check)
return d
def test_sortBuilders_default_sync(self):
return self.do_test_sortBuilders(None, # use the default sort
dict(bldr1=777, bldr2=999, bldr3=888),
['bldr1', 'bldr3', 'bldr2'])
def test_sortBuilders_default_async(self):
return self.do_test_sortBuilders(None, # use the default sort
dict(bldr1=777, bldr2=999, bldr3=888),
['bldr1', 'bldr3', 'bldr2'],
returnDeferred=True)
def test_sortBuilders_default_None(self):
return self.do_test_sortBuilders(None, # use the default sort
dict(bldr1=777, bldr2=None, bldr3=888),
['bldr1', 'bldr3', 'bldr2'])
def test_sortBuilders_custom(self):
def prioritizeBuilders(master, builders):
self.assertIdentical(master, self.master)
return sorted(builders, key=lambda b : b.name)
return self.do_test_sortBuilders(prioritizeBuilders,
dict(bldr1=1, bldr2=1, bldr3=1),
['bldr1', 'bldr2', 'bldr3'])
def test_sortBuilders_custom_async(self):
def prioritizeBuilders(master, builders):
self.assertIdentical(master, self.master)
return defer.succeed(sorted(builders, key=lambda b : b.name))
return self.do_test_sortBuilders(prioritizeBuilders,
dict(bldr1=1, bldr2=1, bldr3=1),
['bldr1', 'bldr2', 'bldr3'])
@compat.usesFlushLoggedErrors
def test_sortBuilders_custom_exception(self):
self.useMock_maybeStartBuildsOnBuilder()
self.addBuilders(['x', 'y'])
def fail(m, b):
raise RuntimeError("oh noes")
self.master.config.prioritizeBuilders = fail
# expect to get the builders back in the same order in the event of an
# exception
d = self.brd._sortBuilders(['y', 'x'])
def check(result):
self.assertEqual(result, ['y', 'x'])
# and expect the exception to be logged
self.assertEqual(len(self.flushLoggedErrors(RuntimeError)), 1)
d.addCallback(check)
return d
def test_stopService(self):
# check that stopService waits for a builder run to complete, but does not
# allow a subsequent run to start
self.useMock_maybeStartBuildsOnBuilder()
self.addBuilders(['A', 'B'])
oldMSBOB = self.brd._maybeStartBuildsOnBuilder
def maybeStartBuildsOnBuilder(bldr):
d = oldMSBOB(bldr)
stop_d = self.brd.stopService()
stop_d.addCallback(lambda _ :
self.maybeStartBuildsOnBuilder_calls.append('(stopped)'))
d.addCallback(lambda _ :
self.maybeStartBuildsOnBuilder_calls.append('finished'))
return d
self.brd._maybeStartBuildsOnBuilder = maybeStartBuildsOnBuilder
# start both builds; A should start and complete *before* the service stops,
# and B should not run.
self.brd.maybeStartBuildsOn(['A', 'B'])
def check(_):
self.assertEqual(self.maybeStartBuildsOnBuilder_calls,
['A', 'finished', '(stopped)'])
self.quiet_deferred.addCallback(check)
return self.quiet_deferred
class TestMaybeStartBuilds(unittest.TestCase):
def setUp(self):
self.botmaster = mock.Mock(name='botmaster')
self.botmaster.builders = {}
self.master = self.botmaster.master = mock.Mock(name='master')
self.master.db = fakedb.FakeDBConnector(self)
class getCache(object):
def get_cache(self):
return self
def get(self, name):
return
self.master.caches = fakemaster.FakeCaches()
self.brd = buildrequestdistributor.BuildRequestDistributor(self.botmaster)
self.brd.startService()
self.startedBuilds = []
# TODO: this is a terrible way to detect the "end" of the test -
# it regularly completes too early after a simple modification of
# a test. Is there a better way?
self.quiet_deferred = defer.Deferred()
def _quiet():
if self.quiet_deferred:
d, self.quiet_deferred = self.quiet_deferred, None
d.callback(None)
else:
self.fail("loop has already gone quiet once")
self.brd._quiet = _quiet
self.bldr = self.createBuilder('A')
# a collection of rows that would otherwise clutter up every test
self.base_rows = [
fakedb.SourceStampSet(id=21),
fakedb.SourceStamp(id=21, sourcestampsetid=21),
fakedb.Buildset(id=11, reason='because', sourcestampsetid=21),
]
def tearDown(self):
if self.brd.running:
return self.brd.stopService()
def createBuilder(self, name):
bldr = mock.Mock(name=name)
bldr.name = name
self.botmaster.builders[name] = bldr
def maybeStartBuild(slave, builds):
self.startedBuilds.append((slave.name, builds))
return defer.succeed(True)
bldr.maybeStartBuild = maybeStartBuild
bldr.canStartWithSlavebuilder = lambda _: True
bldr.getMergeRequestsFn = lambda : False
bldr.slaves = []
bldr.getAvailableSlaves = lambda : [ s for s in bldr.slaves if s.isAvailable() ]
bldr.config.nextSlave = None
bldr.config.nextBuild = None
def canStartBuild(*args):
can = bldr.config.canStartBuild
return not can or can(*args)
bldr.canStartBuild = canStartBuild
return bldr
def addSlaves(self, slavebuilders):
"""C{slaves} maps name : available"""
for name, avail in slavebuilders.iteritems():
sb = mock.Mock(spec=['isAvailable'], name=name)
sb.name = name
sb.isAvailable.return_value = avail
self.bldr.slaves.append(sb)
def assertBuildsStarted(self, exp):
# munge builds_started into (slave, [brids])
builds_started = [
(slave, [br.id for br in breqs])
for (slave, breqs) in self.startedBuilds ]
self.assertEqual(sorted(builds_started), sorted(exp))
# _maybeStartBuildsOnBuilder
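# Shared helper: inserts the given rows into the fake db, runs the distributor
# once against self.bldr, then asserts which build requests were claimed and
# which builds were started.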
@defer.inlineCallbacks
def do_test_maybeStartBuildsOnBuilder(self, rows=[], exp_claims=[], exp_builds=[]):
yield self.master.db.insertTestData(rows)
yield self.brd._maybeStartBuildsOnBuilder(self.bldr)
self.master.db.buildrequests.assertMyClaims(exp_claims)
self.assertBuildsStarted(exp_builds)
@defer.inlineCallbacks
def test_no_buildrequests(self):
self.addSlaves({'test-slave11':1})
yield self.do_test_maybeStartBuildsOnBuilder(exp_claims=[], exp_builds=[])
@defer.inlineCallbacks
def test_no_slavebuilders(self):
rows = [
fakedb.BuildRequest(id=11, buildsetid=10, buildername="bldr"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
@defer.inlineCallbacks
def test_limited_by_slaves(self):
self.master.config.mergeRequests = False
self.addSlaves({'test-slave1':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10], exp_builds=[('test-slave1', [10])])
@defer.inlineCallbacks
def test_sorted_by_submit_time(self):
self.master.config.mergeRequests = False
# same as "limited_by_slaves" but with rows swapped
self.addSlaves({'test-slave1':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10], exp_builds=[('test-slave1', [10])])
@defer.inlineCallbacks
def test_limited_by_available_slaves(self):
self.master.config.mergeRequests = False
self.addSlaves({'test-slave1':0, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10], exp_builds=[('test-slave2', [10])])
@defer.inlineCallbacks
def test_slow_db(self):
# test what happens if the "getBuildRequests" fetch takes a "long time"
self.master.config.mergeRequests = False
self.addSlaves({'test-slave1':1})
# wrap to simulate a "long" db access
old_getBuildRequests = self.master.db.buildrequests.getBuildRequests
def longGetBuildRequests(*args, **kwargs):
res_d = old_getBuildRequests(*args, **kwargs)
long_d = defer.Deferred()
long_d.addCallback(lambda _: res_d)
reactor.callLater(0, long_d.callback, None)
return long_d
self.master.db.buildrequests.getBuildRequests = longGetBuildRequests
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10], exp_builds=[('test-slave1', [10])])
@mock.patch('random.choice', nth_slave(-1))
@defer.inlineCallbacks
def test_limited_by_canStartBuild(self):
"""Set the 'canStartBuild' value in the config to something
that limits the possible options."""
self.master.config.mergeRequests = False
slaves_attempted = []
def _canStartWithSlavebuilder(slavebuilder):
slaves_attempted.append(slavebuilder.name)
return True
self.bldr.canStartWithSlavebuilder = _canStartWithSlavebuilder
pairs_tested = []
def _canStartBuild(slave, breq):
result = (slave.name, breq.id)
pairs_tested.append(result)
allowed = [
("test-slave1", 10),
("test-slave3", 11),
]
return result in allowed
self.bldr.config.canStartBuild = _canStartBuild
self.addSlaves({'test-slave1':1, 'test-slave2':1, 'test-slave3':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
fakedb.BuildRequest(id=12, buildsetid=11, buildername="A",
submitted_at=140000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10, 11], exp_builds=[('test-slave1', [10]), ('test-slave3', [11])])
self.assertEqual(slaves_attempted, ['test-slave3', 'test-slave2', 'test-slave1'])
# we expect brids to be tried in order (10-11-12), with each request tried
# against the available slaves in reverse name order (3-2-1), due to nth_slave(-1)
self.assertEqual(pairs_tested, [
('test-slave3', 10),
('test-slave2', 10),
('test-slave1', 10),
('test-slave3', 11),
('test-slave2', 12)])
@mock.patch('random.choice', nth_slave(-1))
@mock.patch('buildbot.process.buildrequestdistributor.BuildRequestDistributor.BuildChooser', SkipSlavesThatCantGetLock)
@defer.inlineCallbacks
def test_limited_by_canStartBuild_deferreds(self):
"""Another variant that:
* returns Defered types,
* use 'canStartWithSlavebuilder' to reject one of the slaves
* patch using SkipSlavesThatCantGetLock to disable the 'rejectedSlaves' feature"""
self.master.config.mergeRequests = False
slaves_attempted = []
def _canStartWithSlavebuilder(slavebuilder):
slaves_attempted.append(slavebuilder.name)
allowed = slavebuilder.name in ['test-slave2', 'test-slave1']
return defer.succeed(allowed) # a Deferred here!
self.bldr.canStartWithSlavebuilder = _canStartWithSlavebuilder
pairs_tested = []
def _canStartBuild(slave, breq):
result = (slave.name, breq.id)
pairs_tested.append(result)
allowed = [
("test-slave1", 10),
("test-slave3", 11),
]
return defer.succeed(result in allowed)
self.bldr.config.canStartBuild = _canStartBuild
self.addSlaves({'test-slave1':1, 'test-slave2':1, 'test-slave3':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
fakedb.BuildRequest(id=12, buildsetid=11, buildername="A",
submitted_at=140000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10], exp_builds=[('test-slave1', [10])])
self.assertEqual(slaves_attempted, ['test-slave3', 'test-slave2', 'test-slave1'])
# we expect brids in order (10-11-12),
# with slave3 skipped, and slave2 unable to pair
self.assertEqual(pairs_tested, [
('test-slave2', 10),
('test-slave1', 10),
('test-slave2', 11),
('test-slave2', 12)])
@mock.patch('random.choice', nth_slave(-1))
@defer.inlineCallbacks
def test_limited_by_canStartWithSlavebuilder(self):
self.master.config.mergeRequests = False
slaves_attempted = []
def _canStartWithSlavebuilder(slavebuilder):
slaves_attempted.append(slavebuilder.name)
return (slavebuilder.name == 'test-slave3')
self.bldr.canStartWithSlavebuilder = _canStartWithSlavebuilder
self.addSlaves({'test-slave1':0, 'test-slave2':1, 'test-slave3':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10, 11], exp_builds=[('test-slave3', [10]), ('test-slave2', [11])])
self.assertEqual(slaves_attempted, ['test-slave3', 'test-slave2'])
@mock.patch('random.choice', nth_slave(-1))
@defer.inlineCallbacks
def test_unlimited(self):
self.master.config.mergeRequests = False
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[10, 11],
exp_builds=[('test-slave2', [10]), ('test-slave1', [11])])
@mock.patch('random.choice', nth_slave(-1))
@defer.inlineCallbacks
def test_bldr_maybeStartBuild_fails_always(self):
# the builder fails to start the build; we'll see that the build
# was requested, but the brids will get reclaimed
def maybeStartBuild(slave, builds):
self.startedBuilds.append((slave.name, builds))
return defer.succeed(False)
self.bldr.maybeStartBuild = maybeStartBuild
self.master.config.mergeRequests = False
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], # reclaimed so none taken!
exp_builds=[('test-slave2', [10]), ('test-slave1', [11])])
@mock.patch('random.choice', nth_slave(-1))
@defer.inlineCallbacks
def test_bldr_maybeStartBuild_fails_once(self):
# the builder fails to start the first build, so its brid gets reclaimed;
# a second attempt then succeeds
def maybeStartBuild(slave, builds, _fail=[False]):
self.startedBuilds.append((slave.name, builds))
ret = _fail[0]
_fail[0] = True
return defer.succeed(ret)
self.bldr.maybeStartBuild = maybeStartBuild
self.master.config.mergeRequests = False
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000),
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.master.db.insertTestData(rows)
# first time around, only #11 stays claimed
yield self.brd._maybeStartBuildsOnBuilder(self.bldr)
self.master.db.buildrequests.assertMyClaims([11]) # request 10 was reclaimed; only 11 stays claimed
self.assertBuildsStarted([('test-slave2', [10]), ('test-slave1', [11])])
# second time around, request #10 succeeds and is claimed as well
yield self.brd._maybeStartBuildsOnBuilder(self.bldr)
self.master.db.buildrequests.assertMyClaims([10, 11])
self.assertBuildsStarted([('test-slave2', [10]), ('test-slave1', [11]), ('test-slave2', [10])])
@mock.patch('random.choice', nth_slave(1))
@defer.inlineCallbacks
def test_limited_by_requests(self):
self.master.config.mergeRequests = False
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[11], exp_builds=[('test-slave2', [11])])
@defer.inlineCallbacks
def test_nextSlave_None(self):
self.bldr.config.nextSlave = lambda _1,_2 : defer.succeed(None)
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
@defer.inlineCallbacks
def test_nextSlave_bogus(self):
self.bldr.config.nextSlave = lambda _1,_2 : defer.succeed(mock.Mock())
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
@defer.inlineCallbacks
def test_nextSlave_fails(self):
def nextSlaveRaises(*args):
raise RuntimeError("xx")
self.bldr.config.nextSlave = nextSlaveRaises
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
@defer.inlineCallbacks
def test_nextBuild_None(self):
self.bldr.config.nextBuild = lambda _1,_2 : defer.succeed(None)
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
@defer.inlineCallbacks
def test_nextBuild_bogus(self):
self.bldr.config.nextBuild = lambda _1,_2 : mock.Mock()
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
@defer.inlineCallbacks
def test_nextBuild_fails(self):
def nextBuildRaises(*args):
raise RuntimeError("xx")
self.bldr.config.nextBuild = nextBuildRaises
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
# check concurrency edge cases
@mock.patch('random.choice', nth_slave(0))
@defer.inlineCallbacks
def test_claim_race(self):
# fake a race condition on the buildrequests table
old_claimBuildRequests = self.master.db.buildrequests.claimBuildRequests
def claimBuildRequests(brids):
# first, ensure this only happens the first time
self.master.db.buildrequests.claimBuildRequests = old_claimBuildRequests
# claim brid 10 for some other master
assert 10 in brids
self.master.db.buildrequests.fakeClaimBuildRequest(10, 136000,
objectid=9999) # some other objectid
# ..and fail
return defer.fail(buildrequests.AlreadyClaimedError())
self.master.db.buildrequests.claimBuildRequests = claimBuildRequests
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=10, buildsetid=11, buildername="A",
submitted_at=130000), # will turn out to be claimed!
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A",
submitted_at=135000),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[11], exp_builds=[('test-slave1', [11])])
# nextSlave
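# Helper: registers four slaves (sb0..sb3), installs the given nextSlave
# function, and checks which slave (if any) ends up claiming the single
# pending request.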
@defer.inlineCallbacks
def do_test_nextSlave(self, nextSlave, exp_choice=None):
for i in range(4):
self.addSlaves({'sb%d'%i: 1})
self.bldr.config.nextSlave = nextSlave
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="A"),
]
if exp_choice is None:
exp_claims = []
exp_builds = []
else:
exp_claims = [11]
exp_builds = [('sb%d'%exp_choice, [11])]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=exp_claims, exp_builds=exp_builds)
@mock.patch('random.choice', nth_slave(2))
def test_nextSlave_default(self):
return self.do_test_nextSlave(None, exp_choice=2)
def test_nextSlave_simple(self):
def nextSlave(bldr, lst):
self.assertIdentical(bldr, self.bldr)
return lst[1]
return self.do_test_nextSlave(nextSlave, exp_choice=1)
def test_nextSlave_deferred(self):
def nextSlave(bldr, lst):
self.assertIdentical(bldr, self.bldr)
return defer.succeed(lst[1])
return self.do_test_nextSlave(nextSlave, exp_choice=1)
def test_nextSlave_exception(self):
def nextSlave(bldr, lst):
raise RuntimeError("")
return self.do_test_nextSlave(nextSlave)
def test_nextSlave_failure(self):
def nextSlave(bldr, lst):
return defer.fail(failure.Failure(RuntimeError()))
return self.do_test_nextSlave(nextSlave)
# _nextBuild
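# Helper: queues four build requests (ids 10-13) and four slaves; because
# random.choice is patched with nth_slave(-1), successive builds land on
# test-slave3, then 2, 1, 0, so exp_choice lists the expected claim order.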
@mock.patch('random.choice', nth_slave(-1))
@defer.inlineCallbacks
def do_test_nextBuild(self, nextBuild, exp_choice=None):
self.bldr.config.nextBuild = nextBuild
self.master.config.mergeRequests = False
rows = self.base_rows[:]
for i in range(4):
rows.append(fakedb.Buildset(id=100+i, reason='because', sourcestampsetid=21))
rows.append(fakedb.BuildRequest(id=10+i, buildsetid=100+i, buildername="A"))
self.addSlaves({'test-slave%d'%i:1})
exp_claims = []
exp_builds = []
if exp_choice is not None:
slave = 3
for choice in exp_choice:
exp_claims.append(choice)
exp_builds.append(('test-slave%d'%slave, [choice]))
slave = slave - 1
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=sorted(exp_claims), exp_builds=exp_builds)
def test_nextBuild_default(self):
"default chooses the first in the list, which should be the earliest"
return self.do_test_nextBuild(None, exp_choice=[10, 11, 12, 13])
def test_nextBuild_simple(self):
def nextBuild(bldr, lst):
self.assertIdentical(bldr, self.bldr)
return lst[-1]
return self.do_test_nextBuild(nextBuild, exp_choice=[13, 12, 11, 10])
def test_nextBuild_deferred(self):
def nextBuild(bldr, lst):
self.assertIdentical(bldr, self.bldr)
return defer.succeed(lst[-1])
return self.do_test_nextBuild(nextBuild, exp_choice=[13, 12, 11, 10])
def test_nextBuild_exception(self):
def nextBuild(bldr, lst):
raise RuntimeError("")
return self.do_test_nextBuild(nextBuild)
def test_nextBuild_failure(self):
def nextBuild(bldr, lst):
return defer.fail(failure.Failure(RuntimeError()))
return self.do_test_nextBuild(nextBuild)
# merge tests
@defer.inlineCallbacks
def test_merge_ordering(self):
# (patch_random=True)
self.bldr.getMergeRequestsFn = lambda : lambda _, req1, req2: req1.canBeMergedWith(req2)
self.addSlaves({'test-slave1':1})
# based on the build in bug #2249
rows = [
fakedb.SourceStampSet(id=1976),
fakedb.SourceStamp(id=1976, sourcestampsetid=1976),
fakedb.Buildset(id=1980, reason='scheduler', sourcestampsetid=1976,
submitted_at=1332024020.67792),
fakedb.BuildRequest(id=42880, buildsetid=1980,
submitted_at=1332024020.67792, buildername="A"),
fakedb.SourceStampSet(id=1977),
fakedb.SourceStamp(id=1977, sourcestampsetid=1977),
fakedb.Buildset(id=1981, reason='scheduler', sourcestampsetid=1977,
submitted_at=1332025495.19141),
fakedb.BuildRequest(id=42922, buildsetid=1981,
buildername="A", submitted_at=1332025495.19141),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[42880, 42922],
exp_builds=[('test-slave1', [42880, 42922])])
@mock.patch('random.choice', nth_slave(0))
@defer.inlineCallbacks
def test_mergeRequests(self):
# set up all of the data required for a BuildRequest object
rows = [
fakedb.SourceStampSet(id=234),
fakedb.SourceStamp(id=234, sourcestampsetid=234),
fakedb.Buildset(id=30, sourcestampsetid=234, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=19, buildsetid=30, buildername='A',
priority=13, submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=20, buildsetid=30, buildername='A',
priority=13, submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=21, buildsetid=30, buildername='A',
priority=13, submitted_at=1300305712, results=-1),
]
self.addSlaves({'test-slave1':1, 'test-slave2': 1})
def mergeRequests_fn(builder, breq, other):
# merge evens with evens, odds with odds
self.assertIdentical(builder, self.bldr)
return breq.id % 2 == other.id % 2
self.bldr.getMergeRequestsFn = lambda : mergeRequests_fn
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[19, 20, 21],
exp_builds=[
('test-slave1', [19, 21]),
('test-slave2', [20])
])
@mock.patch('random.choice', nth_slave(0))
@defer.inlineCallbacks
def test_mergeRequest_no_other_request(self):
""" Test if builder test for codebases in requests """
# set up all of the data required for a BuildRequest object
rows = [
fakedb.SourceStampSet(id=234),
fakedb.SourceStamp(id=234, sourcestampsetid=234, codebase='A'),
fakedb.Change(changeid=14, codebase='A'),
fakedb.SourceStampChange(sourcestampid=234, changeid=14),
fakedb.Buildset(id=30, sourcestampsetid=234, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=19, buildsetid=30, buildername='A',
priority=13, submitted_at=1300305712, results=-1),
]
self.addSlaves({'test-slave1':1, 'test-slave2': 1})
def mergeRequests_fn(builder, breq, other):
# with only one request there is nothing to merge with, so this must never be called
self.fail("Should never be called")
return True
self.bldr.getMergeRequestsFn = lambda : mergeRequests_fn
# check that the single request is claimed and built on its own
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[19],
exp_builds=[
('test-slave1', [19]),
])
@mock.patch('random.choice', nth_slave(0))
@defer.inlineCallbacks
def test_mergeRequests_no_merging(self):
""" Test if builder test for codebases in requests """
# set up all of the data required for a BuildRequest object
rows = [
fakedb.SourceStampSet(id=234),
fakedb.SourceStamp(id=234, sourcestampsetid=234, codebase='C'),
fakedb.Buildset(id=30, sourcestampsetid=234, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.SourceStampSet(id=235),
fakedb.SourceStamp(id=235, sourcestampsetid=235, codebase='C'),
fakedb.Buildset(id=31, sourcestampsetid=235, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.SourceStampSet(id=236),
fakedb.SourceStamp(id=236, sourcestampsetid=236, codebase='C'),
fakedb.Buildset(id=32, sourcestampsetid=236, reason='foo',
submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=19, buildsetid=30, buildername='A',
priority=13, submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=20, buildsetid=31, buildername='A',
priority=13, submitted_at=1300305712, results=-1),
fakedb.BuildRequest(id=21, buildsetid=32, buildername='A',
priority=13, submitted_at=1300305712, results=-1),
]
self.addSlaves({'test-slave1':1, 'test-slave2': 1})
def mergeRequests_fn(builder, breq, other):
# Fail all merge attempts
return False
self.bldr.getMergeRequestsFn = lambda : mergeRequests_fn
# check that the requests are claimed and built separately (no merging)
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[19, 20],
exp_builds=[
('test-slave1', [19]),
('test-slave2', [20]),
])
@defer.inlineCallbacks
def test_mergeRequests_fails(self):
def mergeRequests_fn(*args):
raise RuntimeError("xx")
self.bldr.getMergeRequestsFn = lambda : mergeRequests_fn
self.addSlaves({'test-slave1':1, 'test-slave2':1})
rows = self.base_rows + [
fakedb.BuildRequest(id=11, buildsetid=11, buildername="bldr"),
]
yield self.do_test_maybeStartBuildsOnBuilder(rows=rows,
exp_claims=[], exp_builds=[])
# Copyright (C) 2010-2013 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# This file is part of the GDB testsuite.
import re
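# Pretty-printer for the testsuite's 'ss' structure (matched by the '^ss$'
# regexp registered below): renders its 'a' and 'b' members.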
class pp_ss:
def __init__(self, val):
self.val = val
def to_string(self):
return "a=<" + str(self.val["a"]) + "> b=<" + str(self.val["b"]) + ">"
def lookup_function (val):
"Look-up and return a pretty-printer that can print val."
# Get the type.
type = val.type
# If it points to a reference, get the reference.
if type.code == gdb.TYPE_CODE_REF:
type = type.target ()
# Get the unqualified type, stripped of typedefs.
type = type.unqualified ().strip_typedefs ()
# Get the type name.
typename = type.tag
if typename is None:
return None
# Iterate over local dictionary of types to determine
# if a printer is registered for that type. Return an
# instantiation of the printer if found.
for function in pretty_printers_dict:
if function.match (typename):
return pretty_printers_dict[function] (val)
# Cannot find a pretty printer. Return None.
return None
def register_pretty_printers ():
pretty_printers_dict[re.compile ('^ss$')] = pp_ss
pretty_printers_dict = {}
register_pretty_printers ()
gdb.current_progspace().pretty_printers.append (lookup_function)
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
depends_on = (
('videos', '0106_fill_subtitle_count.py'),
)
def forwards(self, orm):
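# Populate the CustomUser.videos relation from the videos each user follows
# directly and from the videos of the subtitle languages they follow;
# progress is printed every 100 users.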
i = 0
for u in orm.CustomUser.objects.all():
i += 1
if i % 100 == 0:
print i
for v in u.followed_videos.all():
u.videos.add(v)
for sl in u.followed_languages.select_related('video'):
u.videos.add(sl.video)
def backwards(self, orm):
"Write your backwards methods here."
models = {
'auth.announcement': {
'Meta': {'object_name': 'Announcement'},
'content': ('django.db.models.fields.CharField', [], {'max_length': '500'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'auth.awards': {
'Meta': {'object_name': 'Awards'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'points': ('django.db.models.fields.IntegerField', [], {}),
'type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'})
},
'auth.customuser': {
'Meta': {'object_name': 'CustomUser', '_ormbases': ['auth.User']},
'autoplay_preferences': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'award_points': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'biography': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'changes_notification': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'homepage': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'last_ip': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'picture': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'preferred_language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True', 'primary_key': 'True'}),
'valid_email': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'videos': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['videos.Video']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.message': {
'Meta': {'object_name': 'Message'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'_message_set'", 'to': "orm['auth.User']"})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'auth.userlanguage': {
'Meta': {'unique_together': "(['user', 'language'],)", 'object_name': 'UserLanguage'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16'}),
'proficiency': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'comments.comment': {
'Meta': {'object_name': 'Comment'},
'content': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'reply_to': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']"})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.action': {
'Meta': {'object_name': 'Action'},
'action_type': ('django.db.models.fields.IntegerField', [], {}),
'comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['comments.Comment']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'new_video_title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"})
},
'videos.subtitle': {
'Meta': {'unique_together': "(('version', 'subtitle_id'),)", 'object_name': 'Subtitle'},
'draft': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleDraft']", 'null': 'True'}),
'end_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'start_time': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'subtitle_order': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'subtitle_text': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'})
},
'videos.subtitledraft': {
'Meta': {'object_name': 'SubtitleDraft'},
'browser_id': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'last_saved_packet': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'parent_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'})
},
'videos.subtitlelanguage': {
'Meta': {'unique_together': "(('video', 'language', 'standard_language'),)", 'object_name': 'SubtitleLanguage'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_languages'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'had_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'has_version': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '16', 'blank': 'True'}),
'last_version': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleVersion']", 'null': 'True', 'blank': 'True'}),
'percent_done': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'standard_language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']", 'null': 'True', 'blank': 'True'}),
'subtitle_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.subtitleversion': {
'Meta': {'unique_together': "(('language', 'version_no'),)", 'object_name': 'SubtitleVersion'},
'datetime_started': ('django.db.models.fields.DateTimeField', [], {}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_forked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.SubtitleLanguage']"}),
'note': ('django.db.models.fields.CharField', [], {'max_length': '512', 'blank': 'True'}),
'notification_sent': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'text_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'time_change': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True'}),
'version_no': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'videos.usertestresult': {
'Meta': {'object_name': 'UserTestResult'},
'browser': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'get_updates': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'task1': ('django.db.models.fields.TextField', [], {}),
'task2': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'task3': ('django.db.models.fields.TextField', [], {'blank': 'True'})
},
'videos.video': {
'Meta': {'object_name': 'Video'},
'allow_community_edits': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'allow_video_urls_edit': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'complete_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'duration': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'edited': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'followers': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'followed_videos'", 'blank': 'True', 'to': "orm['auth.CustomUser']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'languages_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
's3_thumbnail': ('utils.amazon.fields.S3EnabledImageField', [], {'max_length': '100', 'blank': 'True'}),
'subtitles_fetched_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'thumbnail': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'video_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'view_count': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'was_subtitled': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True', 'blank': 'True'}),
'widget_views_count': ('django.db.models.fields.IntegerField', [], {'default': '0', 'db_index': 'True'}),
'writelock_owner': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'writelock_owners'", 'null': 'True', 'to': "orm['auth.CustomUser']"}),
'writelock_session_key': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'writelock_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True'})
},
'videos.videofeed': {
'Meta': {'object_name': 'VideoFeed'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'})
},
'videos.videourl': {
'Meta': {'object_name': 'VideoUrl'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.CustomUser']", 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'original': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'primary': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video']"}),
'videoid': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'})
}
}
complete_apps = ['videos', 'auth']
#!/usr/bin/python -OOOO
# vim: set fileencoding=utf8 shiftwidth=4 tabstop=4 textwidth=80 foldmethod=marker :
# Copyright (c) 2010, Kou Man Tong. All rights reserved.
# For licensing, see LICENSE file included in the package.
"""
BSON serialization and deserialization logic.
Specifications taken from: http://bsonspec.org/#/specification
The following types are unsupported, because for data exchange purposes, they're
over-engineered:
0x06 (Undefined)
0x07 (ObjectId)
0x0b (Regex - Exactly which flavor do you want? Better let higher level
programmers make that decision.)
0x0c (DBPointer)
0x0d (JavaScript code)
0x0e (Symbol)
0x0f (JS w/ scope)
0x11 (MongoDB-specific timestamp)
For binaries, only the default 0x0 type is supported.
>>> a = {
... u"Item A" : u"String item A",
... u"Item D" : {u"ROFLOL" : u"Blah blah blah"},
... u"Item C" : [1, 123456789012345, None, "Party and Bad Romance"],
... u"Item B" : u"\u4e00\u9580\u4e94\u5091"
... }
>>> def sorted(obj, dfs_stack):
... keys = obj.keys()
... keys.sort()
... for i in keys: yield i
...
>>> def reverse(obj, dfs_stack):
... keys = obj.keys()
... keys.sort(reverse = True)
... for i in keys: yield i
...
>>> serialized = dumps(a, sorted)
>>> serialized
'\\x9f\\x00\\x00\\x00\\x02Item A\\x00\\x0e\\x00\\x00\\x00String item A\\x00\\x02Item B\\x00\\r\\x00\\x00\\x00\\xe4\\xb8\\x80\\xe9\\x96\\x80\\xe4\\xba\\x94\\xe5\\x82\\x91\\x00\\x04Item C\\x007\\x00\\x00\\x00\\x100\\x00\\x01\\x00\\x00\\x00\\x121\\x00y\\xdf\\r\\x86Hp\\x00\\x00\\n2\\x00\\x053\\x00\\x15\\x00\\x00\\x00\\x00Party and Bad Romance\\x00\\x03Item D\\x00 \\x00\\x00\\x00\\x02ROFLOL\\x00\\x0f\\x00\\x00\\x00Blah blah blah\\x00\\x00\\x00'
>>>
>>> b = loads(serialized)
>>> b
{u'Item C': [1, 123456789012345, None, 'Party and Bad Romance'], u'Item B': u'\\u4e00\\u9580\\u4e94\\u5091', u'Item A': u'String item A', u'Item D': {u'ROFLOL': u'Blah blah blah'}}
>>> reverse_serialized = dumps(a, reverse)
>>> reverse_serialized
'\\x9f\\x00\\x00\\x00\\x03Item D\\x00 \\x00\\x00\\x00\\x02ROFLOL\\x00\\x0f\\x00\\x00\\x00Blah blah blah\\x00\\x00\\x04Item C\\x007\\x00\\x00\\x00\\x100\\x00\\x01\\x00\\x00\\x00\\x121\\x00y\\xdf\\r\\x86Hp\\x00\\x00\\n2\\x00\\x053\\x00\\x15\\x00\\x00\\x00\\x00Party and Bad Romance\\x00\\x02Item B\\x00\\r\\x00\\x00\\x00\\xe4\\xb8\\x80\\xe9\\x96\\x80\\xe4\\xba\\x94\\xe5\\x82\\x91\\x00\\x02Item A\\x00\\x0e\\x00\\x00\\x00String item A\\x00\\x00'
>>> c = loads(reverse_serialized)
>>> c
{u'Item C': [1, 123456789012345, None, 'Party and Bad Romance'], u'Item B': u'\\u4e00\\u9580\\u4e94\\u5091', u'Item A': u'String item A', u'Item D': {u'ROFLOL': u'Blah blah blah'}}
"""
from codec import *
import network
__all__ = ["loads", "dumps"]
# {{{ Serialization and Deserialization
def dumps(obj, generator = None):
"""
Given a dict, outputs a BSON string.
generator is an optional function which accepts the dictionary/array being
encoded and the current DFS traversal stack, and returns an iterator yielding
the keys in the order in which they should be encoded.
"""
if isinstance(obj, BSONCoding):
return encode_object(obj, [], generator_func = generator)
return encode_document(obj, [], generator_func = generator)
def loads(data):
"""
Given a BSON string, outputs a dict.
"""
return decode_document(data, 0)[1]
# }}}
# {{{ Socket Patchers
def patch_socket():
"""
Patches the Python socket class such that sockets can send and receive BSON
objects atomically.
This adds the following functions to socket:
recvbytes(bytes_needed, sock_buf = None) - reads bytes_needed bytes
atomically. Returns None if socket closed.
recvobj() - reads a BSON document from the socket atomically and returns
the deserialized dictionary. Returns None if socket closed.
sendobj(obj) - sends a BSON document to the socket atomically.
"""
from socket import socket
socket.recvbytes = network._recvbytes
socket.recvobj = network._recvobj
socket.sendobj = network._sendobj
# }}}
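# Usage sketch (illustrative, not part of the module above): once patch_socket()
# has been called, ordinary sockets can exchange whole BSON documents. The
# address below is a placeholder and a BSON-speaking peer is assumed to be
# listening there.
#
#   import socket
#   patch_socket()
#   conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   conn.connect(("127.0.0.1", 9000))        # placeholder address
#   conn.sendobj({u"greeting": u"hello"})    # one BSON document, sent atomically
#   reply = conn.recvobj()                   # dict, or None if the peer closed
#   conn.close()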
import inspect
import os
import pkgutil
import warnings
from importlib import import_module
from threading import local
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from django.utils._os import upath
from django.utils.deprecation import RemovedInDjango20Warning
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
DEFAULT_DB_ALIAS = 'default'
DJANGO_VERSION_PICKLE_KEY = '_django_version'
class Error(Exception if six.PY3 else StandardError):
pass
class InterfaceError(Error):
pass
class DatabaseError(Error):
pass
class DataError(DatabaseError):
pass
class OperationalError(DatabaseError):
pass
class IntegrityError(DatabaseError):
pass
class InternalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
pass
class NotSupportedError(DatabaseError):
pass
class DatabaseErrorWrapper(object):
"""
Context manager and decorator that re-throws backend-specific database
exceptions using Django's common wrappers.
"""
def __init__(self, wrapper):
"""
wrapper is a database wrapper.
It must have a Database attribute defining PEP-249 exceptions.
"""
self.wrapper = wrapper
def __enter__(self):
pass
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
return
for dj_exc_type in (
DataError,
OperationalError,
IntegrityError,
InternalError,
ProgrammingError,
NotSupportedError,
DatabaseError,
InterfaceError,
Error,
):
db_exc_type = getattr(self.wrapper.Database, dj_exc_type.__name__)
if issubclass(exc_type, db_exc_type):
dj_exc_value = dj_exc_type(*exc_value.args)
dj_exc_value.__cause__ = exc_value
# Only set the 'errors_occurred' flag for errors that may make
# the connection unusable.
if dj_exc_type not in (DataError, IntegrityError):
self.wrapper.errors_occurred = True
six.reraise(dj_exc_type, dj_exc_value, traceback)
def __call__(self, func):
# Note that we are intentionally not using @wraps here for performance
# reasons. Refs #21109.
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
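# Usage sketch (illustrative): DatabaseErrorWrapper wraps calls into a backend so
# that driver exceptions resurface as Django's common exception classes. The
# `connection` and `cursor` names below are assumed to come from a real backend
# whose Database attribute exposes the PEP 249 exception hierarchy.
#
#   wrap = DatabaseErrorWrapper(connection)
#   with wrap:                        # as a context manager
#       cursor.execute("SELECT 1")
#
#   @wrap                             # or as a decorator
#   def fetch_rows(cursor):
#       cursor.execute("SELECT * FROM some_table")
#       return cursor.fetchall()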
def load_backend(backend_name):
# Look for a fully qualified database backend name
try:
return import_module('%s.base' % backend_name)
except ImportError as e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
backend_dir = os.path.join(os.path.dirname(upath(__file__)), 'backends')
try:
builtin_backends = [
name for _, name, ispkg in pkgutil.iter_modules([backend_dir])
if ispkg and name != 'dummy']
except EnvironmentError:
builtin_backends = []
if backend_name not in ['django.db.backends.%s' % b for b in
builtin_backends]:
backend_reprs = map(repr, sorted(builtin_backends))
error_msg = ("%r isn't an available database backend.\n"
"Try using 'django.db.backends.XXX', where XXX "
"is one of:\n %s\nError was: %s" %
(backend_name, ", ".join(backend_reprs), e_user))
raise ImproperlyConfigured(error_msg)
else:
# If there's some other error, this must be an error in Django
raise
class ConnectionDoesNotExist(Exception):
pass
class ConnectionHandler(object):
def __init__(self, databases=None):
"""
databases is an optional dictionary of database definitions (structured
like settings.DATABASES).
"""
self._databases = databases
self._connections = local()
@cached_property
def databases(self):
if self._databases is None:
self._databases = settings.DATABASES
if self._databases == {}:
self._databases = {
DEFAULT_DB_ALIAS: {
'ENGINE': 'django.db.backends.dummy',
},
}
if DEFAULT_DB_ALIAS not in self._databases:
raise ImproperlyConfigured("You must define a '%s' database" % DEFAULT_DB_ALIAS)
return self._databases
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
where no settings are provided.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy'
conn.setdefault('CONN_MAX_AGE', 0)
conn.setdefault('OPTIONS', {})
conn.setdefault('TIME_ZONE', 'UTC' if settings.USE_TZ else settings.TIME_ZONE)
for setting in ['NAME', 'USER', 'PASSWORD', 'HOST', 'PORT']:
conn.setdefault(setting, '')
def prepare_test_settings(self, alias):
"""
Makes sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
test_settings = conn.setdefault('TEST', {})
for key in ['CHARSET', 'COLLATION', 'NAME', 'MIRROR']:
test_settings.setdefault(key, None)
def __getitem__(self, alias):
if hasattr(self._connections, alias):
return getattr(self._connections, alias)
self.ensure_defaults(alias)
self.prepare_test_settings(alias)
db = self.databases[alias]
backend = load_backend(db['ENGINE'])
conn = backend.DatabaseWrapper(db, alias)
setattr(self._connections, alias, conn)
return conn
def __setitem__(self, key, value):
setattr(self._connections, key, value)
def __delitem__(self, key):
delattr(self._connections, key)
def __iter__(self):
return iter(self.databases)
def all(self):
return [self[alias] for alias in self]
def close_all(self):
for alias in self:
try:
connection = getattr(self._connections, alias)
except AttributeError:
continue
connection.close()
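# Usage sketch (illustrative): a ConnectionHandler lazily builds one connection
# per alias and per thread. The DATABASES-style dict below is a stand-in; any
# real use needs a configured Django settings module and a working backend.
#
#   handler = ConnectionHandler({
#       'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': ':memory:'},
#   })
#   conn = handler['default']   # loads the backend and wraps it in a DatabaseWrapper
#   for alias in handler:       # iterates over the configured aliases
#       print(alias)
#   handler.close_all()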
class ConnectionRouter(object):
def __init__(self, routers=None):
"""
If routers is not specified, it will default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@cached_property
def routers(self):
if self._routers is None:
self._routers = settings.DATABASE_ROUTERS
routers = []
for r in self._routers:
if isinstance(r, six.string_types):
router = import_string(r)()
else:
router = r
routers.append(router)
return routers
def _router_func(action):
def _route_db(self, model, **hints):
chosen_db = None
for router in self.routers:
try:
method = getattr(router, action)
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
chosen_db = method(model, **hints)
if chosen_db:
return chosen_db
instance = hints.get('instance')
if instance is not None and instance._state.db:
return instance._state.db
return DEFAULT_DB_ALIAS
return _route_db
db_for_read = _router_func('db_for_read')
db_for_write = _router_func('db_for_write')
def allow_relation(self, obj1, obj2, **hints):
for router in self.routers:
try:
method = router.allow_relation
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass
else:
allow = method(obj1, obj2, **hints)
if allow is not None:
return allow
return obj1._state.db == obj2._state.db
def allow_migrate(self, db, app_label, **hints):
for router in self.routers:
try:
method = router.allow_migrate
except AttributeError:
# If the router doesn't have a method, skip to the next one.
continue
argspec = inspect.getargspec(router.allow_migrate)
if len(argspec.args) == 3 and not argspec.keywords:
warnings.warn(
"The signature of allow_migrate has changed from "
"allow_migrate(self, db, model) to "
"allow_migrate(self, db, app_label, model_name=None, **hints). "
"Support for the old signature will be removed in Django 2.0.",
RemovedInDjango20Warning)
model = hints.get('model')
allow = None if model is None else method(db, model)
else:
allow = method(db, app_label, **hints)
if allow is not None:
return allow
return True
def allow_migrate_model(self, db, model):
return self.allow_migrate(
db,
model._meta.app_label,
model_name=model._meta.model_name,
model=model,
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""
Return app models allowed to be synchronized on provided db.
"""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]
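# Sketch of a custom router written against the new allow_migrate signature that
# the deprecation warning above describes. The 'analytics' alias, the 'stats'
# app label and SomeStatsModel are made-up names used only for illustration.
#
#   class AnalyticsRouter(object):
#       def db_for_read(self, model, **hints):
#           return 'analytics' if model._meta.app_label == 'stats' else None
#
#       def db_for_write(self, model, **hints):
#           return 'analytics' if model._meta.app_label == 'stats' else None
#
#       def allow_migrate(self, db, app_label, model_name=None, **hints):
#           if app_label == 'stats':
#               return db == 'analytics'
#           return None
#
#   router = ConnectionRouter([AnalyticsRouter()])
#   router.db_for_read(SomeStatsModel)   # -> 'analytics'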
# Copyright (c) 2015 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Sandberg
from multiprocessing import Process
import sys
import os
import m5
m5.util.addToPath('../configs/common')
_exit_normal = (
"target called exit()",
"m5_exit instruction encountered",
)
_exit_limit = (
"simulate() limit reached",
)
_exitcode_done = 0
_exitcode_fail = 1
_exitcode_checkpoint = 42
def _run_step(name, restore=None, interval=0.5):
"""
Instantiate the system (optionally from a checkpoint if restore is set to
the checkpoint name) and run for interval seconds of simulated time. At
the end of the simulation interval, create a
checkpoint and exit.
As this function is intended to run in its own process using the
multiprocessing framework, the exit is a true call to exit which
terminates the process. Exit codes are used to pass information to
the parent.
"""
if restore is not None:
m5.instantiate(restore)
else:
m5.instantiate()
e = m5.simulate(m5.ticks.fromSeconds(interval))
cause = e.getCause()
if cause in _exit_limit:
m5.checkpoint(name)
sys.exit(_exitcode_checkpoint)
elif cause in _exit_normal:
sys.exit(_exitcode_done)
else:
print "Test failed: Unknown exit cause: %s" % cause
sys.exit(_exitcode_fail)
def run_test(root, interval=0.5, max_checkpoints=5):
"""
Run the simulated system for a fixed amount of time and take a
checkpoint, then restore from the same checkpoint and run until
the system calls m5 exit.
"""
cpt_name = os.path.join(m5.options.outdir, "test.cpt")
restore = None
for cpt_no in range(max_checkpoints):
# Create a checkpoint from a separate child process. This enables
# us to get back to a (mostly) pristine state and restart
# simulation from the checkpoint.
p = Process(target=_run_step,
args=(cpt_name, ),
kwargs={
"restore" : restore,
"interval" : interval,
})
p.start()
# Wait for the child to return
p.join()
# Restore from the checkpoint next iteration
restore = cpt_name
if p.exitcode == _exitcode_done:
print >> sys.stderr, "Test done."
sys.exit(0)
elif p.exitcode == _exitcode_checkpoint:
pass
else:
print >> sys.stderr, "Test failed."
sys.exit(1)
# Maximum number of checkpoints reached. Just run full-speed from
# now on.
m5.instantiate()
e = m5.simulate()
cause = e.getCause()
if cause in _exit_normal:
sys.exit(0)
else:
print "Test failed: Unknown exit cause: %s" % cause
sys.exit(1)
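# Usage sketch (hypothetical): a gem5 test configuration would build the system
# first and then hand control to run_test(), which drives the checkpoint/restore
# cycle above. build_test_system() is a made-up name standing in for whatever
# constructs and configures the Root object.
#
#   root = build_test_system()
#   run_test(root, interval=0.5, max_checkpoints=5)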
# -*- coding: utf-8 -*-
from cadnano.proxies.cnproxy import UndoCommand
from cadnano.decorators.insertion import Insertion
from cadnano.cntypes import (
StrandT
)
class AddInsertionCommand(UndoCommand):
def __init__(self, strand: StrandT, idx: int, length: int):
super(AddInsertionCommand, self).__init__("add insertion")
self._strand = strand
id_num = strand.idNum()
self._insertions = strand.part().insertions()[id_num]
self._idx = idx
self._length = length
self._insertion = Insertion(idx, length)
self._comp_strand = \
strand.strandSet().complementStrandSet().getStrand(idx)
# end def
def redo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
self._insertions[self._idx] = inst
strand.oligo()._incrementLength(inst.length(), emit_signals=True)
strand.strandInsertionAddedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._incrementLength(inst.length(), emit_signals=True)
c_strand.strandInsertionAddedSignal.emit(c_strand, inst)
# end def
def undo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
strand.oligo()._decrementLength(inst.length(), emit_signals=True)
if c_strand:
c_strand.oligo()._decrementLength(inst.length(), emit_signals=True)
idx = self._idx
del self._insertions[idx]
strand.strandInsertionRemovedSignal.emit(strand, idx)
if c_strand:
c_strand.strandInsertionRemovedSignal.emit(c_strand, idx)
# end def
# end class
class RemoveInsertionCommand(UndoCommand):
def __init__(self, strand, idx):
super(RemoveInsertionCommand, self).__init__("remove insertion")
self._strand = strand
self._idx = idx
id_num = strand.idNum()
self._insertions = strand.part().insertions()[id_num]
self._insertion = self._insertions[idx]
self._comp_strand = \
strand.strandSet().complementStrandSet().getStrand(idx)
# end def
def redo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
strand.oligo()._decrementLength(inst.length(), emit_signals=True)
if c_strand:
c_strand.oligo()._decrementLength(inst.length(), emit_signals=True)
idx = self._idx
del self._insertions[idx]
strand.strandInsertionRemovedSignal.emit(strand, idx)
if c_strand:
c_strand.strandInsertionRemovedSignal.emit(c_strand, idx)
# end def
def undo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertion
strand.oligo()._incrementLength(inst.length(), emit_signals=True)
self._insertions[self._idx] = inst
strand.strandInsertionAddedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._incrementLength(inst.length(), emit_signals=True)
c_strand.strandInsertionAddedSignal.emit(c_strand, inst)
# end def
# end class
class ChangeInsertionCommand(UndoCommand):
"""
Changes the length of an insertion to a non-zero value.
The caller needs to handle the case where a zero length is required and
use RemoveInsertionCommand instead (see the dispatch sketch after these
command classes).
"""
def __init__(self, strand, idx, new_length):
super(ChangeInsertionCommand, self).__init__("change insertion")
self._strand = strand
id_num = strand.idNum()
self._insertions = strand.part().insertions()[id_num]
self._idx = idx
self._new_length = new_length
self._old_length = self._insertions[idx].length()
self._comp_strand = \
strand.strandSet().complementStrandSet().getStrand(idx)
# end def
def redo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertions[self._idx]
inst.setLength(self._new_length, emit_signals=True)
strand.oligo()._incrementLength(self._new_length - self._old_length,
emit_signals=True)
strand.strandInsertionChangedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._incrementLength(
self._new_length - self._old_length,
emit_signals=True)
c_strand.strandInsertionChangedSignal.emit(c_strand, inst)
# end def
def undo(self):
strand = self._strand
c_strand = self._comp_strand
inst = self._insertions[self._idx]
inst.setLength(self._old_length)
strand.oligo()._decrementLength(self._new_length - self._old_length,
emit_signals=True)
strand.strandInsertionChangedSignal.emit(strand, inst)
if c_strand:
c_strand.oligo()._decrementLength(
self._new_length - self._old_length,
emit_signals=True)
c_strand.strandInsertionChangedSignal.emit(c_strand, inst)
# end def
# end class
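# Dispatch sketch (illustrative): the command classes above leave it to the caller
# to pick the right one, in particular RemoveInsertionCommand when the new length
# is zero. `strand` and `undo_stack` are assumed to come from the cadnano document.
#
#   def resize_insertion(undo_stack, strand, idx, new_length):
#       insertions = strand.part().insertions()[strand.idNum()]
#       if idx not in insertions:
#           cmd = AddInsertionCommand(strand, idx, new_length)
#       elif new_length == 0:
#           cmd = RemoveInsertionCommand(strand, idx)
#       else:
#           cmd = ChangeInsertionCommand(strand, idx, new_length)
#       undo_stack.push(cmd)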
#!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack(" v documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
html_sidebars = {
'index': ['sidebarintro.html', 'sourcelink.html', 'searchbox.html'],
'**': ['sidebarlogo.html', 'localtoc.html', 'relations.html',
'sourcelink.html', 'searchbox.html']
}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'openerp-web-doc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'openerp-web-doc.tex', u'OpenERP Web Developers Documentation',
u'OpenERP s.a.', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'openerp-web-doc', u'OpenERP Web Developers Documentation',
[u'OpenERP s.a.'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'OpenERPWebDocumentation', u'OpenERP Web Developers Documentation',
u'OpenERP s.a.', 'OpenERPWebDocumentation', 'Developers documentation for the openerp-web project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
todo_include_todos = True
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('http://docs.python.org/', None),
'openerpserver': ('http://doc.openerp.com/trunk/developers/server', None),
}
# Copyright (C) 2011-2013 by
# Aric Hagberg
# Dan Schult
# Pieter Swart
# All rights reserved.
# BSD license.
from itertools import chain, count
import networkx as nx
__author__ = """Aric Hagberg """
__all__ = ['adjacency_data', 'adjacency_graph']
_attrs = dict(id='id', key='key')
def adjacency_data(G, attrs=_attrs):
"""Return data in adjacency format that is suitable for JSON serialization
and use in Javascript documents.
Parameters
----------
G : NetworkX graph
attrs : dict
A dictionary that contains two keys 'id' and 'key'. The corresponding
values provide the attribute names for storing NetworkX-internal graph
data. The values should be unique. Default value:
:samp:`dict(id='id', key='key')`.
If some user-defined graph data use these attribute names as data keys,
they may be silently dropped.
Returns
-------
data : dict
A dictionary with adjacency formatted data.
Raises
------
NetworkXError
If values in attrs are not unique.
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.Graph([(1,2)])
>>> data = json_graph.adjacency_data(G)
To serialize with json
>>> import json
>>> s = json.dumps(data)
Notes
-----
Graph, node, and link attributes will be written when using this format
but attribute keys must be strings if you want to serialize the resulting
data with JSON.
The default value of attrs will be changed in a future release of NetworkX.
See Also
--------
adjacency_graph, node_link_data, tree_data
"""
multigraph = G.is_multigraph()
id_ = attrs['id']
# Allow 'key' to be omitted from attrs if the graph is not a multigraph.
key = None if not multigraph else attrs['key']
if id_ == key:
raise nx.NetworkXError('Attribute names are not unique.')
data = {}
data['directed'] = G.is_directed()
data['multigraph'] = multigraph
data['graph'] = list(G.graph.items())
data['nodes'] = []
data['adjacency'] = []
for n, nbrdict in G.adjacency():
data['nodes'].append(dict(chain(G.node[n].items(), [(id_, n)])))
adj = []
if multigraph:
for nbr, keys in nbrdict.items():
for k, d in keys.items():
adj.append(dict(chain(d.items(), [(id_, nbr), (key, k)])))
else:
for nbr, d in nbrdict.items():
adj.append(dict(chain(d.items(), [(id_, nbr)])))
data['adjacency'].append(adj)
return data
def adjacency_graph(data, directed=False, multigraph=True, attrs=_attrs):
"""Return graph from adjacency data format.
Parameters
----------
data : dict
Adjacency list formatted graph data
directed : bool
    If True, and direction not specified in data, return a directed graph.
multigraph : bool
    If True, and multigraph not specified in data, return a multigraph.
attrs : dict
    A dictionary that contains two keys 'id' and 'key'. The corresponding
    values provide the attribute names for storing NetworkX-internal graph
    data. The values should be unique. Default value:
    :samp:`dict(id='id', key='key')`.
Returns
-------
G : NetworkX graph
    A NetworkX graph object
Examples
--------
>>> from networkx.readwrite import json_graph
>>> G = nx.Graph([(1,2)])
>>> data = json_graph.adjacency_data(G)
>>> H = json_graph.adjacency_graph(data)
Notes
-----
The default value of attrs will be changed in a future release of NetworkX.
See Also
--------
adjacency_data, node_link_data, tree_data
"""
multigraph = data.get('multigraph', multigraph)
directed = data.get('directed', directed)
if multigraph:
graph = nx.MultiGraph()
else:
graph = nx.Graph()
if directed:
graph = graph.to_directed()
id_ = attrs['id']
# Allow 'key' to be omitted from attrs if the graph is not a multigraph.
key = None if not multigraph else attrs['key']
graph.graph = dict(data.get('graph', []))
mapping = []
for d in data['nodes']:
node_data = d.copy()
node = node_data.pop(id_)
mapping.append(node)
graph.add_node(node, attr_dict=node_data)
for i, d in enumerate(data['adjacency']):
source = mapping[i]
for tdata in d:
target_data = tdata.copy()
target = target_data.pop(id_)
if not multigraph:
graph.add_edge(source, target, attr_dict=tdata)
else:
ky = target_data.pop(key, None)
graph.add_edge(source, target, key=ky, attr_dict=tdata)
return graph
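# Round-trip sketch (illustrative): multigraphs also carry the edge key, which is
# why both functions above special-case attrs['key'].
#
#   import json
#   G = nx.MultiGraph()
#   G.add_edge(1, 2, key='a', weight=3)
#   G.add_edge(1, 2, key='b', weight=5)
#   payload = json.dumps(adjacency_data(G))   # attribute keys must be strings
#   H = adjacency_graph(json.loads(payload))
#   assert H.is_multigraph() and H.number_of_edges() == 2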
"""
author: DI WU
stevenwudi@gmail.com
"""
import getopt
import sys
# some configuration files for OBT experiments; normally I would never import
# this way, it's simply far too ugly
from config import *
from scripts import *
from KCF_CNN_RNN import KCFTracker
def main(argv):
trackers = [KCFTracker(feature_type='cnn', load_model=False)]
#evalTypes = ['OPE', 'SRE', 'TRE']
evalTypes = ['OPE']
loadSeqs = 'TB50'
try:
opts, args = getopt.getopt(argv, "ht:e:s:", ["tracker=", "evaltype=", "sequence="])
except getopt.GetoptError:
print 'usage : run_trackers.py -t <trackers> -s <sequences> ' + '-e <evaltypes>'
sys.exit(1)
for opt, arg in opts:
if opt == '-h':
print 'usage : run_trackers.py -t <trackers> -s <sequences> ' + '-e <evaltypes>'
sys.exit(0)
elif opt in ("-t", "--tracker"):
trackers = [x.strip() for x in arg.split(',')]
# trackers = [arg]
elif opt in ("-s", "--sequence"):
loadSeqs = arg
if loadSeqs != 'All' and loadSeqs != 'all' and \
loadSeqs != 'tb50' and loadSeqs != 'tb100' and \
loadSeqs != 'cvpr13':
loadSeqs = [x.strip() for x in arg.split(',')]
elif opt in ("-e", "--evaltype"):
evalTypes = [x.strip() for x in arg.split(',')]
if SETUP_SEQ:
print 'Setup sequences ...'
butil.setup_seqs(loadSeqs)
print 'Starting benchmark for {0} trackers, evalTypes : {1}'.format(
len(trackers), evalTypes)
for evalType in evalTypes:
seqNames = butil.get_seq_names(loadSeqs)
seqs = butil.load_seq_configs(seqNames)
######################################################################
trackerResults = run_trackers(trackers, seqs, evalType, shiftTypeSet)
######################################################################
for tracker in trackers:
results = trackerResults[tracker]
if len(results) > 0:
######################################################################
evalResults, attrList = butil.calc_result(tracker, seqs, results, evalType)
######################################################################
print "Result of Sequences\t -- '{0}'".format(tracker.name)
for seq in seqs:
try:
print '\t\'{0}\'{1}'.format(
seq.name, " " * (12 - len(seq.name))),
print "\taveCoverage : {0:.3f}%".format(
sum(seq.aveCoverage) / len(seq.aveCoverage) * 100),
print "\taveErrCenter : {0:.3f}".format(
sum(seq.aveErrCenter) / len(seq.aveErrCenter))
except:
print '\t\'{0}\' ERROR!!'.format(seq.name)
print "Result of attributes\t -- '{0}'".format(tracker.name)
for attr in attrList:
print "\t\'{0}\'".format(attr.name),
print "\toverlap : {0:02.1f}%".format(attr.overlap),
print "\tfailures : {0:.1f}".format(attr.error)
if SAVE_RESULT:
butil.save_scores(attrList)
def run_trackers(trackers, seqs, evalType, shiftTypeSet):
tmpRes_path = RESULT_SRC.format('tmp/{0}/'.format(evalType))
if not os.path.exists(tmpRes_path):
os.makedirs(tmpRes_path)
numSeq = len(seqs)
trackerResults = dict((t, list()) for t in trackers)
##################################################
# choose which sequences to run from below
##################################################
# we also collect data for training here
import h5py
f = h5py.File("OBT50_scale_correct.hdf5", "w")
X_train = f.create_dataset("x_train", (26922, 60, 40), dtype='float', chunks=True)
y_train = f.create_dataset("y_train", (26922, 4), dtype='float', chunks=True)
count = 0
for idxSeq in range(0, numSeq):
s = seqs[idxSeq]
subSeqs, subAnno = butil.get_sub_seqs(s, 20.0, evalType)
for idxTrk in range(len(trackers)):
t = trackers[idxTrk]
if not OVERWRITE_RESULT:
trk_src = os.path.join(RESULT_SRC.format(evalType), t.name)
result_src = os.path.join(trk_src, s.name + '.json')
if os.path.exists(result_src):
seqResults = butil.load_seq_result(evalType, t, s.name)
trackerResults[t].append(seqResults)
continue
seqLen = len(subSeqs)
for idx in range(seqLen):
print '{0}_{1}, {2}_{3}:{4}/{5} - {6}'.format(
idxTrk + 1, t.feature_type, idxSeq + 1, s.name, idx + 1, seqLen, evalType)
rp = tmpRes_path + '_' + t.feature_type + '_' + str(idx + 1) + '/'
if SAVE_IMAGE and not os.path.exists(rp):
os.makedirs(rp)
subS = subSeqs[idx]
subS.name = s.name + '_' + str(idx)
####################
X_train, y_train, count = run_KCF_variant(t, subS, X_train, y_train, count)
####################
print("count %d"%count)
####################
X_train.resize(count - 1, axis=0)
y_train.resize(count - 1, axis=0)
f.close()
return trackerResults
def run_KCF_variant(tracker, seq, X_train, y_train, count):
from keras.preprocessing import image
start_time = time.time()
for frame in range(seq.endFrame - seq.startFrame):
if frame > 0:
img_rgb = img_rgb_next.copy()
else:
image_filename = seq.s_frames[frame]
image_path = os.path.join(seq.path, image_filename)
img_rgb = image.load_img(image_path)
img_rgb = image.img_to_array(img_rgb)
image_filename_next = seq.s_frames[frame+1]
image_path_next = os.path.join(seq.path, image_filename_next)
img_rgb_next = image.load_img(image_path_next)
img_rgb_next = image.img_to_array(img_rgb_next)
X_train, y_train, count = tracker.train_cnn(frame,
img_rgb,
seq.gtRect[frame],
img_rgb_next,
seq.gtRect[frame+1],
X_train, y_train, count
)
total_time = time.time() - start_time
tracker.fps = len(range(seq.endFrame - seq.startFrame)) / total_time
print("Frames-per-second:", tracker.fps)
return X_train, y_train, count
if __name__ == "__main__":
main(sys.argv[1:])
# -*- encoding: utf-8 -*-
from __future__ import unicode_literals
"""
LANG_INFO is a dictionary structure to provide meta information about languages.
About name_local: capitalize it as if your language name was appearing
inside a sentence in your language.
The 'fallback' key can be used to specify a special fallback logic which doesn't
follow the traditional 'fr-ca' -> 'fr' fallback logic.
"""
LANG_INFO = {
'af': {
'bidi': False,
'code': 'af',
'name': 'Afrikaans',
'name_local': 'Afrikaans',
},
'ar': {
'bidi': True,
'code': 'ar',
'name': 'Arabic',
'name_local': 'العربيّة',
},
'ast': {
'bidi': False,
'code': 'ast',
'name': 'Asturian',
'name_local': 'asturianu',
},
'az': {
'bidi': True,
'code': 'az',
'name': 'Azerbaijani',
'name_local': 'Azərbaycanca',
},
'be': {
'bidi': False,
'code': 'be',
'name': 'Belarusian',
'name_local': 'беларуская',
},
'bg': {
'bidi': False,
'code': 'bg',
'name': 'Bulgarian',
'name_local': 'български',
},
'bn': {
'bidi': False,
'code': 'bn',
'name': 'Bengali',
'name_local': 'বাংলা',
},
'br': {
'bidi': False,
'code': 'br',
'name': 'Breton',
'name_local': 'brezhoneg',
},
'bs': {
'bidi': False,
'code': 'bs',
'name': 'Bosnian',
'name_local': 'bosanski',
},
'ca': {
'bidi': False,
'code': 'ca',
'name': 'Catalan',
'name_local': 'català',
},
'cs': {
'bidi': False,
'code': 'cs',
'name': 'Czech',
'name_local': 'česky',
},
'cy': {
'bidi': False,
'code': 'cy',
'name': 'Welsh',
'name_local': 'Cymraeg',
},
'da': {
'bidi': False,
'code': 'da',
'name': 'Danish',
'name_local': 'dansk',
},
'de': {
'bidi': False,
'code': 'de',
'name': 'German',
'name_local': 'Deutsch',
},
'el': {
'bidi': False,
'code': 'el',
'name': 'Greek',
'name_local': 'Ελληνικά',
},
'en': {
'bidi': False,
'code': 'en',
'name': 'English',
'name_local': 'English',
},
'en-au': {
'bidi': False,
'code': 'en-au',
'name': 'Australian English',
'name_local': 'Australian English',
},
'en-gb': {
'bidi': False,
'code': 'en-gb',
'name': 'British English',
'name_local': 'British English',
},
'eo': {
'bidi': False,
'code': 'eo',
'name': 'Esperanto',
'name_local': 'Esperanto',
},
'es': {
'bidi': False,
'code': 'es',
'name': 'Spanish',
'name_local': 'español',
},
'es-ar': {
'bidi': False,
'code': 'es-ar',
'name': 'Argentinian Spanish',
'name_local': 'español de Argentina',
},
'es-mx': {
'bidi': False,
'code': 'es-mx',
'name': 'Mexican Spanish',
'name_local': 'español de Mexico',
},
'es-ni': {
'bidi': False,
'code': 'es-ni',
'name': 'Nicaraguan Spanish',
'name_local': 'español de Nicaragua',
},
'es-ve': {
'bidi': False,
'code': 'es-ve',
'name': 'Venezuelan Spanish',
'name_local': 'español de Venezuela',
},
'et': {
'bidi': False,
'code': 'et',
'name': 'Estonian',
'name_local': 'eesti',
},
'eu': {
'bidi': False,
'code': 'eu',
'name': 'Basque',
'name_local': 'Basque',
},
'fa': {
'bidi': True,
'code': 'fa',
'name': 'Persian',
'name_local': 'فارسی',
},
'fi': {
'bidi': False,
'code': 'fi',
'name': 'Finnish',
'name_local': 'suomi',
},
'fr': {
'bidi': False,
'code': 'fr',
'name': 'French',
'name_local': 'français',
},
'fy': {
'bidi': False,
'code': 'fy',
'name': 'Frisian',
'name_local': 'frysk',
},
'ga': {
'bidi': False,
'code': 'ga',
'name': 'Irish',
'name_local': 'Gaeilge',
},
'gl': {
'bidi': False,
'code': 'gl',
'name': 'Galician',
'name_local': 'galego',
},
'he': {
'bidi': True,
'code': 'he',
'name': 'Hebrew',
'name_local': 'עברית',
},
'hi': {
'bidi': False,
'code': 'hi',
'name': 'Hindi',
'name_local': 'Hindi',
},
'hr': {
'bidi': False,
'code': 'hr',
'name': 'Croatian',
'name_local': 'Hrvatski',
},
'hu': {
'bidi': False,
'code': 'hu',
'name': 'Hungarian',
'name_local': 'Magyar',
},
'ia': {
'bidi': False,
'code': 'ia',
'name': 'Interlingua',
'name_local': 'Interlingua',
},
'io': {
'bidi': False,
'code': 'io',
'name': 'Ido',
'name_local': 'ido',
},
'id': {
'bidi': False,
'code': 'id',
'name': 'Indonesian',
'name_local': 'Bahasa Indonesia',
},
'is': {
'bidi': False,
'code': 'is',
'name': 'Icelandic',
'name_local': 'Íslenska',
},
'it': {
'bidi': False,
'code': 'it',
'name': 'Italian',
'name_local': 'italiano',
},
'ja': {
'bidi': False,
'code': 'ja',
'name': 'Japanese',
'name_local': '日本語',
},
'ka': {
'bidi': False,
'code': 'ka',
'name': 'Georgian',
'name_local': 'ქართული',
},
'kk': {
'bidi': False,
'code': 'kk',
'name': 'Kazakh',
'name_local': 'Қазақ',
},
'km': {
'bidi': False,
'code': 'km',
'name': 'Khmer',
'name_local': 'Khmer',
},
'kn': {
'bidi': False,
'code': 'kn',
'name': 'Kannada',
'name_local': 'Kannada',
},
'ko': {
'bidi': False,
'code': 'ko',
'name': 'Korean',
'name_local': '한국어',
},
'lb': {
'bidi': False,
'code': 'lb',
'name': 'Luxembourgish',
'name_local': 'Lëtzebuergesch',
},
'lt': {
'bidi': False,
'code': 'lt',
'name': 'Lithuanian',
'name_local': 'Lietuviškai',
},
'lv': {
'bidi': False,
'code': 'lv',
'name': 'Latvian',
'name_local': 'latviešu',
},
'mk': {
'bidi': False,
'code': 'mk',
'name': 'Macedonian',
'name_local': 'Македонски',
},
'ml': {
'bidi': False,
'code': 'ml',
'name': 'Malayalam',
'name_local': 'Malayalam',
},
'mn': {
'bidi': False,
'code': 'mn',
'name': 'Mongolian',
'name_local': 'Mongolian',
},
'mr': {
'bidi': False,
'code': 'mr',
'name': 'Marathi',
'name_local': 'मराठी',
},
'my': {
'bidi': False,
'code': 'my',
'name': 'Burmese',
'name_local': 'မြန်မာဘာသာ',
},
'nb': {
'bidi': False,
'code': 'nb',
'name': 'Norwegian Bokmal',
'name_local': 'norsk (bokmål)',
},
'ne': {
'bidi': False,
'code': 'ne',
'name': 'Nepali',
'name_local': 'नेपाली',
},
'nl': {
'bidi': False,
'code': 'nl',
'name': 'Dutch',
'name_local': 'Nederlands',
},
'nn': {
'bidi': False,
'code': 'nn',
'name': 'Norwegian Nynorsk',
'name_local': 'norsk (nynorsk)',
},
'no': {
'bidi': False,
'code': 'no',
'name': 'Norwegian',
'name_local': 'norsk',
},
'os': {
'bidi': False,
'code': 'os',
'name': 'Ossetic',
'name_local': 'Ирон',
},
'pa': {
'bidi': False,
'code': 'pa',
'name': 'Punjabi',
'name_local': 'Punjabi',
},
'pl': {
'bidi': False,
'code': 'pl',
'name': 'Polish',
'name_local': 'polski',
},
'pt': {
'bidi': False,
'code': 'pt',
'name': 'Portuguese',
'name_local': 'Português',
},
'pt-br': {
'bidi': False,
'code': 'pt-br',
'name': 'Brazilian Portuguese',
'name_local': 'Português Brasileiro',
},
'ro': {
'bidi': False,
'code': 'ro',
'name': 'Romanian',
'name_local': 'Română',
},
'ru': {
'bidi': False,
'code': 'ru',
'name': 'Russian',
'name_local': 'Русский',
},
'sk': {
'bidi': False,
'code': 'sk',
'name': 'Slovak',
'name_local': 'slovenský',
},
'sl': {
'bidi': False,
'code': 'sl',
'name': 'Slovenian',
'name_local': 'Slovenščina',
},
'sq': {
'bidi': False,
'code': 'sq',
'name': 'Albanian',
'name_local': 'shqip',
},
'sr': {
'bidi': False,
'code': 'sr',
'name': 'Serbian',
'name_local': 'српски',
},
'sr-latn': {
'bidi': False,
'code': 'sr-latn',
'name': 'Serbian Latin',
'name_local': 'srpski (latinica)',
},
'sv': {
'bidi': False,
'code': 'sv',
'name': 'Swedish',
'name_local': 'svenska',
},
'sw': {
'bidi': False,
'code': 'sw',
'name': 'Swahili',
'name_local': 'Kiswahili',
},
'ta': {
'bidi': False,
'code': 'ta',
'name': 'Tamil',
'name_local': 'தமிழ்',
},
'te': {
'bidi': False,
'code': 'te',
'name': 'Telugu',
'name_local': 'తెలుగు',
},
'th': {
'bidi': False,
'code': 'th',
'name': 'Thai',
'name_local': 'ภาษาไทย',
},
'tr': {
'bidi': False,
'code': 'tr',
'name': 'Turkish',
'name_local': 'Türkçe',
},
'tt': {
'bidi': False,
'code': 'tt',
'name': 'Tatar',
'name_local': 'Татарча',
},
'udm': {
'bidi': False,
'code': 'udm',
'name': 'Udmurt',
'name_local': 'Удмурт',
},
'uk': {
'bidi': False,
'code': 'uk',
'name': 'Ukrainian',
'name_local': 'Українська',
},
'ur': {
'bidi': True,
'code': 'ur',
'name': 'Urdu',
'name_local': 'اردو',
},
'vi': {
'bidi': False,
'code': 'vi',
'name': 'Vietnamese',
'name_local': 'Tiếng Việt',
},
'zh-cn': {
'fallback': ['zh-hans'],
'bidi': False,
'code': 'zh-cn',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hans': {
'bidi': False,
'code': 'zh-hans',
'name': 'Simplified Chinese',
'name_local': '简体中文',
},
'zh-hant': {
'bidi': False,
'code': 'zh-hant',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
'zh-hk': {
'fallback': ['zh-hant'],
},
'zh-mo': {
'fallback': ['zh-hant'],
},
'zh-my': {
'fallback': ['zh-hans'],
},
'zh-sg': {
'fallback': ['zh-hans'],
},
'zh-tw': {
'fallback': ['zh-hant'],
'bidi': False,
'code': 'zh-tw',
'name': 'Traditional Chinese',
'name_local': '繁體中文',
},
}
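# Sketch (illustrative, not Django's actual helper): how the 'fallback' key above
# might be consulted when a code such as 'zh-hk' has no full entry of its own.
#
#   def resolve_lang_info(code):
#       info = LANG_INFO[code]
#       if 'name' in info:
#           return info
#       for fallback_code in info.get('fallback', []):
#           try:
#               return resolve_lang_info(fallback_code)
#           except KeyError:
#               continue
#       raise KeyError("No language info found for %r" % code)
#
#   resolve_lang_info('zh-hk')['name']   # -> 'Traditional Chinese', via 'zh-hant'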
# -*- encoding: utf-8 -*-
##############################################################################
#
# Author: Nicolas Bessi, Guewen Baconnier
# Copyright Camptocamp SA 2011
# SQL inspired from OpenERP original code
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# TODO: refactor this helper so it acts more like a mixin.
# By using properties we would have a simpler signature in the functions.
from collections import defaultdict
from datetime import datetime
from openerp.tools import DEFAULT_SERVER_DATE_FORMAT
from .common_reports import CommonReportHeaderWebkit
class CommonPartnersReportHeaderWebkit(CommonReportHeaderWebkit):
"""Define common helper for partner oriented financial report"""
######################################
# Account move line retrieval helper #
######################################
def get_partners_move_lines_ids(self, account_id, main_filter, start, stop,
target_move,
exclude_reconcile=False,
partner_filter=False):
filter_from = False
if main_filter in ('filter_period', 'filter_no'):
filter_from = 'period'
elif main_filter == 'filter_date':
filter_from = 'date'
if filter_from:
return self._get_partners_move_line_ids(
filter_from, account_id, start, stop, target_move,
exclude_reconcile=exclude_reconcile,
partner_filter=partner_filter)
def _get_first_special_period(self):
"""
Returns the browse record of the period with the `special` flag, which
is the special period of the first fiscal year used in the accounting.
i.e. it searches the first fiscal year with at least one journal entry,
and it returns the first period for which `special` is True
in this fiscal year.
It is used for example in the partners reports, where we have to
include the first, and only the first opening period.
:return: browse record of the first special period.
"""
move_line_obj = self.pool.get('account.move.line')
first_entry_id = move_line_obj.search(
self.cr, self.uid, [], order='date ASC', limit=1)
# it means there is no entry at all; that's unlikely to happen, but
# it may
if not first_entry_id:
return
first_entry = move_line_obj.browse(
self.cr, self.uid, first_entry_id[0])
fiscalyear = first_entry.period_id.fiscalyear_id
special_periods = [
period for period in fiscalyear.period_ids if period.special]
# so, we have no opening period on the first year, nothing to return
if not special_periods:
return
return min(special_periods,
key=lambda p: datetime.strptime(p.date_start,
DEFAULT_SERVER_DATE_FORMAT))
def _get_period_range_from_start_period(self, start_period,
include_opening=False,
fiscalyear=False,
stop_at_previous_opening=False):
"""We retrieve all periods before start period"""
periods = super(CommonPartnersReportHeaderWebkit, self).\
_get_period_range_from_start_period(
start_period,
include_opening=include_opening,
fiscalyear=fiscalyear,
stop_at_previous_opening=stop_at_previous_opening)
first_special = self._get_first_special_period()
if first_special and first_special.id not in periods:
periods.append(first_special.id)
return periods
def _get_query_params_from_periods(self, period_start, period_stop,
mode='exclude_opening'):
"""
Build the part of the sql "where clause" which filters on the selected
periods.
:param browse_record period_start: first period of the report to print
:param browse_record period_stop: last period of the report to print
:param str mode: deprecated
"""
# we do not want opening period so we exclude opening
periods = self.pool.get('account.period').build_ctx_periods(
self.cr, self.uid, period_start.id, period_stop.id)
if not periods:
return []
if mode != 'include_opening':
periods = self.exclude_opening_periods(periods)
search_params = {'period_ids': tuple(periods),
'date_stop': period_stop.date_stop}
sql_conditions = ""
if periods:
sql_conditions = " AND account_move_line.period_id in \
%(period_ids)s"
return sql_conditions, search_params
def _get_query_params_from_dates(self, date_start, date_stop, **args):
"""
Build the part of the sql where clause based on the dates to print.
:param str date_start: start date of the report to print
:param str date_stop: end date of the report to print
"""
periods = self._get_opening_periods()
if not periods:
periods = (-1,)
search_params = {'period_ids': tuple(periods),
'date_start': date_start,
'date_stop': date_stop}
sql_conditions = " AND account_move_line.period_id not \
in %(period_ids)s \
AND account_move_line.date between \
date(%(date_start)s) and date((%(date_stop)s))"
return sql_conditions, search_params
def _get_partners_move_line_ids(self, filter_from, account_id, start, stop,
target_move,
opening_mode='exclude_opening',
exclude_reconcile=False,
partner_filter=None):
"""
:param str filter_from: "period" or "date"
:param int account_id: id of the account where to search move lines
:param str or browse_record start: start date or start period
:param str or browse_record stop: stop date or stop period
:param str target_move: 'posted' or 'all'
:param opening_mode: deprecated
:param boolean exclude_reconcile: whether the reconciled entries are
filtered out or not
:param list partner_filter: list of partner ids, will filter on their
move lines
"""
final_res = defaultdict(list)
sql_select = "SELECT account_move_line.id, \
account_move_line.partner_id FROM account_move_line"
sql_joins = ''
sql_where = " WHERE account_move_line.account_id = %(account_ids)s " \
" AND account_move_line.state = 'valid' "
method = getattr(self, '_get_query_params_from_' + filter_from + 's')
sql_conditions, search_params = method(start, stop)
sql_where += sql_conditions
if exclude_reconcile:
sql_where += (" AND ((account_move_line.reconcile_id IS NULL)"
" OR (account_move_line.reconcile_id IS NOT NULL \
AND account_move_line.last_rec_date > \
date(%(date_stop)s)))")
if partner_filter:
sql_where += " AND account_move_line.partner_id \
in %(partner_ids)s"
if target_move == 'posted':
sql_joins += "INNER JOIN account_move \
ON account_move_line.move_id = account_move.id"
sql_where += " AND account_move.state = %(target_move)s"
search_params.update({'target_move': target_move})
search_params.update({
'account_ids': account_id,
'partner_ids': tuple(partner_filter),
})
sql = ' '.join((sql_select, sql_joins, sql_where))
self.cursor.execute(sql, search_params)
res = self.cursor.dictfetchall()
if res:
for row in res:
final_res[row['partner_id']].append(row['id'])
return final_res
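# Shape of the returned value (illustrative ids): a defaultdict mapping each
# partner id to its matching move line ids, e.g.
#   {7: [1021, 1043], 12: [1088], None: [1102]}
# where the None key collects move lines that have no partner set.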
def _get_clearance_move_line_ids(self, move_line_ids, date_stop,
date_until):
if not move_line_ids:
return []
move_line_obj = self.pool.get('account.move.line')
# we do not use the ORM in order to gain performance
# In this case we would have to test the effective gain over an iteration
# Actually the ORM does not allow a DISTINCT query here anyway
sql = "Select distinct reconcile_id from account_move_line \
where id in %s"
self.cursor.execute(sql, (tuple(move_line_ids),))
rec_ids = self.cursor.fetchall()
if rec_ids:
rec_ids = [x[0] for x in rec_ids]
l_ids = move_line_obj.search(self.cursor,
self.uid,
[('reconcile_id', 'in', rec_ids),
('date', '>=', date_stop),
('date', '<=', date_until)])
return l_ids
else:
return []
##############################################
# Initial Partner Balance helper #
##############################################
def _tree_move_line_ids(self, move_lines_data, key=None):
"""
move_lines_data must be a list of dict which contains at least keys :
- account_id
- partner_id
- other keys with values of the line
- if param key is defined, only this key will be inserted in the tree
returns a tree like
res[account_id.1][partner_id.1][move_line.1,
move_line.2]
[partner_id.2][move_line.3]
res[account_id.2][partner_id.1][move_line.4]
"""
res = defaultdict(dict)
for row in move_lines_data[:]:
account_id = row.pop('account_id')
partner_id = row.pop('partner_id')
if key:
res[account_id].setdefault(partner_id, []).append(row[key])
else:
res[account_id][partner_id] = row
return res
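# Illustration (made-up ids): given
#   [{'account_id': 1, 'partner_id': 7, 'id': 10, 'debit': 100.0},
#    {'account_id': 1, 'partner_id': 9, 'id': 11, 'debit': 50.0}]
# and key='id', the tree becomes {1: {7: [10], 9: [11]}}; without a key, each
# partner maps to the remaining columns of its row instead of a list of ids.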
def _partners_initial_balance_line_ids(self, account_ids, start_period,
partner_filter,
exclude_reconcile=False,
force_period_ids=False,
date_stop=None):
# take ALL previous periods
period_ids = force_period_ids \
if force_period_ids \
else self._get_period_range_from_start_period(
start_period, fiscalyear=False, include_opening=False)
if not period_ids:
period_ids = [-1]
search_param = {
'date_start': start_period.date_start,
'period_ids': tuple(period_ids),
'account_ids': tuple(account_ids),
}
sql = ("SELECT ml.id, ml.account_id, ml.partner_id "
"FROM account_move_line ml "
"INNER JOIN account_account a "
"ON a.id = ml.account_id "
"WHERE ml.period_id in %(period_ids)s "
"AND ml.account_id in %(account_ids)s ")
if exclude_reconcile:
if not date_stop:
raise Exception(
"Missing \"date_stop\" to compute the open invoices.")
search_param.update({'date_stop': date_stop})
sql += ("AND ((ml.reconcile_id IS NULL) "
"OR (ml.reconcile_id IS NOT NULL \
AND ml.last_rec_date > date(%(date_stop)s))) ")
if partner_filter:
sql += "AND ml.partner_id in %(partner_ids)s "
search_param.update({'partner_ids': tuple(partner_filter)})
self.cursor.execute(sql, search_param)
return self.cursor.dictfetchall()
def _compute_partners_initial_balances(self, account_ids, start_period,
partner_filter=None,
exclude_reconcile=False,
force_period_ids=False):
"""We compute initial balance.
If form is filtered by date all initial balance are equal to 0
This function will sum pear and apple in currency amount if account
as no secondary currency"""
if isinstance(account_ids, (int, long)):
account_ids = [account_ids]
move_line_ids = self._partners_initial_balance_line_ids(
account_ids, start_period, partner_filter,
exclude_reconcile=exclude_reconcile,
force_period_ids=force_period_ids)
if not move_line_ids:
move_line_ids = [{'id': -1}]
sql = ("SELECT ml.account_id, ml.partner_id,"
" sum(ml.debit) as debit, sum(ml.credit) as credit,"
" sum(ml.debit-ml.credit) as init_balance,"
" CASE WHEN a.currency_id ISNULL THEN 0.0\
ELSE sum(ml.amount_currency) \
END as init_balance_currency, "
" c.name as currency_name "
"FROM account_move_line ml "
"INNER JOIN account_account a "
"ON a.id = ml.account_id "
"LEFT JOIN res_currency c "
"ON c.id = a.currency_id "
"WHERE ml.id in %(move_line_ids)s "
"GROUP BY ml.account_id, ml.partner_id, a.currency_id, c.name")
search_param = {
'move_line_ids': tuple([move_line['id'] for move_line in
move_line_ids])}
self.cursor.execute(sql, search_param)
res = self.cursor.dictfetchall()
return self._tree_move_line_ids(res)
############################################################
# Partner specific helper #
############################################################
def _order_partners(self, *args):
"""We get the partner linked to all current accounts that are used.
We also use ensure that partner are ordered by name
args must be list"""
res = []
partner_ids = []
for arg in args:
if arg:
partner_ids += arg
if not partner_ids:
return []
existing_partner_ids = [
partner_id for partner_id in partner_ids if partner_id]
if existing_partner_ids:
# We may use orm here as the performance optimization is not that
# big
sql = ("SELECT name|| ' ' ||CASE WHEN ref IS NOT NULL \
THEN '('||ref||')' \
ELSE '' END, id, ref, name"
" FROM res_partner \
WHERE id IN %s ORDER BY LOWER(name), ref")
self.cursor.execute(sql, (tuple(set(existing_partner_ids)),))
res = self.cursor.fetchall()
# move lines without partners, set None for empty partner
if not all(partner_ids):
res.append((None, None, None, None))
if not res:
return []
return res
# Copyright 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest_lib import exceptions as lib_exc
from tempest.api.compute.security_groups import base
from tempest.common.utils import data_utils
from tempest.common import waiters
from tempest import test
class SecurityGroupsTestJSON(base.BaseSecurityGroupsTest):
@classmethod
def setup_clients(cls):
super(SecurityGroupsTestJSON, cls).setup_clients()
cls.client = cls.security_groups_client
@test.attr(type='smoke')
@test.idempotent_id('eb2b087d-633d-4d0d-a7bd-9e6ba35b32de')
@test.services('network')
def test_security_groups_create_list_delete(self):
# Positive test: should return the list of Security Groups
# Create 3 Security Groups
security_group_list = []
for i in range(3):
body = self.create_security_group()
security_group_list.append(body)
# Fetch all Security Groups and verify the list
# has all created Security Groups
fetched_list = self.client.list_security_groups()['security_groups']
# Now check if all the created Security Groups are in fetched list
missing_sgs = \
[sg for sg in security_group_list if sg not in fetched_list]
self.assertFalse(missing_sgs,
"Failed to find Security Group %s in fetched "
"list" % ', '.join(m_group['name']
for m_group in missing_sgs))
# Delete all security groups
for sg in security_group_list:
self.client.delete_security_group(sg['id'])
self.client.wait_for_resource_deletion(sg['id'])
# Now check if all the created Security Groups are deleted
fetched_list = self.client.list_security_groups()['security_groups']
deleted_sgs = \
[sg for sg in security_group_list if sg in fetched_list]
self.assertFalse(deleted_sgs,
"Failed to delete Security Group %s "
"list" % ', '.join(m_group['name']
for m_group in deleted_sgs))
@test.idempotent_id('ecc0da4a-2117-48af-91af-993cca39a615')
@test.services('network')
def test_security_group_create_get_delete(self):
# Security Group should be created, fetched and deleted
# with a space character in the name, along with
# leading and trailing spaces
s_name = ' %s ' % data_utils.rand_name('securitygroup ')
securitygroup = self.create_security_group(name=s_name)
self.assertIn('name', securitygroup)
securitygroup_name = securitygroup['name']
self.assertEqual(securitygroup_name, s_name,
"The created Security Group name is "
"not equal to the requested name")
# Now fetch the created Security Group by its 'id'
fetched_group = (self.client.show_security_group(securitygroup['id'])
['security_group'])
self.assertEqual(securitygroup, fetched_group,
"The fetched Security Group is different "
"from the created Group")
self.client.delete_security_group(securitygroup['id'])
self.client.wait_for_resource_deletion(securitygroup['id'])
@test.idempotent_id('fe4abc0d-83f5-4c50-ad11-57a1127297a2')
@test.services('network')
def test_server_security_groups(self):
# Checks that security groups may be added and linked to a server
# and not deleted if the server is active.
# Create a couple security groups that we will use
# for the server resource this test creates
sg = self.create_security_group()
sg2 = self.create_security_group()
# Create server and add the security group created
# above to the server we just created
server_name = data_utils.rand_name('server')
server = self.create_test_server(name=server_name)
server_id = server['id']
waiters.wait_for_server_status(self.servers_client, server_id,
'ACTIVE')
self.servers_client.add_security_group(server_id, sg['name'])
# Check that we are not able to delete the security
# group since it is in use by an active server
self.assertRaises(lib_exc.BadRequest,
self.client.delete_security_group,
sg['id'])
# Reboot and add the other security group
self.servers_client.reboot_server(server_id, 'HARD')
waiters.wait_for_server_status(self.servers_client, server_id,
'ACTIVE')
self.servers_client.add_security_group(server_id, sg2['name'])
# Check that we are not able to delete the other security
# group since it is in use by an active server
self.assertRaises(lib_exc.BadRequest,
self.client.delete_security_group,
sg2['id'])
# Shutdown the server and then verify we can destroy the
# security groups, since no active server instance is using them
self.servers_client.delete_server(server_id)
waiters.wait_for_server_termination(self.servers_client, server_id)
self.client.delete_security_group(sg['id'])
self.client.delete_security_group(sg2['id'])
@test.idempotent_id('7d4e1d3c-3209-4d6d-b020-986304ebad1f')
@test.services('network')
def test_update_security_groups(self):
# Update security group name and description
# Create a security group
securitygroup = self.create_security_group()
self.assertIn('id', securitygroup)
securitygroup_id = securitygroup['id']
# Update the name and description
s_new_name = data_utils.rand_name('sg-hth')
s_new_des = data_utils.rand_name('description-hth')
self.client.update_security_group(securitygroup_id,
name=s_new_name,
description=s_new_des)
# get the security group
fetched_group = (self.client.show_security_group(securitygroup_id)
['security_group'])
self.assertEqual(s_new_name, fetched_group['name'])
self.assertEqual(s_new_des, fetched_group['description'])
# -*- coding: utf-8 -*-
###############################################################################
#
# SendEmail
# Sends an email using a specified email server.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class SendEmail(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the SendEmail Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(SendEmail, self).__init__(temboo_session, '/Library/Utilities/Email/SendEmail')
def new_input_set(self):
return SendEmailInputSet()
def _make_result_set(self, result, path):
return SendEmailResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return SendEmailChoreographyExecution(session, exec_id, path)
class SendEmailInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the SendEmail
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_AttachmentName(self, value):
"""
Set the value of the AttachmentName input for this Choreo. ((optional, string) The name of the file to attach to the email.)
"""
super(SendEmailInputSet, self)._set_input('AttachmentName', value)
def set_AttachmentURL(self, value):
"""
Set the value of the AttachmentURL input for this Choreo. ((optional, string) URL of a hosted file that you wish to add as an attachment. Use this instead of a normal Attachment.)
"""
super(SendEmailInputSet, self)._set_input('AttachmentURL', value)
def set_Attachment(self, value):
"""
Set the value of the Attachment input for this Choreo. ((optional, string) The Base64 encoded contents of the file to attach to the email. Use this instead of AttachmentURL.)
"""
super(SendEmailInputSet, self)._set_input('Attachment', value)
def set_BCC(self, value):
"""
Set the value of the BCC input for this Choreo. ((optional, string) An email address to BCC on the email you're sending. Can be a comma separated list of email addresses.)
"""
super(SendEmailInputSet, self)._set_input('BCC', value)
def set_CC(self, value):
"""
Set the value of the CC input for this Choreo. ((optional, string) An email address to CC on the email you're sending. Can be a comma separated list of email addresses.)
"""
super(SendEmailInputSet, self)._set_input('CC', value)
def set_FromAddress(self, value):
"""
Set the value of the FromAddress input for this Choreo. ((conditional, string) The name and email address that the message is being sent from.)
"""
super(SendEmailInputSet, self)._set_input('FromAddress', value)
def set_MessageBody(self, value):
"""
Set the value of the MessageBody input for this Choreo. ((required, string) The message body for the email.)
"""
super(SendEmailInputSet, self)._set_input('MessageBody', value)
def set_Password(self, value):
"""
Set the value of the Password input for this Choreo. ((required, password) The password for your email account.)
"""
super(SendEmailInputSet, self)._set_input('Password', value)
def set_Port(self, value):
"""
Set the value of the Port input for this Choreo. ((required, integer) Specify the port number (i.e. 25 or 465).)
"""
super(SendEmailInputSet, self)._set_input('Port', value)
def set_Server(self, value):
"""
Set the value of the Server input for this Choreo. ((required, string) The name or IP address of the email server.)
"""
super(SendEmailInputSet, self)._set_input('Server', value)
def set_Subject(self, value):
"""
Set the value of the Subject input for this Choreo. ((required, string) The subject line of the email.)
"""
super(SendEmailInputSet, self)._set_input('Subject', value)
def set_ToAddress(self, value):
"""
Set the value of the ToAddress input for this Choreo. ((required, string) The email address that you want to send an email to. Can be a comma separated list of email addresses.)
"""
super(SendEmailInputSet, self)._set_input('ToAddress', value)
def set_UseSSL(self, value):
"""
Set the value of the UseSSL input for this Choreo. ((optional, boolean) Set to 1 to connect over SSL. Set to 0 for no SSL. Defaults to 1.)
"""
super(SendEmailInputSet, self)._set_input('UseSSL', value)
def set_Username(self, value):
"""
Set the value of the Username input for this Choreo. ((required, string) Your username for your email account.)
"""
super(SendEmailInputSet, self)._set_input('Username', value)
class SendEmailResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the SendEmail Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Success(self):
"""
Retrieve the value for the "Success" output from this Choreo execution. ((boolean) Indicates the result of the SMTP operation. The value will be "true" for a successful request.)
"""
return self._output.get('Success', None)
class SendEmailChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return SendEmailResultSet(response, path)
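# Illustrative usage sketch (not part of the original file): executing the SendEmail
# Choreo typically looks like the following. The session credentials, SMTP values and
# the execute_with_results() call shown here are assumptions based on the standard
# Temboo SDK pattern, not something defined above.
#
#     from temboo.core.session import TembooSession
#
#     session = TembooSession('ACCOUNT_NAME', 'APP_KEY_NAME', 'APP_KEY_VALUE')
#     choreo = SendEmail(session)
#     inputs = choreo.new_input_set()
#     inputs.set_Server('smtp.example.com')
#     inputs.set_Port('465')
#     inputs.set_Username('user@example.com')
#     inputs.set_Password('secret')
#     inputs.set_FromAddress('user@example.com')
#     inputs.set_ToAddress('friend@example.com')
#     inputs.set_Subject('Hello')
#     inputs.set_MessageBody('Sent via the SendEmail Choreo.')
#     results = choreo.execute_with_results(inputs)
#     print(results.get_Success())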
# -*- coding: utf-8 -*-
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c) 2016 Thomas Krahn (@Nosmoht)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
try:
import json
except ImportError:
import simplejson as json
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.six import PY3
from ansible.module_utils.six.moves.urllib.parse import quote
from ansible.module_utils.urls import fetch_url
class IPAClient(object):
def __init__(self, module, host, port, protocol):
self.host = host
self.port = port
self.protocol = protocol
self.module = module
self.headers = None
def get_base_url(self):
return '%s://%s/ipa' % (self.protocol, self.host)
def get_json_url(self):
return '%s/session/json' % self.get_base_url()
def login(self, username, password):
url = '%s/session/login_password' % self.get_base_url()
data = 'user=%s&password=%s' % (quote(username, safe=''), quote(password, safe=''))
headers = {'referer': self.get_base_url(),
'Content-Type': 'application/x-www-form-urlencoded',
'Accept': 'text/plain'}
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(data), headers=headers)
status_code = info['status']
if status_code not in [200, 201, 204]:
self._fail('login', info['msg'])
self.headers = {'referer': self.get_base_url(),
'Content-Type': 'application/json',
'Accept': 'application/json',
'Cookie': resp.info().get('Set-Cookie')}
except Exception:
e = get_exception()
self._fail('login', str(e))
def _fail(self, msg, e):
if 'message' in e:
err_string = e.get('message')
else:
err_string = e
self.module.fail_json(msg='%s: %s' % (msg, err_string))
def _post_json(self, method, name, item=None):
if item is None:
item = {}
url = '%s/session/json' % self.get_base_url()
data = {'method': method, 'params': [[name], item]}
try:
resp, info = fetch_url(module=self.module, url=url, data=to_bytes(json.dumps(data)), headers=self.headers)
status_code = info['status']
if status_code not in [200, 201, 204]:
self._fail(method, info['msg'])
except Exception:
e = get_exception()
self._fail('post %s' % method, str(e))
if PY3:
charset = resp.headers.get_content_charset('latin-1')
else:
response_charset = resp.headers.getparam('charset')
if response_charset:
charset = response_charset
else:
charset = 'latin-1'
resp = json.loads(to_text(resp.read(), encoding=charset), encoding=charset)
err = resp.get('error')
if err is not None:
            self._fail('response %s' % method, err)
if 'result' in resp:
result = resp.get('result')
if 'result' in result:
result = result.get('result')
if isinstance(result, list):
if len(result) > 0:
return result[0]
else:
return {}
return result
return None
def get_diff(self, ipa_data, module_data):
result = []
for key in module_data.keys():
mod_value = module_data.get(key, None)
if isinstance(mod_value, list):
default = []
else:
default = None
ipa_value = ipa_data.get(key, default)
if isinstance(ipa_value, list) and not isinstance(mod_value, list):
mod_value = [mod_value]
if isinstance(ipa_value, list) and isinstance(mod_value, list):
mod_value = sorted(mod_value)
ipa_value = sorted(ipa_value)
if mod_value != ipa_value:
result.append(key)
return result
def modify_if_diff(self, name, ipa_list, module_list, add_method, remove_method, item=None):
changed = False
diff = list(set(ipa_list) - set(module_list))
if len(diff) > 0:
changed = True
if not self.module.check_mode:
if item:
remove_method(name=name, item={item: diff})
else:
remove_method(name=name, item=diff)
diff = list(set(module_list) - set(ipa_list))
if len(diff) > 0:
changed = True
if not self.module.check_mode:
if item:
add_method(name=name, item={item: diff})
else:
add_method(name=name, item=diff)
return changed
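# Illustrative sketch (not part of the original snippet): concrete Ansible ipa_*
# modules build thin wrappers around IPAClient._post_json(), roughly like this.
# The FreeIPA JSON-RPC method names 'user_find' and 'user_add' follow the FreeIPA
# API naming convention; the class and parameters below are assumptions for
# demonstration only.
class ExampleUserClient(IPAClient):
    def user_find(self, name):
        # FreeIPA *_find methods take the search criteria in the item dict
        return self._post_json(method='user_find', name=None,
                               item={'all': True, 'uid': name})

    def user_add(self, name, item):
        return self._post_json(method='user_add', name=name, item=item)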
# -*- coding: utf-8 -*-
''' Test case for QObject::connectNotify()'''
import unittest
from PySide.QtCore import *
from helper import UsesQCoreApplication
def cute_slot():
pass
class Obj(QObject):
def __init__(self):
QObject.__init__(self)
self.con_notified = False
self.dis_notified = False
self.signal = ""
def connectNotify(self, signal):
self.con_notified = True
self.signal = signal
def disconnectNotify(self, signal):
self.dis_notified = True
def reset(self):
self.con_notified = False
self.dis_notified = False
class TestQObjectConnectNotify(UsesQCoreApplication):
'''Test case for QObject::connectNotify'''
def setUp(self):
UsesQCoreApplication.setUp(self)
self.called = False
def tearDown(self):
UsesQCoreApplication.tearDown(self)
def testBasic(self):
sender = Obj()
receiver = QObject()
sender.connect(SIGNAL("destroyed()"), receiver, SLOT("deleteLater()"))
self.assert_(sender.con_notified)
self.assertEqual(sender.signal, SIGNAL("destroyed()"))
sender.disconnect(SIGNAL("destroyed()"), receiver, SLOT("deleteLater()"))
self.assert_(sender.dis_notified)
def testPySignal(self):
sender = Obj()
receiver = QObject()
sender.connect(SIGNAL("foo()"), receiver, SLOT("deleteLater()"))
self.assert_(sender.con_notified)
sender.disconnect(SIGNAL("foo()"), receiver, SLOT("deleteLater()"))
self.assert_(sender.dis_notified)
def testPySlots(self):
sender = Obj()
receiver = QObject()
sender.connect(SIGNAL("destroyed()"), cute_slot)
self.assert_(sender.con_notified)
sender.disconnect(SIGNAL("destroyed()"), cute_slot)
self.assert_(sender.dis_notified)
def testpyAll(self):
sender = Obj()
receiver = QObject()
sender.connect(SIGNAL("foo()"), cute_slot)
self.assert_(sender.con_notified)
sender.disconnect(SIGNAL("foo()"), cute_slot)
self.assert_(sender.dis_notified)
if __name__ == '__main__':
unittest.main()
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.internet.posixbase} and supporting code.
"""
from __future__ import division, absolute_import
from twisted.python.compat import _PY3
from twisted.trial.unittest import TestCase
from twisted.internet.defer import Deferred
from twisted.internet.posixbase import PosixReactorBase, _Waker
from twisted.internet.protocol import ServerFactory
skipSockets = None
if _PY3:
skipSockets = "Re-enable when Python 3 port supports AF_UNIX"
else:
try:
from twisted.internet import unix
from twisted.test.test_unix import ClientProto
except ImportError:
skipSockets = "Platform does not support AF_UNIX sockets"
from twisted.internet.tcp import Port
from twisted.internet import reactor
class TrivialReactor(PosixReactorBase):
def __init__(self):
self._readers = {}
self._writers = {}
PosixReactorBase.__init__(self)
def addReader(self, reader):
self._readers[reader] = True
def removeReader(self, reader):
del self._readers[reader]
def addWriter(self, writer):
self._writers[writer] = True
def removeWriter(self, writer):
del self._writers[writer]
class PosixReactorBaseTests(TestCase):
"""
Tests for L{PosixReactorBase}.
"""
def _checkWaker(self, reactor):
self.assertIsInstance(reactor.waker, _Waker)
self.assertIn(reactor.waker, reactor._internalReaders)
self.assertIn(reactor.waker, reactor._readers)
def test_wakerIsInternalReader(self):
"""
When L{PosixReactorBase} is instantiated, it creates a waker and adds
it to its internal readers set.
"""
reactor = TrivialReactor()
self._checkWaker(reactor)
def test_removeAllSkipsInternalReaders(self):
"""
Any L{IReadDescriptors} in L{PosixReactorBase._internalReaders} are
left alone by L{PosixReactorBase._removeAll}.
"""
reactor = TrivialReactor()
extra = object()
reactor._internalReaders.add(extra)
reactor.addReader(extra)
reactor._removeAll(reactor._readers, reactor._writers)
self._checkWaker(reactor)
self.assertIn(extra, reactor._internalReaders)
self.assertIn(extra, reactor._readers)
def test_removeAllReturnsRemovedDescriptors(self):
"""
L{PosixReactorBase._removeAll} returns a list of removed
L{IReadDescriptor} and L{IWriteDescriptor} objects.
"""
reactor = TrivialReactor()
reader = object()
writer = object()
reactor.addReader(reader)
reactor.addWriter(writer)
removed = reactor._removeAll(
reactor._readers, reactor._writers)
self.assertEqual(set(removed), set([reader, writer]))
self.assertNotIn(reader, reactor._readers)
self.assertNotIn(writer, reactor._writers)
class TCPPortTests(TestCase):
"""
Tests for L{twisted.internet.tcp.Port}.
"""
if not isinstance(reactor, PosixReactorBase):
skip = "Non-posixbase reactor"
def test_connectionLostFailed(self):
"""
L{Port.stopListening} returns a L{Deferred} which errbacks if
L{Port.connectionLost} raises an exception.
"""
port = Port(12345, ServerFactory())
port.connected = True
port.connectionLost = lambda reason: 1 // 0
return self.assertFailure(port.stopListening(), ZeroDivisionError)
class TimeoutReportReactor(PosixReactorBase):
"""
A reactor which is just barely runnable and which cannot monitor any
readers or writers, and which fires a L{Deferred} with the timeout
passed to its C{doIteration} method as soon as that method is invoked.
"""
def __init__(self):
PosixReactorBase.__init__(self)
self.iterationTimeout = Deferred()
self.now = 100
def addReader(self, reader):
"""
Ignore the reader. This is necessary because the waker will be
added. However, we won't actually monitor it for any events.
"""
def removeAll(self):
"""
There are no readers or writers, so there is nothing to remove.
This will be called when the reactor stops, though, so it must be
implemented.
"""
return []
def seconds(self):
"""
Override the real clock with a deterministic one that can be easily
controlled in a unit test.
"""
return self.now
def doIteration(self, timeout):
d = self.iterationTimeout
if d is not None:
self.iterationTimeout = None
d.callback(timeout)
class IterationTimeoutTests(TestCase):
"""
Tests for the timeout argument L{PosixReactorBase.run} calls
L{PosixReactorBase.doIteration} with in the presence of various delayed
calls.
"""
def _checkIterationTimeout(self, reactor):
timeout = []
reactor.iterationTimeout.addCallback(timeout.append)
reactor.iterationTimeout.addCallback(lambda ignored: reactor.stop())
reactor.run()
return timeout[0]
def test_noCalls(self):
"""
If there are no delayed calls, C{doIteration} is called with a
timeout of C{None}.
"""
reactor = TimeoutReportReactor()
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, None)
def test_delayedCall(self):
"""
If there is a delayed call, C{doIteration} is called with a timeout
which is the difference between the current time and the time at
which that call is to run.
"""
reactor = TimeoutReportReactor()
reactor.callLater(100, lambda: None)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 100)
def test_timePasses(self):
"""
If a delayed call is scheduled and then some time passes, the
timeout passed to C{doIteration} is reduced by the amount of time
which passed.
"""
reactor = TimeoutReportReactor()
reactor.callLater(100, lambda: None)
reactor.now += 25
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 75)
def test_multipleDelayedCalls(self):
"""
If there are several delayed calls, C{doIteration} is called with a
timeout which is the difference between the current time and the
time at which the earlier of the two calls is to run.
"""
reactor = TimeoutReportReactor()
reactor.callLater(50, lambda: None)
reactor.callLater(10, lambda: None)
reactor.callLater(100, lambda: None)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 10)
def test_resetDelayedCall(self):
"""
If a delayed call is reset, the timeout passed to C{doIteration} is
based on the interval between the time when reset is called and the
new delay of the call.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
reactor.now += 25
call.reset(15)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 15)
def test_delayDelayedCall(self):
"""
If a delayed call is re-delayed, the timeout passed to
C{doIteration} is based on the remaining time before the call would
have been made and the additional amount of time passed to the delay
method.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
reactor.now += 10
call.delay(20)
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, 60)
def test_cancelDelayedCall(self):
"""
If the only delayed call is canceled, C{None} is the timeout passed
to C{doIteration}.
"""
reactor = TimeoutReportReactor()
call = reactor.callLater(50, lambda: None)
call.cancel()
timeout = self._checkIterationTimeout(reactor)
self.assertEqual(timeout, None)
class ConnectedDatagramPortTestCase(TestCase):
"""
Test connected datagram UNIX sockets.
"""
if skipSockets is not None:
skip = skipSockets
def test_connectionFailedDoesntCallLoseConnection(self):
"""
L{ConnectedDatagramPort} does not call the deprecated C{loseConnection}
in L{ConnectedDatagramPort.connectionFailed}.
"""
def loseConnection():
"""
Dummy C{loseConnection} method. C{loseConnection} is deprecated and
should not get called.
"""
self.fail("loseConnection is deprecated and should not get called.")
port = unix.ConnectedDatagramPort(None, ClientProto())
port.loseConnection = loseConnection
port.connectionFailed("goodbye")
def test_connectionFailedCallsStopListening(self):
"""
L{ConnectedDatagramPort} calls L{ConnectedDatagramPort.stopListening}
instead of the deprecated C{loseConnection} in
L{ConnectedDatagramPort.connectionFailed}.
"""
self.called = False
def stopListening():
"""
Dummy C{stopListening} method.
"""
self.called = True
port = unix.ConnectedDatagramPort(None, ClientProto())
port.stopListening = stopListening
port.connectionFailed("goodbye")
self.assertEqual(self.called, True)
"""Base email backend class."""
class BaseEmailBackend(object):
"""
Base class for email backend implementations.
Subclasses must at least overwrite send_messages().
open() and close() can be called indirectly by using a backend object as a
context manager:
with backend as connection:
# do something with connection
pass
"""
def __init__(self, fail_silently=False, **kwargs):
self.fail_silently = fail_silently
def open(self):
"""Open a network connection.
This method can be overwritten by backend implementations to
open a network connection.
It's up to the backend implementation to track the status of
a network connection if it's needed by the backend.
This method can be called by applications to force a single
network connection to be used when sending mails. See the
send_messages() method of the SMTP backend for a reference
implementation.
The default implementation does nothing.
"""
pass
def close(self):
"""Close a network connection."""
pass
def __enter__(self):
try:
self.open()
except Exception:
self.close()
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def send_messages(self, email_messages):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
raise NotImplementedError('subclasses of BaseEmailBackend must override send_messages() method')
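# Illustrative sketch (not part of the original module): a minimal backend that
# satisfies the BaseEmailBackend contract by overriding send_messages(). The class
# name and the print-based "delivery" are assumptions for demonstration only; a
# real backend would hand each message to a mail server.
class ExampleConsoleBackend(BaseEmailBackend):
    def send_messages(self, email_messages):
        count = 0
        for message in email_messages:
            print(message)  # stand-in for actual delivery
            count += 1
        return count

# Usage follows the context-manager pattern described in the base class docstring:
#     with ExampleConsoleBackend() as connection:
#         connection.send_messages(messages)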
# Some simple queue module tests, plus some failure conditions
# to ensure the Queue locks remain stable.
import queue
import time
import unittest
from test import support
threading = support.import_module('threading')
QUEUE_SIZE = 5
def qfull(q):
return q.maxsize > 0 and q.qsize() == q.maxsize
# A thread to run a function that unclogs a blocked Queue.
class _TriggerThread(threading.Thread):
def __init__(self, fn, args):
self.fn = fn
self.args = args
self.startedEvent = threading.Event()
threading.Thread.__init__(self)
def run(self):
# The sleep isn't necessary, but is intended to give the blocking
# function in the main thread a chance at actually blocking before
# we unclog it. But if the sleep is longer than the timeout-based
# tests wait in their blocking functions, those tests will fail.
# So we give them much longer timeout values compared to the
# sleep here (I aimed at 10 seconds for blocking functions --
# they should never actually wait that long - they should make
# progress as soon as we call self.fn()).
time.sleep(0.1)
self.startedEvent.set()
self.fn(*self.args)
# Execute a function that blocks, and in a separate thread, a function that
# triggers the release. Returns the result of the blocking function. Caution:
# block_func must guarantee to block until trigger_func is called, and
# trigger_func must guarantee to change queue state so that block_func can make
# enough progress to return. In particular, a block_func that just raises an
# exception regardless of whether trigger_func is called will lead to
# timing-dependent sporadic failures; one such failure went rarely seen and
# undiagnosed for years. So block_func must not raise. If block_func
# is supposed to raise an exception, call do_exceptional_blocking_test()
# instead.
class BlockingTestMixin:
def do_blocking_test(self, block_func, block_args, trigger_func, trigger_args):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
self.result = block_func(*block_args)
# If block_func returned before our thread made the call, we failed!
if not self.t.startedEvent.is_set():
self.fail("blocking function '%r' appeared not to block" %
block_func)
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
return self.result
# Call this instead if block_func is supposed to raise an exception.
def do_exceptional_blocking_test(self,block_func, block_args, trigger_func,
trigger_args, expected_exception_class):
self.t = _TriggerThread(trigger_func, trigger_args)
self.t.start()
try:
try:
block_func(*block_args)
except expected_exception_class:
raise
else:
self.fail("expected exception of kind %r" %
expected_exception_class)
finally:
self.t.join(10) # make sure the thread terminates
if self.t.is_alive():
self.fail("trigger function '%r' appeared to not return" %
trigger_func)
if not self.t.startedEvent.is_set():
self.fail("trigger thread ended but event never set")
class BaseQueueTest(unittest.TestCase, BlockingTestMixin):
def setUp(self):
self.cum = 0
self.cumlock = threading.Lock()
def simple_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
self.assertTrue(q.empty())
self.assertFalse(q.full())
# I guess we better check things actually queue correctly a little :)
q.put(111)
q.put(333)
q.put(222)
target_order = dict(Queue = [111, 333, 222],
LifoQueue = [222, 333, 111],
PriorityQueue = [111, 222, 333])
actual_order = [q.get(), q.get(), q.get()]
self.assertEqual(actual_order, target_order[q.__class__.__name__],
"Didn't seem to queue the correct data!")
for i in range(QUEUE_SIZE-1):
q.put(i)
self.assertTrue(q.qsize(), "Queue should not be empty")
self.assertTrue(not qfull(q), "Queue should not be full")
last = 2 * QUEUE_SIZE
full = 3 * 2 * QUEUE_SIZE
q.put(last)
self.assertTrue(qfull(q), "Queue should be full")
self.assertFalse(q.empty())
self.assertTrue(q.full())
try:
q.put(full, block=0)
self.fail("Didn't appear to block with a full queue")
except queue.Full:
pass
try:
q.put(full, timeout=0.01)
self.fail("Didn't appear to time-out with a full queue")
except queue.Full:
pass
# Test a blocking put
self.do_blocking_test(q.put, (full,), q.get, ())
self.do_blocking_test(q.put, (full, True, 10), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
try:
q.get(block=0)
self.fail("Didn't appear to block with an empty queue")
except queue.Empty:
pass
try:
q.get(timeout=0.01)
self.fail("Didn't appear to time-out with an empty queue")
except queue.Empty:
pass
# Test a blocking get
self.do_blocking_test(q.get, (), q.put, ('empty',))
self.do_blocking_test(q.get, (True, 10), q.put, ('empty',))
def worker(self, q):
while True:
x = q.get()
if x < 0:
q.task_done()
return
with self.cumlock:
self.cum += x
q.task_done()
def queue_join_test(self, q):
self.cum = 0
for i in (0,1):
threading.Thread(target=self.worker, args=(q,)).start()
for i in range(100):
q.put(i)
q.join()
self.assertEqual(self.cum, sum(range(100)),
"q.join() did not block until all tasks were done")
for i in (0,1):
q.put(-1) # instruct the threads to close
q.join() # verify that you can join twice
def test_queue_task_done(self):
# Test to make sure a queue task completed successfully.
q = self.type2test()
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_queue_join(self):
# Test that a queue join()s successfully, and before anything else
# (done twice for insurance).
q = self.type2test()
self.queue_join_test(q)
self.queue_join_test(q)
try:
q.task_done()
except ValueError:
pass
else:
self.fail("Did not detect task count going negative")
def test_simple_queue(self):
# Do it a couple of times on the same queue.
        # Done twice to make sure it works with the same instance reused.
q = self.type2test(QUEUE_SIZE)
self.simple_queue_test(q)
self.simple_queue_test(q)
def test_negative_timeout_raises_exception(self):
q = self.type2test(QUEUE_SIZE)
with self.assertRaises(ValueError):
q.put(1, timeout=-1)
with self.assertRaises(ValueError):
q.get(1, timeout=-1)
def test_nowait(self):
q = self.type2test(QUEUE_SIZE)
for i in range(QUEUE_SIZE):
q.put_nowait(1)
with self.assertRaises(queue.Full):
q.put_nowait(1)
for i in range(QUEUE_SIZE):
q.get_nowait()
with self.assertRaises(queue.Empty):
q.get_nowait()
def test_shrinking_queue(self):
# issue 10110
q = self.type2test(3)
q.put(1)
q.put(2)
q.put(3)
with self.assertRaises(queue.Full):
q.put_nowait(4)
self.assertEqual(q.qsize(), 3)
q.maxsize = 2 # shrink the queue
with self.assertRaises(queue.Full):
q.put_nowait(4)
class QueueTest(BaseQueueTest):
type2test = queue.Queue
class LifoQueueTest(BaseQueueTest):
type2test = queue.LifoQueue
class PriorityQueueTest(BaseQueueTest):
type2test = queue.PriorityQueue
# A Queue subclass that can provoke failure at a moment's notice :)
class FailingQueueException(Exception):
pass
class FailingQueue(queue.Queue):
def __init__(self, *args):
self.fail_next_put = False
self.fail_next_get = False
queue.Queue.__init__(self, *args)
def _put(self, item):
if self.fail_next_put:
self.fail_next_put = False
raise FailingQueueException("You Lose")
return queue.Queue._put(self, item)
def _get(self):
if self.fail_next_get:
self.fail_next_get = False
raise FailingQueueException("You Lose")
return queue.Queue._get(self)
class FailingQueueTest(unittest.TestCase, BlockingTestMixin):
def failing_queue_test(self, q):
if q.qsize():
raise RuntimeError("Call this function with an empty queue")
for i in range(QUEUE_SIZE-1):
q.put(i)
# Test a failing non-blocking put.
q.fail_next_put = True
try:
q.put("oops", block=0)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.fail_next_put = True
try:
q.put("oops", timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a failing blocking put
q.fail_next_put = True
try:
self.do_blocking_test(q.put, ("full",), q.get, ())
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
# Test a failing timeout put
q.fail_next_put = True
try:
self.do_exceptional_blocking_test(q.put, ("full", True, 10), q.get, (),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# Check the Queue isn't damaged.
# put failed, but get succeeded - re-add
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
q.get()
self.assertTrue(not qfull(q), "Queue should not be full")
q.put("last")
self.assertTrue(qfull(q), "Queue should be full")
# Test a blocking put
self.do_blocking_test(q.put, ("full",), q.get, ())
# Empty it
for i in range(QUEUE_SIZE):
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.put("first")
q.fail_next_get = True
try:
q.get()
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.fail_next_get = True
try:
q.get(timeout=0.1)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
q.fail_next_get = True
try:
self.do_exceptional_blocking_test(q.get, (), q.put, ('empty',),
FailingQueueException)
self.fail("The queue didn't fail when it should have")
except FailingQueueException:
pass
# put succeeded, but get failed.
self.assertTrue(q.qsize(), "Queue should not be empty")
q.get()
self.assertTrue(not q.qsize(), "Queue should be empty")
def test_failing_queue(self):
# Test to make sure a queue is functioning correctly.
# Done twice to the same instance.
q = FailingQueue(QUEUE_SIZE)
self.failing_queue_test(q)
self.failing_queue_test(q)
def test_main():
support.run_unittest(QueueTest, LifoQueueTest, PriorityQueueTest,
FailingQueueTest)
if __name__ == "__main__":
test_main()
"""
Storages used to assist in the deprecation of contrib.auth User messages.
"""
from django.contrib.messages import constants
from django.contrib.messages.storage.base import BaseStorage, Message
from django.contrib.auth.models import User
from django.contrib.messages.storage.fallback import FallbackStorage
class UserMessagesStorage(BaseStorage):
"""
Retrieves messages from the User, using the legacy user.message_set API.
This storage is "read-only" insofar as it can only retrieve and delete
messages, not store them.
"""
session_key = '_messages'
def _get_messages_queryset(self):
"""
Returns the QuerySet containing all user messages (or ``None`` if
request.user is not a contrib.auth User).
"""
user = getattr(self.request, 'user', None)
if isinstance(user, User):
return user._message_set.all()
def add(self, *args, **kwargs):
raise NotImplementedError('This message storage is read-only.')
def _get(self, *args, **kwargs):
"""
Retrieves a list of messages assigned to the User. This backend never
stores anything, so all_retrieved is assumed to be False.
"""
queryset = self._get_messages_queryset()
if queryset is None:
# This is a read-only and optional storage, so to ensure other
# storages will also be read if used with FallbackStorage an empty
# list is returned rather than None.
return [], False
messages = []
for user_message in queryset:
messages.append(Message(constants.INFO, user_message.message))
return messages, False
def _store(self, messages, *args, **kwargs):
"""
Removes any messages assigned to the User and returns the list of
messages (since no messages are stored in this read-only storage).
"""
queryset = self._get_messages_queryset()
if queryset is not None:
queryset.delete()
return messages
class LegacyFallbackStorage(FallbackStorage):
"""
Works like ``FallbackStorage`` but also handles retrieving (and clearing)
contrib.auth User messages.
"""
storage_classes = (UserMessagesStorage,) + FallbackStorage.storage_classes
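# Illustrative note (not part of the original module): to activate this storage, a
# project would point the messages framework at it via the MESSAGE_STORAGE setting
# in settings.py; the dotted path below is an assumption about where the class lives.
#
#     MESSAGE_STORAGE = 'myproject.storage.LegacyFallbackStorage'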
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Franck Cuny , 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import pprint

# gce_connect() below also needs libcloud's Provider and get_driver; import them
# here so the snippet is self-contained (historically the embedding module did this).
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
except ImportError:
    pass  # the calling module is expected to report the missing libcloud dependency
USER_AGENT_PRODUCT="Ansible-gce"
USER_AGENT_VERSION="v1"
def gce_connect(module, provider=None):
"""Return a Google Cloud Engine connection."""
service_account_email = module.params.get('service_account_email', None)
pem_file = module.params.get('pem_file', None)
project_id = module.params.get('project_id', None)
# If any of the values are not given as parameters, check the appropriate
# environment variables.
if not service_account_email:
service_account_email = os.environ.get('GCE_EMAIL', None)
if not project_id:
project_id = os.environ.get('GCE_PROJECT', None)
if not pem_file:
pem_file = os.environ.get('GCE_PEM_FILE_PATH', None)
# If we still don't have one or more of our credentials, attempt to
# get the remaining values from the libcloud secrets file.
if service_account_email is None or pem_file is None:
try:
import secrets
except ImportError:
secrets = None
if hasattr(secrets, 'GCE_PARAMS'):
if not service_account_email:
service_account_email = secrets.GCE_PARAMS[0]
if not pem_file:
pem_file = secrets.GCE_PARAMS[1]
keyword_params = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
if not project_id:
project_id = keyword_params.get('project', None)
# If we *still* don't have the credentials we need, then it's time to
# just fail out.
if service_account_email is None or pem_file is None or project_id is None:
module.fail_json(msg='Missing GCE connection parameters in libcloud '
'secrets file.')
return None
# Allow for passing in libcloud Google DNS (e.g, Provider.GOOGLE)
if provider is None:
provider = Provider.GCE
try:
gce = get_driver(provider)(service_account_email, pem_file,
datacenter=module.params.get('zone', None),
project=project_id)
gce.connection.user_agent_append("%s/%s" % (
USER_AGENT_PRODUCT, USER_AGENT_VERSION))
    except (RuntimeError, ValueError) as e:
        module.fail_json(msg=str(e), changed=False)
    except Exception as e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)
return gce
def unexpected_error_msg(error):
"""Create an error string based on passed in error."""
return 'Unexpected response: ' + pprint.pformat(vars(error))
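# Illustrative sketch (not part of the original snippet): a module embedding this
# code would typically declare the shared GCE credential parameters and then call
# gce_connect(). The AnsibleModule usage and argument defaults below are assumptions
# about the surrounding module, not something defined in this file.
#
#     module = AnsibleModule(argument_spec=dict(
#         service_account_email=dict(type='str'),
#         pem_file=dict(type='path'),
#         project_id=dict(type='str'),
#         zone=dict(type='str', default='us-central1-a'),
#     ))
#     gce = gce_connect(module)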
#!/usr/bin/env python
#
# Wrapper script for Java Conda packages that ensures that the java runtime
# is invoked with the right options. Adapted from the bash script (http://stackoverflow.com/questions/59895/can-a-bash-script-tell-what-directory-its-stored-in/246128#246128).
#
# Program Parameters
#
import os
import subprocess
import sys
import shutil
from os import access
from os import getenv
from os import X_OK
jar_file = 'mpa-portable-1.4.1.jar'
default_jvm_mem_opts = ['-Xms512m', '-Xmx1g']
# !!! End of parameter section. No user-serviceable code below this line !!!
def real_dirname(path):
"""Return the symlink-resolved, canonicalized directory-portion of path."""
return os.path.dirname(os.path.realpath(path))
def java_executable():
"""Return the executable name of the Java interpreter."""
java_home = getenv('JAVA_HOME')
java_bin = os.path.join('bin', 'java')
if java_home and access(os.path.join(java_home, java_bin), X_OK):
return os.path.join(java_home, java_bin)
else:
return 'java'
def jvm_opts(argv):
"""Construct list of Java arguments based on our argument list.
The argument list passed in argv must not include the script name.
The return value is a 3-tuple lists of strings of the form:
(memory_options, prop_options, passthrough_options)
"""
mem_opts = []
prop_opts = []
pass_args = []
exec_dir = None
for arg in argv:
if arg.startswith('-D'):
prop_opts.append(arg)
elif arg.startswith('-XX'):
prop_opts.append(arg)
elif arg.startswith('-Xm'):
mem_opts.append(arg)
elif arg.startswith('--exec_dir='):
exec_dir = arg.split('=')[1].strip('"').strip("'")
if not os.path.exists(exec_dir):
shutil.copytree(real_dirname(sys.argv[0]), exec_dir, symlinks=False, ignore=None)
else:
pass_args.append(arg)
# In the original shell script the test coded below read:
# if [ "$jvm_mem_opts" == "" ] && [ -z ${_JAVA_OPTIONS+x} ]
# To reproduce the behaviour of the above shell code fragment
    # it is important to explicitly check for equality with None
# in the second condition, so a null envar value counts as True!
if mem_opts == [] and getenv('_JAVA_OPTIONS') is None:
mem_opts = default_jvm_mem_opts
return (mem_opts, prop_opts, pass_args, exec_dir)
def main():
java = java_executable()
"""
If the exec_dir dies not exist,
we copy the jar file, lib, and resources to the exec_dir directory.
"""
(mem_opts, prop_opts, pass_args, exec_dir) = jvm_opts(sys.argv[1:])
jar_dir = exec_dir if exec_dir else real_dirname(sys.argv[0])
jar_path = os.path.join(jar_dir, jar_file)
if pass_args != [] and '-get_jar_dir' in pass_args:
print(jar_dir)
return
if pass_args != [] and pass_args[0].startswith('de'):
jar_arg = '-cp'
else:
jar_arg = '-jar'
java_args = [java] + mem_opts + prop_opts + [jar_arg] + [jar_path] + pass_args
sys.exit(subprocess.call(java_args))
if __name__ == '__main__':
main()
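# Illustrative usage note (not part of the original wrapper): memory flags are passed
# through to the JVM and everything else to the jar, so an invocation might look like
# the following (command name, paths and options are hypothetical):
#
#     mpa-portable -Xmx4g -Djava.io.tmpdir=/scratch --exec_dir=/scratch/mpa <args>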
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Cursor Movement {{{#
class CursorMovement_Multiline_ECR(_VimTest):
snippets = ('test', r"$1 ${1:a tab}")
keys = 'test' + EX + 'this is something\nvery nice\nnot' + JF + 'more text'
wanted = 'this is something\nvery nice\nnot ' \
'this is something\nvery nice\nnotmore text'
class CursorMovement_BS_InEditMode(_VimTest):
def _extra_vim_config(self, vim_config):
vim_config.append('set backspace=eol,indent,start')
snippets = ('\n\t
$1
\n\t$2\n\n$3')
keys = '
# Minor fixes by Fabian Pedregosa
# Amit Aides
# Yehuda Finkelstein
# Lars Buitinck
# Jan Hendrik Metzen
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide `.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
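    # Restating the update implemented above (the same Chan/Golub/LeVeque formulas
    # as the code, written out for reference; no new assumptions):
    #   mu_total  = (n_new * mu_new + n_past * mu_old) / n_total
    #   ssd_total = n_past * var_old + n_new * var_new
    #               + (n_past / (n_new * n_total)) * (n_new * mu_old - n_new * mu_new) ** 2
    #   var_total = ssd_total / n_total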
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
(as long as fitting in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior to any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide `.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
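# --- Illustrative sketch (not part of the original module) ---
# A small, hedged demonstration of the Lidstone/Laplace smoothing performed in
# MultinomialNB._update_feature_log_prob above: counts are shifted by alpha,
# then normalised per class in log space. The toy counts are invented.
def _example_multinomial_smoothing(alpha=1.0):
    import numpy as np
    feature_count = np.array([[2., 0., 1.],
                              [0., 3., 1.]])    # shape (n_classes, n_features)
    smoothed_fc = feature_count + alpha          # add-alpha smoothing
    smoothed_cc = smoothed_fc.sum(axis=1)        # per-class totals
    feature_log_prob = (np.log(smoothed_fc)
                        - np.log(smoothed_cc.reshape(-1, 1)))
    # each row exponentiates back to a probability distribution over features
    assert np.allclose(np.exp(feature_log_prob).sum(axis=1), 1.0)
    return feature_log_prob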
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide `.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
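# --- Illustrative sketch (not part of the original module) ---
# BernoulliNB._joint_log_likelihood above relies on the identity
#   sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)]
#     = x . (log(p) - log(1 - p)) + sum_i log(1 - p_i)
# so the per-class term reduces to a single dot product. The toy numbers below
# only check that identity; they are not taken from the library.
def _example_bernoulli_jll_identity():
    import numpy as np
    rng = np.random.RandomState(0)
    x = rng.randint(0, 2, size=5).astype(float)
    p = rng.uniform(0.1, 0.9, size=5)
    direct = np.sum(x * np.log(p) + (1 - x) * np.log(1 - p))
    via_dot = x.dot(np.log(p) - np.log(1 - p)) + np.log(1 - p).sum()
    assert np.isclose(direct, via_dot)
    return direct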
"""
Utilities for running shell scripts and interacting with the terminal
"""
import subprocess as sp
import sys
def run_shell_command(cmd):
"""
Runs cmd as a shell command. Waits for it to finish executing,
then returns all output printed to standard error and standard out,
and the return code.
Parameters
----------
cmd : str
The shell command to run
Returns
-------
output : str
The string output of the process
rc : int
The numeric return code of the process
"""
child = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.STDOUT)
output = child.communicate()[0]
rc = child.returncode
return output, rc
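# --- Illustrative usage sketch (not part of the original module) ---
# run_shell_command blocks until the command exits and returns its combined
# stdout/stderr plus the return code; the command below is arbitrary.
def _example_run_shell_command():
    output, rc = run_shell_command("echo hello")
    if rc != 0:
        sys.stderr.write("command failed: %s\n" % output)
    return output, rc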
def print_progression(percent, width=50, delimiters=['[', ']'], symbol='#'):
"""
Prints a progress bar to the command line
Parameters
----------
percent : float
Completion value between 0 and 100
width : int, optional
Number of symbols corresponding to a 100 percent completion
delimiters : list of str, optional
Character delimiters for the progression bar
symbol : str, optional
Symbol representing one unit of progression
"""
n_symbols = int(percent/100.0*width)
progress_bar = delimiters[0] + n_symbols * symbol \
+ (width - n_symbols) * ' ' \
+ delimiters[1] + " "
sys.stdout.write("\r" + progress_bar + str(percent) + "%")
sys.stdout.flush()
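# --- Illustrative usage sketch (not part of the original module) ---
# print_progression only redraws the bar; the caller drives the loop and
# decides when the work is actually done.
def _example_print_progression(n_steps=20):
    for step in range(n_steps + 1):
        percent = 100.0 * step / n_steps
        print_progression(percent)
    sys.stdout.write("\n")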
from collections import namedtuple
from vim_turing_machine.constants import BACKWARDS
from vim_turing_machine.constants import DO_NOT_MOVE
from vim_turing_machine.constants import FORWARDS
from vim_turing_machine.constants import INVALID_STATE_CHARACTERS
from vim_turing_machine.constants import VALID_CHARACTERS
class StateTransition(namedtuple('StateTransition', [
'previous_state',
'previous_character',
'next_state',
'next_character',
'tape_pointer_direction',
])):
def validate(self):
assert self.tape_pointer_direction in (FORWARDS, DO_NOT_MOVE, BACKWARDS)
assert self.previous_character in VALID_CHARACTERS
assert self.next_character in VALID_CHARACTERS
for invalid_char in INVALID_STATE_CHARACTERS:
if invalid_char in self.previous_state:
raise AssertionError('{} is in {}'.format(invalid_char, self.previous_state))
if invalid_char in self.next_state:
raise AssertionError('{} is in {}'.format(invalid_char, self.next_state))
# -*- coding: utf-8 -*-
"""
werkzeug.testsuite.datastructures
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the functionality of the provided Werkzeug
datastructures.
TODO:
- FileMultiDict
- Immutable types undertested
- Split up dict tests
:copyright: (c) 2013 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import with_statement
import unittest
import pickle
from contextlib import contextmanager
from copy import copy
from werkzeug import datastructures
from werkzeug._compat import iterkeys, itervalues, iteritems, iterlists, \
iterlistvalues, text_type
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.exceptions import BadRequestKeyError
class NativeItermethodsTestCase(WerkzeugTestCase):
def test_basic(self):
@datastructures.native_itermethods(['keys', 'values', 'items'])
class StupidDict(object):
def keys(self, multi=1):
return iter(['a', 'b', 'c'] * multi)
def values(self, multi=1):
return iter([1, 2, 3] * multi)
def items(self, multi=1):
return iter(zip(iterkeys(self, multi=multi),
itervalues(self, multi=multi)))
d = StupidDict()
expected_keys = ['a', 'b', 'c']
expected_values = [1, 2, 3]
expected_items = list(zip(expected_keys, expected_values))
self.assert_equal(list(iterkeys(d)), expected_keys)
self.assert_equal(list(itervalues(d)), expected_values)
self.assert_equal(list(iteritems(d)), expected_items)
self.assert_equal(list(iterkeys(d, 2)), expected_keys * 2)
self.assert_equal(list(itervalues(d, 2)), expected_values * 2)
self.assert_equal(list(iteritems(d, 2)), expected_items * 2)
class MutableMultiDictBaseTestCase(WerkzeugTestCase):
storage_class = None
def test_pickle(self):
cls = self.storage_class
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
d = cls()
d.setlist(b'foo', [1, 2, 3, 4])
d.setlist(b'bar', b'foo bar baz'.split())
s = pickle.dumps(d, protocol)
ud = pickle.loads(s)
self.assert_equal(type(ud), type(d))
self.assert_equal(ud, d)
self.assert_equal(pickle.loads(
s.replace(b'werkzeug.datastructures', b'werkzeug')), d)
ud[b'newkey'] = b'bla'
self.assert_not_equal(ud, d)
def test_basic_interface(self):
md = self.storage_class()
assert isinstance(md, dict)
mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
('a', 1), ('a', 3), ('d', 4), ('c', 3)]
md = self.storage_class(mapping)
# simple getitem gives the first value
self.assert_equal(md['a'], 1)
self.assert_equal(md['c'], 3)
with self.assert_raises(KeyError):
md['e']
self.assert_equal(md.get('a'), 1)
# list getitem
self.assert_equal(md.getlist('a'), [1, 2, 1, 3])
self.assert_equal(md.getlist('d'), [3, 4])
# do not raise if key not found
self.assert_equal(md.getlist('x'), [])
# simple setitem overwrites all values
md['a'] = 42
self.assert_equal(md.getlist('a'), [42])
# list setitem
md.setlist('a', [1, 2, 3])
self.assert_equal(md['a'], 1)
self.assert_equal(md.getlist('a'), [1, 2, 3])
# verify that it does not change original lists
l1 = [1, 2, 3]
md.setlist('a', l1)
del l1[:]
self.assert_equal(md['a'], 1)
# setdefault, setlistdefault
self.assert_equal(md.setdefault('u', 23), 23)
self.assert_equal(md.getlist('u'), [23])
del md['u']
md.setlist('u', [-1, -2])
# delitem
del md['u']
with self.assert_raises(KeyError):
md['u']
del md['d']
self.assert_equal(md.getlist('d'), [])
# keys, values, items, lists
self.assert_equal(list(sorted(md.keys())), ['a', 'b', 'c'])
self.assert_equal(list(sorted(iterkeys(md))), ['a', 'b', 'c'])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(itervalues(md))), [1, 2, 3])
self.assert_equal(list(sorted(md.items())),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.items(multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md))),
[('a', 1), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(iteritems(md, multi=True))),
[('a', 1), ('a', 2), ('a', 3), ('b', 2), ('c', 3)])
self.assert_equal(list(sorted(md.lists())),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
self.assert_equal(list(sorted(iterlists(md))),
[('a', [1, 2, 3]), ('b', [2]), ('c', [3])])
# copy method
c = md.copy()
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# copy method 2
c = copy(md)
self.assert_equal(c['a'], 1)
self.assert_equal(c.getlist('a'), [1, 2, 3])
# update with a multidict
od = self.storage_class([('a', 4), ('a', 5), ('y', 0)])
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4, 5])
self.assert_equal(md.getlist('y'), [0])
# update with a regular dict
md = c
od = {'a': 4, 'y': 0}
md.update(od)
self.assert_equal(md.getlist('a'), [1, 2, 3, 4])
self.assert_equal(md.getlist('y'), [0])
# pop, poplist, popitem, popitemlist
self.assert_equal(md.pop('y'), 0)
assert 'y' not in md
self.assert_equal(md.poplist('a'), [1, 2, 3, 4])
assert 'a' not in md
self.assert_equal(md.poplist('missing'), [])
# remaining: b=2, c=3
popped = md.popitem()
assert popped in [('b', 2), ('c', 3)]
popped = md.popitemlist()
assert popped in [('b', [2]), ('c', [3])]
# type conversion
md = self.storage_class({'a': '4', 'b': ['2', '3']})
self.assert_equal(md.get('a', type=int), 4)
self.assert_equal(md.getlist('b', type=int), [2, 3])
# repr
md = self.storage_class([('a', 1), ('a', 2), ('b', 3)])
assert "('a', 1)" in repr(md)
assert "('a', 2)" in repr(md)
assert "('b', 3)" in repr(md)
# add and getlist
md.add('c', '42')
md.add('c', '23')
self.assert_equal(md.getlist('c'), ['42', '23'])
md.add('c', 'blah')
self.assert_equal(md.getlist('c', type=int), [42, 23])
# setdefault
md = self.storage_class()
md.setdefault('x', []).append(42)
md.setdefault('x', []).append(23)
self.assert_equal(md['x'], [42, 23])
# to dict
md = self.storage_class()
md['foo'] = 42
md.add('bar', 1)
md.add('bar', 2)
self.assert_equal(md.to_dict(), {'foo': 42, 'bar': 1})
self.assert_equal(md.to_dict(flat=False), {'foo': [42], 'bar': [1, 2]})
# popitem from empty dict
with self.assert_raises(KeyError):
self.storage_class().popitem()
with self.assert_raises(KeyError):
self.storage_class().popitemlist()
# key errors are of a special type
with self.assert_raises(BadRequestKeyError):
self.storage_class()[42]
# setlist works
md = self.storage_class()
md['foo'] = 42
md.setlist('foo', [1, 2])
self.assert_equal(md.getlist('foo'), [1, 2])
class ImmutableDictBaseTestCase(WerkzeugTestCase):
storage_class = None
def test_follows_dict_interface(self):
cls = self.storage_class
data = {'foo': 1, 'bar': 2, 'baz': 3}
d = cls(data)
self.assert_equal(d['foo'], 1)
self.assert_equal(d['bar'], 2)
self.assert_equal(d['baz'], 3)
self.assert_equal(sorted(d.keys()), ['bar', 'baz', 'foo'])
self.assert_true('foo' in d)
self.assert_true('foox' not in d)
self.assert_equal(len(d), 3)
def test_copies_are_mutable(self):
cls = self.storage_class
immutable = cls({'a': 1})
with self.assert_raises(TypeError):
immutable.pop('a')
mutable = immutable.copy()
mutable.pop('a')
self.assert_true('a' in immutable)
self.assert_true(mutable is not immutable)
self.assert_true(copy(immutable) is immutable)
def test_dict_is_hashable(self):
cls = self.storage_class
immutable = cls({'a': 1, 'b': 2})
immutable2 = cls({'a': 2, 'b': 2})
x = set([immutable])
self.assert_true(immutable in x)
self.assert_true(immutable2 not in x)
x.discard(immutable)
self.assert_true(immutable not in x)
self.assert_true(immutable2 not in x)
x.add(immutable2)
self.assert_true(immutable not in x)
self.assert_true(immutable2 in x)
x.add(immutable)
self.assert_true(immutable in x)
self.assert_true(immutable2 in x)
class ImmutableTypeConversionDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableTypeConversionDict
class ImmutableMultiDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableMultiDict
def test_multidict_is_hashable(self):
cls = self.storage_class
immutable = cls({'a': [1, 2], 'b': 2})
immutable2 = cls({'a': [1], 'b': 2})
x = set([immutable])
self.assert_true(immutable in x)
self.assert_true(immutable2 not in x)
x.discard(immutable)
self.assert_true(immutable not in x)
self.assert_true(immutable2 not in x)
x.add(immutable2)
self.assert_true(immutable not in x)
self.assert_true(immutable2 in x)
x.add(immutable)
self.assert_true(immutable in x)
self.assert_true(immutable2 in x)
class ImmutableDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableDict
class ImmutableOrderedMultiDictTestCase(ImmutableDictBaseTestCase):
storage_class = datastructures.ImmutableOrderedMultiDict
def test_ordered_multidict_is_hashable(self):
a = self.storage_class([('a', 1), ('b', 1), ('a', 2)])
b = self.storage_class([('a', 1), ('a', 2), ('b', 1)])
self.assert_not_equal(hash(a), hash(b))
class MultiDictTestCase(MutableMultiDictBaseTestCase):
storage_class = datastructures.MultiDict
def test_multidict_pop(self):
make_d = lambda: self.storage_class({'foo': [1, 2, 3, 4]})
d = make_d()
self.assert_equal(d.pop('foo'), 1)
assert not d
d = make_d()
self.assert_equal(d.pop('foo', 32), 1)
assert not d
d = make_d()
self.assert_equal(d.pop('foos', 32), 32)
assert d
with self.assert_raises(KeyError):
d.pop('foos')
def test_setlistdefault(self):
md = self.storage_class()
self.assert_equal(md.setlistdefault('u', [-1, -2]), [-1, -2])
self.assert_equal(md.getlist('u'), [-1, -2])
self.assert_equal(md['u'], -1)
def test_iter_interfaces(self):
mapping = [('a', 1), ('b', 2), ('a', 2), ('d', 3),
('a', 1), ('a', 3), ('d', 4), ('c', 3)]
md = self.storage_class(mapping)
self.assert_equal(list(zip(md.keys(), md.listvalues())),
list(md.lists()))
self.assert_equal(list(zip(md, iterlistvalues(md))),
list(iterlists(md)))
self.assert_equal(list(zip(iterkeys(md), iterlistvalues(md))),
list(iterlists(md)))
class OrderedMultiDictTestCase(MutableMultiDictBaseTestCase):
storage_class = datastructures.OrderedMultiDict
def test_ordered_interface(self):
cls = self.storage_class
d = cls()
assert not d
d.add('foo', 'bar')
self.assert_equal(len(d), 1)
d.add('foo', 'baz')
self.assert_equal(len(d), 1)
self.assert_equal(list(iteritems(d)), [('foo', 'bar')])
self.assert_equal(list(d), ['foo'])
self.assert_equal(list(iteritems(d, multi=True)),
[('foo', 'bar'), ('foo', 'baz')])
del d['foo']
assert not d
self.assert_equal(len(d), 0)
self.assert_equal(list(d), [])
d.update([('foo', 1), ('foo', 2), ('bar', 42)])
d.add('foo', 3)
self.assert_equal(d.getlist('foo'), [1, 2, 3])
self.assert_equal(d.getlist('bar'), [42])
self.assert_equal(list(iteritems(d)), [('foo', 1), ('bar', 42)])
expected = ['foo', 'bar']
self.assert_sequence_equal(list(d.keys()), expected)
self.assert_sequence_equal(list(d), expected)
self.assert_sequence_equal(list(iterkeys(d)), expected)
self.assert_equal(list(iteritems(d, multi=True)),
[('foo', 1), ('foo', 2), ('bar', 42), ('foo', 3)])
self.assert_equal(len(d), 2)
self.assert_equal(d.pop('foo'), 1)
assert d.pop('blafasel', None) is None
self.assert_equal(d.pop('blafasel', 42), 42)
self.assert_equal(len(d), 1)
self.assert_equal(d.poplist('bar'), [42])
assert not d
assert d.get('missingkey') is None
d.add('foo', 42)
d.add('foo', 23)
d.add('bar', 2)
d.add('foo', 42)
self.assert_equal(d, datastructures.MultiDict(d))
id = self.storage_class(d)
self.assert_equal(d, id)
d.add('foo', 2)
assert d != id
d.update({'blah': [1, 2, 3]})
self.assert_equal(d['blah'], 1)
self.assert_equal(d.getlist('blah'), [1, 2, 3])
# setlist works
d = self.storage_class()
d['foo'] = 42
d.setlist('foo', [1, 2])
self.assert_equal(d.getlist('foo'), [1, 2])
with self.assert_raises(BadRequestKeyError):
d.pop('missing')
with self.assert_raises(BadRequestKeyError):
d['missing']
# popping
d = self.storage_class()
d.add('foo', 23)
d.add('foo', 42)
d.add('foo', 1)
self.assert_equal(d.popitem(), ('foo', 23))
with self.assert_raises(BadRequestKeyError):
d.popitem()
assert not d
d.add('foo', 23)
d.add('foo', 42)
d.add('foo', 1)
self.assert_equal(d.popitemlist(), ('foo', [23, 42, 1]))
with self.assert_raises(BadRequestKeyError):
d.popitemlist()
def test_iterables(self):
a = datastructures.MultiDict((("key_a", "value_a"),))
b = datastructures.MultiDict((("key_b", "value_b"),))
ab = datastructures.CombinedMultiDict((a,b))
self.assert_equal(sorted(ab.lists()), [('key_a', ['value_a']), ('key_b', ['value_b'])])
self.assert_equal(sorted(ab.listvalues()), [['value_a'], ['value_b']])
self.assert_equal(sorted(ab.keys()), ["key_a", "key_b"])
self.assert_equal(sorted(iterlists(ab)), [('key_a', ['value_a']), ('key_b', ['value_b'])])
self.assert_equal(sorted(iterlistvalues(ab)), [['value_a'], ['value_b']])
self.assert_equal(sorted(iterkeys(ab)), ["key_a", "key_b"])
class CombinedMultiDictTestCase(WerkzeugTestCase):
storage_class = datastructures.CombinedMultiDict
def test_basic_interface(self):
d1 = datastructures.MultiDict([('foo', '1')])
d2 = datastructures.MultiDict([('bar', '2'), ('bar', '3')])
d = self.storage_class([d1, d2])
# lookup
self.assert_equal(d['foo'], '1')
self.assert_equal(d['bar'], '2')
self.assert_equal(d.getlist('bar'), ['2', '3'])
self.assert_equal(sorted(d.items()),
[('bar', '2'), ('foo', '1')])
self.assert_equal(sorted(d.items(multi=True)),
[('bar', '2'), ('bar', '3'), ('foo', '1')])
assert 'missingkey' not in d
assert 'foo' in d
# type lookup
self.assert_equal(d.get('foo', type=int), 1)
self.assert_equal(d.getlist('bar', type=int), [2, 3])
# get key errors for missing stuff
with self.assert_raises(KeyError):
d['missing']
# make sure that they are immutable
with self.assert_raises(TypeError):
d['foo'] = 'blub'
# copies are immutable
d = d.copy()
with self.assert_raises(TypeError):
d['foo'] = 'blub'
# make sure lists merges
md1 = datastructures.MultiDict((("foo", "bar"),))
md2 = datastructures.MultiDict((("foo", "blafasel"),))
x = self.storage_class((md1, md2))
self.assert_equal(list(iterlists(x)), [('foo', ['bar', 'blafasel'])])
class HeadersTestCase(WerkzeugTestCase):
storage_class = datastructures.Headers
def test_basic_interface(self):
headers = self.storage_class()
headers.add('Content-Type', 'text/plain')
headers.add('X-Foo', 'bar')
assert 'x-Foo' in headers
assert 'Content-type' in headers
headers['Content-Type'] = 'foo/bar'
self.assert_equal(headers['Content-Type'], 'foo/bar')
self.assert_equal(len(headers.getlist('Content-Type')), 1)
# list conversion
self.assert_equal(headers.to_wsgi_list(), [
('Content-Type', 'foo/bar'),
('X-Foo', 'bar')
])
self.assert_equal(str(headers), (
"Content-Type: foo/bar\r\n"
"X-Foo: bar\r\n"
"\r\n"))
self.assert_equal(str(self.storage_class()), "\r\n")
# extended add
headers.add('Content-Disposition', 'attachment', filename='foo')
self.assert_equal(headers['Content-Disposition'],
'attachment; filename=foo')
headers.add('x', 'y', z='"')
self.assert_equal(headers['x'], r'y; z="\""')
def test_defaults_and_conversion(self):
# defaults
headers = self.storage_class([
('Content-Type', 'text/plain'),
('X-Foo', 'bar'),
('X-Bar', '1'),
('X-Bar', '2')
])
self.assert_equal(headers.getlist('x-bar'), ['1', '2'])
self.assert_equal(headers.get('x-Bar'), '1')
self.assert_equal(headers.get('Content-Type'), 'text/plain')
self.assert_equal(headers.setdefault('X-Foo', 'nope'), 'bar')
self.assert_equal(headers.setdefault('X-Bar', 'nope'), '1')
self.assert_equal(headers.setdefault('X-Baz', 'quux'), 'quux')
self.assert_equal(headers.setdefault('X-Baz', 'nope'), 'quux')
headers.pop('X-Baz')
# type conversion
self.assert_equal(headers.get('x-bar', type=int), 1)
self.assert_equal(headers.getlist('x-bar', type=int), [1, 2])
# list like operations
self.assert_equal(headers[0], ('Content-Type', 'text/plain'))
self.assert_equal(headers[:1], self.storage_class([('Content-Type', 'text/plain')]))
del headers[:2]
del headers[-1]
self.assert_equal(headers, self.storage_class([('X-Bar', '1')]))
def test_copying(self):
a = self.storage_class([('foo', 'bar')])
b = a.copy()
a.add('foo', 'baz')
self.assert_equal(a.getlist('foo'), ['bar', 'baz'])
self.assert_equal(b.getlist('foo'), ['bar'])
def test_popping(self):
headers = self.storage_class([('a', 1)])
self.assert_equal(headers.pop('a'), 1)
self.assert_equal(headers.pop('b', 2), 2)
with self.assert_raises(KeyError):
headers.pop('c')
def test_set_arguments(self):
a = self.storage_class()
a.set('Content-Disposition', 'useless')
a.set('Content-Disposition', 'attachment', filename='foo')
self.assert_equal(a['Content-Disposition'], 'attachment; filename=foo')
def test_reject_newlines(self):
h = self.storage_class()
for variation in 'foo\nbar', 'foo\r\nbar', 'foo\rbar':
with self.assert_raises(ValueError):
h['foo'] = variation
with self.assert_raises(ValueError):
h.add('foo', variation)
with self.assert_raises(ValueError):
h.add('foo', 'test', option=variation)
with self.assert_raises(ValueError):
h.set('foo', variation)
with self.assert_raises(ValueError):
h.set('foo', 'test', option=variation)
def test_slicing(self):
# there's nothing wrong with these being native strings
# Headers doesn't care about the data types
h = self.storage_class()
h.set('X-Foo-Poo', 'bleh')
h.set('Content-Type', 'application/whocares')
h.set('X-Forwarded-For', '192.168.0.123')
h[:] = [(k, v) for k, v in h if k.startswith(u'X-')]
self.assert_equal(list(h), [
('X-Foo-Poo', 'bleh'),
('X-Forwarded-For', '192.168.0.123')
])
def test_bytes_operations(self):
h = self.storage_class()
h.set('X-Foo-Poo', 'bleh')
h.set('X-Whoops', b'\xff')
self.assert_equal(h.get('x-foo-poo', as_bytes=True), b'bleh')
self.assert_equal(h.get('x-whoops', as_bytes=True), b'\xff')
class EnvironHeadersTestCase(WerkzeugTestCase):
storage_class = datastructures.EnvironHeaders
def test_basic_interface(self):
# this happens in multiple WSGI servers because they
# use a very naive way to convert the headers;
broken_env = {
'HTTP_CONTENT_TYPE': 'text/html',
'CONTENT_TYPE': 'text/html',
'HTTP_CONTENT_LENGTH': '0',
'CONTENT_LENGTH': '0',
'HTTP_ACCEPT': '*',
'wsgi.version': (1, 0)
}
headers = self.storage_class(broken_env)
assert headers
self.assert_equal(len(headers), 3)
self.assert_equal(sorted(headers), [
('Accept', '*'),
('Content-Length', '0'),
('Content-Type', 'text/html')
])
assert not self.storage_class({'wsgi.version': (1, 0)})
self.assert_equal(len(self.storage_class({'wsgi.version': (1, 0)})), 0)
def test_return_type_is_unicode(self):
# environ contains native strings; we return unicode
headers = self.storage_class({
'HTTP_FOO': '\xe2\x9c\x93',
'CONTENT_TYPE': 'text/plain',
})
self.assert_equal(headers['Foo'], u"\xe2\x9c\x93")
assert isinstance(headers['Foo'], text_type)
assert isinstance(headers['Content-Type'], text_type)
iter_output = dict(iter(headers))
self.assert_equal(iter_output['Foo'], u"\xe2\x9c\x93")
assert isinstance(iter_output['Foo'], text_type)
assert isinstance(iter_output['Content-Type'], text_type)
def test_bytes_operations(self):
foo_val = '\xff'
h = self.storage_class({
'HTTP_X_FOO': foo_val
})
self.assert_equal(h.get('x-foo', as_bytes=True), b'\xff')
self.assert_equal(h.get('x-foo'), u'\xff')
class HeaderSetTestCase(WerkzeugTestCase):
storage_class = datastructures.HeaderSet
def test_basic_interface(self):
hs = self.storage_class()
hs.add('foo')
hs.add('bar')
assert 'Bar' in hs
self.assert_equal(hs.find('foo'), 0)
self.assert_equal(hs.find('BAR'), 1)
assert hs.find('baz') < 0
hs.discard('missing')
hs.discard('foo')
assert hs.find('foo') < 0
self.assert_equal(hs.find('bar'), 0)
with self.assert_raises(IndexError):
hs.index('missing')
self.assert_equal(hs.index('bar'), 0)
assert hs
hs.clear()
assert not hs
class ImmutableListTestCase(WerkzeugTestCase):
storage_class = datastructures.ImmutableList
def test_list_hashable(self):
t = (1, 2, 3, 4)
l = self.storage_class(t)
self.assert_equal(hash(t), hash(l))
self.assert_not_equal(t, l)
def make_call_asserter(assert_equal_func, func=None):
"""Utility to assert a certain number of function calls.
>>> assert_calls, func = make_call_asserter(self.assert_equal)
>>> with assert_calls(2):
func()
func()
"""
calls = [0]
@contextmanager
def asserter(count, msg=None):
calls[0] = 0
yield
assert_equal_func(calls[0], count, msg)
def wrapped(*args, **kwargs):
calls[0] += 1
if func is not None:
return func(*args, **kwargs)
return asserter, wrapped
class CallbackDictTestCase(WerkzeugTestCase):
storage_class = datastructures.CallbackDict
def test_callback_dict_reads(self):
assert_calls, func = make_call_asserter(self.assert_equal)
initial = {'a': 'foo', 'b': 'bar'}
dct = self.storage_class(initial=initial, on_update=func)
with assert_calls(0, 'callback triggered by read-only method'):
# read-only methods
dct['a']
dct.get('a')
self.assert_raises(KeyError, lambda: dct['x'])
'a' in dct
list(iter(dct))
dct.copy()
with assert_calls(0, 'callback triggered without modification'):
# methods that may write but don't
dct.pop('z', None)
dct.setdefault('a')
def test_callback_dict_writes(self):
assert_calls, func = make_call_asserter(self.assert_equal)
initial = {'a': 'foo', 'b': 'bar'}
dct = self.storage_class(initial=initial, on_update=func)
with assert_calls(8, 'callback not triggered by write method'):
# always-write methods
dct['z'] = 123
dct['z'] = 123 # must trigger again
del dct['z']
dct.pop('b', None)
dct.setdefault('x')
dct.popitem()
dct.update([])
dct.clear()
with assert_calls(0, 'callback triggered by failed del'):
self.assert_raises(KeyError, lambda: dct.__delitem__('x'))
with assert_calls(0, 'callback triggered by failed pop'):
self.assert_raises(KeyError, lambda: dct.pop('x'))
def suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(MultiDictTestCase))
suite.addTest(unittest.makeSuite(OrderedMultiDictTestCase))
suite.addTest(unittest.makeSuite(CombinedMultiDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableTypeConversionDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableMultiDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableDictTestCase))
suite.addTest(unittest.makeSuite(ImmutableOrderedMultiDictTestCase))
suite.addTest(unittest.makeSuite(HeadersTestCase))
suite.addTest(unittest.makeSuite(EnvironHeadersTestCase))
suite.addTest(unittest.makeSuite(HeaderSetTestCase))
suite.addTest(unittest.makeSuite(NativeItermethodsTestCase))
suite.addTest(unittest.makeSuite(CallbackDictTestCase))
return suite
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class R(models.Model):
is_default = models.BooleanField(default=False)
def __str__(self):
return "%s" % self.pk
get_default_r = lambda: R.objects.get_or_create(is_default=True)[0]
class S(models.Model):
r = models.ForeignKey(R)
class T(models.Model):
s = models.ForeignKey(S)
class U(models.Model):
t = models.ForeignKey(T)
class RChild(R):
pass
class A(models.Model):
name = models.CharField(max_length=30)
auto = models.ForeignKey(R, related_name="auto_set")
auto_nullable = models.ForeignKey(R, null=True,
related_name='auto_nullable_set')
setvalue = models.ForeignKey(R, on_delete=models.SET(get_default_r),
related_name='setvalue')
setnull = models.ForeignKey(R, on_delete=models.SET_NULL, null=True,
related_name='setnull_set')
setdefault = models.ForeignKey(R, on_delete=models.SET_DEFAULT,
default=get_default_r, related_name='setdefault_set')
setdefault_none = models.ForeignKey(R, on_delete=models.SET_DEFAULT,
default=None, null=True, related_name='setnull_nullable_set')
cascade = models.ForeignKey(R, on_delete=models.CASCADE,
related_name='cascade_set')
cascade_nullable = models.ForeignKey(R, on_delete=models.CASCADE, null=True,
related_name='cascade_nullable_set')
protect = models.ForeignKey(R, on_delete=models.PROTECT, null=True)
donothing = models.ForeignKey(R, on_delete=models.DO_NOTHING, null=True,
related_name='donothing_set')
child = models.ForeignKey(RChild, related_name="child")
child_setnull = models.ForeignKey(RChild, on_delete=models.SET_NULL, null=True,
related_name="child_setnull")
# A OneToOneField is just a ForeignKey with unique=True, so we don't duplicate
# all the tests; just one smoke test to ensure on_delete works for it as
# well.
o2o_setnull = models.ForeignKey(R, null=True,
on_delete=models.SET_NULL, related_name="o2o_nullable_set")
def create_a(name):
a = A(name=name)
for name in ('auto', 'auto_nullable', 'setvalue', 'setnull', 'setdefault',
'setdefault_none', 'cascade', 'cascade_nullable', 'protect',
'donothing', 'o2o_setnull'):
r = R.objects.create()
setattr(a, name, r)
a.child = RChild.objects.create()
a.child_setnull = RChild.objects.create()
a.save()
return a
class M(models.Model):
m2m = models.ManyToManyField(R, related_name="m_set")
m2m_through = models.ManyToManyField(R, through="MR",
related_name="m_through_set")
m2m_through_null = models.ManyToManyField(R, through="MRNull",
related_name="m_through_null_set")
class MR(models.Model):
m = models.ForeignKey(M)
r = models.ForeignKey(R)
class MRNull(models.Model):
m = models.ForeignKey(M)
r = models.ForeignKey(R, null=True, on_delete=models.SET_NULL)
class Avatar(models.Model):
desc = models.TextField(null=True)
class User(models.Model):
avatar = models.ForeignKey(Avatar, null=True)
class HiddenUser(models.Model):
r = models.ForeignKey(R, related_name="+")
class HiddenUserProfile(models.Model):
user = models.ForeignKey(HiddenUser)
class M2MTo(models.Model):
pass
class M2MFrom(models.Model):
m2m = models.ManyToManyField(M2MTo)
class Parent(models.Model):
pass
class Child(Parent):
pass
class Base(models.Model):
pass
class RelToBase(models.Model):
base = models.ForeignKey(Base, on_delete=models.DO_NOTHING)
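# --- Illustrative sketch (not part of the original module) ---
# A hedged illustration of what the on_delete options declared on model A
# encode: deleting a related R either nulls out the field or resets it to its
# default. This assumes the models above are installed in a test database; it
# is not an actual test from the suite.
def _example_on_delete_behaviour():
    a = create_a('example')
    a.setnull.delete()                 # SET_NULL: the FK becomes None
    a = A.objects.get(pk=a.pk)
    assert a.setnull is None
    a.setdefault.delete()              # SET_DEFAULT: reset to the default R
    a = A.objects.get(pk=a.pk)
    assert a.setdefault.is_default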
"""
``revscoring score -h``
::
Scores a set of revisions.
Usage:
score (-h | --help)
score <model-file> --host=<url> [<rev-id>...]
[--rev-ids=<path>] [--cache=<json>] [--caches=<json>]
[--batch-size=<num>] [--io-workers=<num>] [--cpu-workers=<num>]
[--debug] [--verbose]
Options:
-h --help Print this documentation
<model-file> Path to a model file
--host=<url> The url pointing to a MediaWiki API to use for
extracting features
<rev-id> A revision identifier to score.
--rev-ids=<path> The path to a file containing revision identifiers
to score (expects a column called 'rev_id'). If
any <rev-id> are provided, this argument is
ignored. [default: ]
--cache=<json> A JSON blob of cache values to use during
extraction for every call.
--caches=<json> A JSON blob of rev_id-->cache value pairs to use
during extraction
--batch-size=<num> The size of the revisions to batch when requesting
data from the API [default: 50]
--io-workers=<num> The number of worker processes to use for
requesting data from the API [default: ]
--cpu-workers=<num> The number of worker processes to use for
extraction and scoring [default: ]
--debug Print debug logging
--verbose Print feature extraction debug logging
"""
import json
import logging
import sys
from multiprocessing import cpu_count
import docopt
import mwapi
import mysqltsv
from ..extractors import api
from ..score_processor import ScoreProcessor
from ..scoring import Model, models
def main(argv=None):
args = docopt.docopt(__doc__, argv=argv)
logging.basicConfig(
level=logging.DEBUG if args['--debug'] else logging.WARNING,
format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'
)
logging.getLogger('requests').setLevel(logging.WARNING)
logging.getLogger('revscoring.dependencies.dependent') \
.setLevel(logging.WARNING)
scoring_model = Model.load(models.open_file(args['<model-file>']))
session = mwapi.Session(
args['--host'],
user_agent="Revscoring score utility ")
extractor = api.Extractor(session)
if len(args['<rev-id>']) > 0:
rev_ids = (int(rev_id) for rev_id in args['<rev-id>'])
else:
if args['--rev-ids'] == "":
rev_ids_f = sys.stdin
else:
rev_ids_f = open(args['--rev-ids'])
rev_ids = (int(row.rev_id) for row in mysqltsv.read(rev_ids_f))
if args['--caches'] is not None:
caches = json.loads(args['--caches'])
else:
caches = None
if args['--cache'] is not None:
cache = json.loads(args['--cache'])
else:
cache = None
batch_size = int(args['--batch-size'])
if args['--cpu-workers'] == "":
cpu_workers = cpu_count()
else:
cpu_workers = int(args['--cpu-workers'])
if args['--io-workers'] == "":
io_workers = None
else:
io_workers = int(args['--io-workers'])
verbose = args['--verbose']
debug = args['--debug']
score_processor = ScoreProcessor(
scoring_model, extractor, batch_size=batch_size,
cpu_workers=cpu_workers, io_workers=io_workers)
run(score_processor, rev_ids, caches, cache, debug, verbose)
def run(score_processor, rev_ids, caches, cache, debug, verbose):
rev_scores = score_processor.score(rev_ids, caches, cache)
for rev_id, score in rev_scores:
print("\t".join([str(rev_id), json.dumps(score)]))
if verbose:
if 'error' in score:
if "NotFound" in score['error']['type']:
sys.stderr.write("?")
elif "Deleted" in score['error']['type']:
sys.stderr.write("d")
else:
sys.stderr.write("e")
else:
sys.stderr.write(".")
sys.stderr.flush()
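# --- Illustrative usage sketch (not part of the original module) ---
# A hypothetical invocation of this utility; the model path, host and rev ids
# below are invented:
#
#   revscoring score enwiki.damaging.model --host=https://en.wikipedia.org 123456 123457
#
# Each line of output is a tab-separated pair of rev_id and the JSON score.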
"""Helper module to factorize the conditional multiprocessing import logic
We use a distinct module to simplify import statements and avoid introducing
circular dependencies (for instance for the assert_spawning name).
"""
import os
import warnings
# Obtain the configuration from the environment, assuming 1 (on) by default;
# if it is set to 0, fall back to None so joblib runs in serial mode. This
# should fail loudly if some non-0/1 value is set.
mp = int(os.environ.get('JOBLIB_MULTIPROCESSING', 1)) or None
if mp:
try:
import multiprocessing as mp
import multiprocessing.pool
except ImportError:
mp = None
# 2nd stage: validate that locking is available on the system and
# issue a warning if not
if mp is not None:
try:
_sem = mp.Semaphore()
del _sem # cleanup
except (ImportError, OSError) as e:
mp = None
warnings.warn('%s. joblib will operate in serial mode' % (e,))
# 3rd stage: backward compat for the assert_spawning helper
if mp is not None:
try:
# Python 3.4+
from multiprocessing.context import assert_spawning
except ImportError:
from multiprocessing.forking import assert_spawning
else:
assert_spawning = None
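# --- Illustrative usage sketch (not part of the original module) ---
# Callers are expected to treat `mp` as "the multiprocessing module or None"
# and fall back to serial execution when it is None; a minimal sketch:
def _example_use_mp(n_workers=2):
    if mp is not None:
        pool = mp.Pool(n_workers)
        try:
            return pool.map(abs, [-1, -2, -3])
        finally:
            pool.close()
            pool.join()
    # serial fallback when multiprocessing is unavailable or disabled
    return [abs(x) for x in [-1, -2, -3]]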
# Copyright 2014 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from portage.tests import TestCase
from portage.tests.resolver.ResolverPlayground import (ResolverPlayground,
ResolverPlaygroundTestCase)
class AutounmaskUseBreakageTestCase(TestCase):
def testAutounmaskUseBreakage(self):
ebuilds = {
"app-misc/A-0" : {
"EAPI": "5",
"RDEPEND": "app-misc/D[-foo]",
},
"app-misc/B-0" : {
"EAPI": "5",
"RDEPEND": "app-misc/D[foo]"
},
"app-misc/C-0" : {
"EAPI": "5",
"RDEPEND": ">=app-misc/D-1"
},
"app-misc/D-0" : {
"EAPI": "5",
"IUSE": "foo"
},
"app-misc/D-1" : {
"EAPI": "5",
"IUSE": "bar"
},
}
test_cases = (
# Bug 510270
# _solve_non_slot_operator_slot_conflicts throws
# IndexError: tuple index out of range
# due to autounmask USE breakage.
ResolverPlaygroundTestCase(
["app-misc/C", "app-misc/B", "app-misc/A"],
all_permutations = True,
success = False,
ambiguous_slot_collision_solutions = True,
slot_collision_solutions = [None, []]
),
)
playground = ResolverPlayground(ebuilds=ebuilds, debug=False)
try:
for test_case in test_cases:
playground.run_TestCase(test_case)
self.assertEqual(test_case.test_success, True, test_case.fail_msg)
finally:
playground.cleanup()
from LogAnalyzer import Test,TestResult
import DataflashLog
from VehicleType import VehicleType
import collections
class TestPitchRollCoupling(Test):
'''test for divergence between input and output pitch/roll, i.e. mechanical failure or bad PID tuning'''
# TODO: currently we're only checking for roll/pitch outside of max lean angle, will come back later to analyze roll/pitch in versus out values
def __init__(self):
Test.__init__(self)
self.name = "Pitch/Roll"
self.enable = True # TEMP
def run(self, logdata, verbose):
self.result = TestResult()
self.result.status = TestResult.StatusType.GOOD
if logdata.vehicleType != VehicleType.Copter:
self.result.status = TestResult.StatusType.NA
return
if not "ATT" in logdata.channels:
self.result.status = TestResult.StatusType.UNKNOWN
self.result.statusMessage = "No ATT log data"
return
# figure out where each mode begins and ends, so we can treat auto and manual modes differently and ignore acro/tune modes
autoModes = ["RTL",
"AUTO",
"LAND",
"LOITER",
"GUIDED",
"CIRCLE",
"OF_LOITER",
"POSHOLD",
"BRAKE",
"AVOID_ADSB",
"GUIDED_NOGPS",
"SMARTRTL"]
# use CTUN RollIn/DesRoll + PitchIn/DesPitch
manualModes = ["STABILIZE", "DRIFT", "ALTHOLD", "ALT_HOLD", "POSHOLD"]
# ignore data from these modes:
ignoreModes = ["ACRO", "SPORT", "FLIP", "AUTOTUNE","", "THROW",]
autoSegments = [] # list of (startLine,endLine) pairs
manualSegments = [] # list of (startLine,endLine) pairs
orderedModes = collections.OrderedDict(sorted(logdata.modeChanges.items(), key=lambda t: t[0]))
isAuto = False # we always start in a manual control mode
prevLine = 0
mode = ""
for line,modepair in orderedModes.iteritems():
mode = modepair[0].upper()
if prevLine == 0:
prevLine = line
if mode in autoModes:
if not isAuto:
manualSegments.append((prevLine,line-1))
prevLine = line
isAuto = True
elif mode in manualModes:
if isAuto:
autoSegments.append((prevLine,line-1))
prevLine = line
isAuto = False
elif mode in ignoreModes:
if isAuto:
autoSegments.append((prevLine,line-1))
else:
manualSegments.append((prevLine,line-1))
prevLine = 0
else:
raise Exception("Unknown mode in TestPitchRollCoupling: %s" % mode)
# and handle the last segment, which doesn't have an ending
if mode in autoModes:
autoSegments.append((prevLine,logdata.lineCount))
elif mode in manualModes:
manualSegments.append((prevLine,logdata.lineCount))
# figure out max lean angle, the ANGLE_MAX param was added in AC3.1
maxLeanAngle = 45.0
if "ANGLE_MAX" in logdata.parameters:
maxLeanAngle = logdata.parameters["ANGLE_MAX"] / 100.0
maxLeanAngleBuffer = 10 # allow a buffer margin
# ignore anything below this altitude, to discard any data while not flying
minAltThreshold = 2.0
# look through manual+auto flight segments
# TODO: filter to ignore single points outside range?
(maxRoll, maxRollLine) = (0.0, 0)
(maxPitch, maxPitchLine) = (0.0, 0)
for (startLine,endLine) in manualSegments+autoSegments:
# quick up-front test, only fallover into more complex line-by-line check if max()>threshold
rollSeg = logdata.channels["ATT"]["Roll"].getSegment(startLine,endLine)
pitchSeg = logdata.channels["ATT"]["Pitch"].getSegment(startLine,endLine)
if not rollSeg.dictData and not pitchSeg.dictData:
continue
# check max roll+pitch for any time where relative altitude is above minAltThreshold
roll = max(abs(rollSeg.min()), abs(rollSeg.max()))
pitch = max(abs(pitchSeg.min()), abs(pitchSeg.max()))
if (roll>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll)) or (pitch>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch)):
lit = DataflashLog.LogIterator(logdata, startLine)
assert(lit.currentLine == startLine)
while lit.currentLine <= endLine:
relativeAlt = lit["CTUN"]["BarAlt"]
if relativeAlt > minAltThreshold:
roll = lit["ATT"]["Roll"]
pitch = lit["ATT"]["Pitch"]
if abs(roll)>(maxLeanAngle+maxLeanAngleBuffer) and abs(roll)>abs(maxRoll):
maxRoll = roll
maxRollLine = lit.currentLine
if abs(pitch)>(maxLeanAngle+maxLeanAngleBuffer) and abs(pitch)>abs(maxPitch):
maxPitch = pitch
maxPitchLine = lit.currentLine
next(lit)
# check for breaking max lean angles
if maxRoll and abs(maxRoll)>abs(maxPitch):
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Roll (%.2f, line %d) > maximum lean angle (%.2f)" % (maxRoll, maxRollLine, maxLeanAngle)
return
if maxPitch:
self.result.status = TestResult.StatusType.FAIL
self.result.statusMessage = "Pitch (%.2f, line %d) > maximum lean angle (%.2f)" % (maxPitch, maxPitchLine, maxLeanAngle)
return
# TODO: use numpy/scipy to check Roll+RollIn curves for fitness (ignore where we're not airborne)
# ...
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/scrubber.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import sys
import tempfile
import xml.etree.ElementTree as ElementTree
import zipfile
def remove_office_metadata(file_name):
"""
Remove all metadata from Microsoft Office 2007+ file types such as docx,
pptx, and xlsx.
:param str file_name: The path to the file whose metadata is to be removed.
"""
ns = {
'cp': 'http://schemas.openxmlformats.org/package/2006/metadata/core-properties',
'dc': 'http://purl.org/dc/elements/1.1/',
'dcterms': 'http://purl.org/dc/terms/',
'dcmitype': 'http://purl.org/dc/dcmitype/',
'xsi': 'http://www.w3.org/2001/XMLSchema-instance'
}
for prefix, uri in ns.items():
ElementTree.register_namespace(prefix, uri)
_, file_ext = os.path.splitext(file_name)
tmpfd, tmpname = tempfile.mkstemp(dir=os.path.dirname(file_name), suffix=file_ext)
os.close(tmpfd)
with zipfile.ZipFile(file_name, 'r') as zin:
with zipfile.ZipFile(tmpname, 'w') as zout:
zout.comment = zin.comment
for item in zin.infolist():
data = zin.read(item.filename)
if item.filename == 'docProps/core.xml':
root = ElementTree.fromstring(data)
root.clear()
data = ElementTree.tostring(root, 'UTF-8')
zout.writestr(item, data)
os.remove(file_name)
os.rename(tmpname, file_name)
def main():
if len(sys.argv) < 2:
print("usage: {0} [path to document]".format(os.path.basename(sys.argv[0])))
return 0
file_path = sys.argv[1]
if not os.path.isfile(file_path):
print('[-] the specified path is not a file')
return 1
if not os.access(file_path, os.R_OK | os.W_OK):
print('[-] insufficient permissions to the specified file')
return 1
remove_office_metadata(file_path)
return 0
if __name__ == '__main__':
sys.exit(main())
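# --- Illustrative usage sketch (not part of the original module) ---
# Invoked directly, the script rewrites the Office document in place, e.g.
# (the file name below is hypothetical):
#
#   python scrubber.py report.docx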
from .main import Manage
def start():
return Manage()
config = [{
'name': 'manage',
'groups': [
{
'tab': 'manage',
'label': 'Movie Library Manager',
'description': 'Add your existing movie folders.',
'options': [
{
'name': 'enabled',
'default': False,
'type': 'enabler',
},
{
'name': 'library',
'type': 'directories',
'description': 'Folder where the movies should be moved to.',
},
{
'label': 'Cleanup After',
'name': 'cleanup',
'type': 'bool',
'description': 'Remove movie from db if it can\'t be found after re-scan.',
'default': True,
},
{
'label': 'Scan at startup',
'name': 'startup_scan',
'type': 'bool',
'default': True,
'advanced': True,
'description': 'Do a quick scan on startup. On slow systems better disable this.',
},
],
},
],
}]
#!/usr/bin/env python
from __future__ import print_function, division
import bayesloop as bl
import numpy as np
class TestBuiltin:
def test_static(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.Static()
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.372209708143769, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_deterministic(self):
S = bl.HyperStudy()
S.loadData(np.array([1, 2, 3, 4, 5]))
def linear(t, a=[1, 2]):
return 0.5 + 0.2*a*t
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.Deterministic(linear, target='rate')
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -9.4050089375418136, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_gaussianrandomwalk(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.GaussianRandomWalk('sigma', 0.2, target='rate')
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.323144246611964, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_alphastablerandomwalk(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.AlphaStableRandomWalk('c', 0.2, 'alpha', 1.5, target='rate')
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.122384638661309, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_changepoint(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.ChangePoint('t_change', 2)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -12.894336092378385, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_regimeswitch(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.RegimeSwitch('p_min', -3)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.372866559561402, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_independent(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.Independent()
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -11.087360077190617, decimal=5,
err_msg='Erroneous log-evidence value.')
def test_notequal(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.NotEqual('p_min', -3)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -10.569099863134156, decimal=5,
err_msg='Erroneous log-evidence value.')
class TestNested:
def test_nested(self):
S = bl.Study()
S.loadData(np.array([1, 2, 3, 4, 5]))
L = bl.om.Poisson('rate', bl.oint(0, 6, 100))
T = bl.tm.SerialTransitionModel(
bl.tm.Static(),
bl.tm.ChangePoint('t_change', 1),
bl.tm.CombinedTransitionModel(
bl.tm.GaussianRandomWalk('sigma', 0.2, target='rate'),
bl.tm.RegimeSwitch('p_min', -3)
),
bl.tm.BreakPoint('t_break', 3),
bl.tm.Independent()
)
S.set(L, T)
S.fit()
# test model evidence value
np.testing.assert_almost_equal(S.logEvidence, -13.269918024215237, decimal=5,
err_msg='Erroneous log-evidence value.')
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AST conversion templates.
Adapted from Tangent.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ast
import textwrap
import gast
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import ast_util
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import qual_names
class ContextAdjuster(gast.NodeTransformer):
"""Adjusts the ctx field of nodes to ensure consistency.
This transformer can change the ctx fields of a variable, tuple and other
AST elements that allow one, based on whether the element is being read or
written.
"""
def __init__(self, override_value):
self._ctx_override = override_value
def visit(self, node):
original_override = self._ctx_override
node = super(ContextAdjuster, self).visit(node)
if hasattr(node, 'ctx'):
assert node.ctx is not None, 'node {} has ctx unset'.format(node)
self._ctx_override = original_override
return node
def _apply_override(self, node):
if self._ctx_override is not None:
node.ctx = self._ctx_override()
def visit_Attribute(self, node):
self._apply_override(node)
self._ctx_override = gast.Load
node = self.generic_visit(node)
return node
def visit_Tuple(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_List(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Name(self, node):
self._apply_override(node)
return self.generic_visit(node)
def visit_Call(self, node):
self._apply_override(node)
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Dict(self, node):
# We may be able to override these to Load(), but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Subscript(self, node):
self._apply_override(node)
self._ctx_override = gast.Load
node.value = self.visit(node.value)
return self.generic_visit(node)
def visit_comprehension(self, node):
# We may be able to override some of these, but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
def visit_Lambda(self, node):
# We may be able to override some of these, but for now it's simpler
# to just assert that they're set.
self._ctx_override = None
return self.generic_visit(node)
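# Illustrative sketch (not part of the original module): how ContextAdjuster is
# used to fix up ctx fields on a node spliced into a new position. The
# attribute/variable names below are hypothetical.
def _example_context_adjuster():
  # "x.y" as it would appear when read: both the Name and the Attribute carry
  # a Load context.
  node = gast.Attribute(
      value=gast.Name(id='x', ctx=gast.Load(), annotation=None,
                      type_comment=None),
      attr='y', ctx=gast.Load())
  # Pretend the node now becomes an assignment target: the outermost ctx must
  # become Store, while the inner Name stays a Load (visit_Attribute above
  # resets the override to Load for its children).
  return ContextAdjuster(gast.Store).visit(node)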
class ReplaceTransformer(gast.NodeTransformer):
"""Replace AST nodes."""
def __init__(self, replacements):
"""Create a new ReplaceTransformer.
Args:
replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by.
"""
self.replacements = replacements
self.in_replacements = False
self.preserved_annos = {
anno.Basic.DIRECTIVES,
anno.Basic.EXTRA_LOOP_TEST,
anno.Basic.ORIGIN,
anno.Basic.SKIP_PROCESSING,
anno.Static.ORIG_DEFINITIONS,
'function_context_name',
}
def _prepare_replacement(self, replaced, key):
"""Prepares a replacement AST that's safe to swap in for a node.
Args:
replaced: ast.AST, the node being replaced
key: Hashable, the key of the replacement AST
Returns:
ast.AST, the replacement AST
"""
repl = self.replacements[key]
new_nodes = ast_util.copy_clean(repl, preserve_annos=self.preserved_annos)
if isinstance(new_nodes, gast.AST):
new_nodes = [new_nodes]
return new_nodes
def visit_Expr(self, node):
# When replacing a placeholder with an entire statement, the replacement
# must stand on its own and not be wrapped in an Expr.
new_value = self.visit(node.value)
if new_value is node.value:
return node
return new_value
def visit_keyword(self, node):
if node.arg not in self.replacements:
return self.generic_visit(node)
repl = self._prepare_replacement(node, node.arg)
if isinstance(repl, gast.keyword):
return repl
elif (repl and isinstance(repl, (list, tuple)) and
all(isinstance(r, gast.keyword) for r in repl)):
return repl
# TODO(mdan): We may allow replacing with a string as well.
# For example, if one wanted to replace foo with bar in foo=baz, then
# we could allow changing just node arg, so that we end up with bar=baz.
raise ValueError(
'a keyword argument may only be replaced by another keyword or a '
'non-empty list of keywords. Found: {} for keyword {}'.format(
repl, node.arg))
def visit_FunctionDef(self, node):
node = self.generic_visit(node)
if node.name not in self.replacements:
return node
repl = self.replacements[node.name]
if not isinstance(repl, (gast.Name, ast.Name)):
raise ValueError(
'a function name can only be replaced by a Name node. Found: %s' %
repl)
node.name = repl.id
return node
def visit_Attribute(self, node):
node = self.generic_visit(node)
if node.attr not in self.replacements:
return node
repl = self.replacements[node.attr]
if not isinstance(repl, gast.Name):
raise ValueError(
'An attribute can only be replaced by a Name node. Found: %s' % repl)
node.attr = repl.id
return node
def visit_Name(self, node):
if node.id not in self.replacements:
return node
new_nodes = self._prepare_replacement(node, node.id)
if not new_nodes:
return new_nodes
# Preserve the target context.
adjuster = ContextAdjuster(type(node.ctx))
for n in new_nodes:
if hasattr(n, 'ctx'):
adjuster.visit(n)
if len(new_nodes) == 1:
new_nodes, = new_nodes
return new_nodes
def _convert_to_ast(n):
"""Converts from a known data type to AST."""
# Note: When generating AST nodes from strings/QNs in isolation, ctx is
# unknown. ctx must be filled in according to the template being used.
# See ReplaceTransformer.visit_Name.
if isinstance(n, str):
return gast.Name(id=n, ctx=None, annotation=None, type_comment=None)
if isinstance(n, qual_names.QN):
return n.ast()
if isinstance(n, list):
return [_convert_to_ast(e) for e in n]
if isinstance(n, tuple):
return tuple(_convert_to_ast(e) for e in n)
return n
def replace(template, **replacements):
"""Replaces placeholders in a Python template.
  AST Name and Tuple nodes always receive the context that is inferred from
  the template. However, when replacing more complex nodes (that can
  potentially contain Name children), the caller is responsible for setting
  the appropriate context.
Args:
    template: A string representing Python code. Any symbol name that appears
      in the template code can be used as a placeholder.
**replacements: A mapping from placeholder names to (lists of) AST nodes
that these placeholders will be replaced by. String values are also
supported as a shorthand for AST Name nodes with the respective ID.
Returns:
An AST node or list of AST nodes with the replacements made. If the
template was a function, a list will be returned. If the template was a
node, the same node will be returned. If the template was a string, an
AST node will be returned (a `Module` node in the case of a multi-line
string, an `Expr` node otherwise).
Raises:
ValueError: if the arguments are incorrect.
"""
if not isinstance(template, str):
raise ValueError('Expected string template, got %s' % type(template))
for k in replacements:
replacements[k] = _convert_to_ast(replacements[k])
template_str = parser.STANDARD_PREAMBLE + textwrap.dedent(template)
nodes = parser.parse(
template_str,
preamble_len=parser.STANDARD_PREAMBLE_LEN,
single_node=False)
results = []
for node in nodes:
node = ReplaceTransformer(replacements).visit(node)
if isinstance(node, (list, tuple)):
results.extend(node)
else:
results.append(node)
results = [qual_names.resolve(r) for r in results]
return results
def replace_as_expression(template, **replacements):
"""Variant of replace that generates expressions, instead of code blocks."""
replacement = replace(template, **replacements)
if len(replacement) != 1:
raise ValueError(
'single expression expected; for more general templates use replace')
node, = replacement
if isinstance(node, gast.Expr):
return node.value
elif isinstance(node, gast.Name):
return node
raise ValueError(
'the template is expected to generate an expression or a name node;'
' instead found %s' % node)
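# Illustrative sketch (not part of the original module): a typical call to the
# template helpers above. The template source and placeholder names are
# hypothetical; string replacement values are turned into Name nodes by
# _convert_to_ast.
def _example_replace_usage():
  # Every identifier in the template that also appears as a keyword argument
  # is a placeholder and gets substituted.
  nodes = replace('result_var = fn_name(arg_name)',
                  result_var='out', fn_name='compute', arg_name='x')
  # For single expressions, replace_as_expression unwraps the Expr node.
  call = replace_as_expression('fn_name(arg_name)',
                               fn_name='compute', arg_name='x')
  return nodes, call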
# This code is part of Ansible, but is an independent component.
# This particular file snippet, and this file snippet only, is BSD licensed.
# Modules you write using this snippet, which is embedded dynamically by Ansible
# still belong to the author of the module, and may assign their own license
# to the complete work.
#
# Copyright (c), Benjamin Jolivot , 2014
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
# USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import os
import time
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.pycompat24 import get_exception
# check for pyFG lib
try:
    from pyFG import FortiOS, FortiConfig
    from pyFG.exceptions import CommandExecutionException, FailedCommit
    HAS_PYFG = True
except ImportError:
    HAS_PYFG = False
fortios_argument_spec = dict(
    host=dict(required=True),
    username=dict(required=True),
    password=dict(required=True, type='str', no_log=True),
    timeout=dict(type='int', default=60),
    vdom=dict(type='str', default=None),
    backup=dict(type='bool', default=False),
    backup_path=dict(type='path'),
    backup_filename=dict(type='str'),
)
fortios_required_if = [
    ['backup', True, ['backup_path']],
]
fortios_error_codes = {
    '-3': "Object not found",
    '-61': "Command error"
}
def backup(module, running_config):
    backup_path = module.params['backup_path']
    backup_filename = module.params['backup_filename']
    if not os.path.exists(backup_path):
        try:
            os.mkdir(backup_path)
        except OSError:
            module.fail_json(msg="Can't create directory {0}. Permission denied?".format(backup_path))
    tstamp = time.strftime("%Y-%m-%d@%H:%M:%S", time.localtime(time.time()))
    if backup_filename:
        filename = '%s/%s' % (backup_path, backup_filename)
    else:
        filename = '%s/%s_config.%s' % (backup_path, module.params['host'], tstamp)
    try:
        with open(filename, 'w') as backup_file:
            backup_file.write(running_config)
    except IOError:
        module.fail_json(msg="Can't create backup file {0}. Permission denied?".format(filename))
class AnsibleFortios(object):
def __init__(self, module):
if not HAS_PYFG:
module.fail_json(msg='Could not import the python library pyFG required by this module')
self.result = {
'changed': False,
}
self.module = module
def _connect(self):
host = self.module.params['host']
username = self.module.params['username']
password = self.module.params['password']
timeout = self.module.params['timeout']
vdom = self.module.params['vdom']
self.forti_device = FortiOS(host, username=username, password=password, timeout=timeout, vdom=vdom)
try:
self.forti_device.open()
except Exception:
e = get_exception()
self.module.fail_json(msg='Error connecting device. %s' % e)
def load_config(self, path):
self._connect()
self.path = path
#get config
try:
self.forti_device.load_config(path=path)
self.result['running_config'] = self.forti_device.running_config.to_text()
except Exception:
self.forti_device.close()
e = get_exception()
self.module.fail_json(msg='Error reading running config. %s' % e)
#backup if needed
if self.module.params['backup']:
backup(self.module, self.result['running_config'])
self.candidate_config = self.forti_device.candidate_config
def apply_changes(self):
change_string = self.forti_device.compare_config()
if change_string:
self.result['change_string'] = change_string
self.result['changed'] = True
#Commit if not check mode
if change_string and not self.module.check_mode:
try:
self.forti_device.commit()
except FailedCommit:
#Something's wrong (rollback is automatic)
self.forti_device.close()
e = get_exception()
error_list = self.get_error_infos(e)
self.module.fail_json(msg_error_list=error_list, msg="Unable to commit change, check your args, the error was %s" % e.message )
self.forti_device.close()
self.module.exit_json(**self.result)
def del_block(self, block_id):
self.forti_device.candidate_config[self.path].del_block(block_id)
def add_block(self, block_id, block):
self.forti_device.candidate_config[self.path][block_id] = block
def get_error_infos(self, cli_errors):
error_list = []
for errors in cli_errors.args:
for error in errors:
error_code = error[0]
error_string = error[1]
                error_type = fortios_error_codes.get(error_code, "unknown")
                error_list.append(dict(error_code=error_code, error_type=error_type, error_string=error_string))
return error_list
def get_empty_configuration_block(self, block_name, block_type):
return FortiConfig(block_name, block_type)
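# Illustrative sketch (not part of this snippet): how a module built on this
# helper typically drives AnsibleFortios. The config path, block id and block
# contents below are hypothetical.
def _example_fortios_module():
    module = AnsibleModule(argument_spec=fortios_argument_spec,
                           required_if=fortios_required_if,
                           supports_check_mode=True)
    fortigate = AnsibleFortios(module)
    # Connect, fetch the running config for the path and (optionally) back it up.
    fortigate.load_config('firewall address')
    # Stage a new block in the candidate configuration.
    block = fortigate.get_empty_configuration_block('"host_example"', 'edit')
    fortigate.add_block('"host_example"', block)
    # Diff against the running config, commit unless in check mode, exit_json.
    fortigate.apply_changes()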
#!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: ce_snmp_user
version_added: "2.4"
short_description: Manages SNMP user configuration on HUAWEI CloudEngine switches.
description:
- Manages SNMP user configurations on CloudEngine switches.
author:
- wangdezhuang (@CloudEngine-Ansible)
options:
acl_number:
description:
- Access control list number.
required: false
default: null
usm_user_name:
description:
- Unique name to identify the USM user.
required: false
default: null
aaa_local_user:
description:
- Unique name to identify the local user.
required: false
default: null
remote_engine_id:
description:
- Remote engine id of the USM user.
required: false
default: null
user_group:
description:
- Name of the group where user belongs to.
required: false
default: null
auth_protocol:
description:
- Authentication protocol.
required: false
default: null
choices: ['noAuth', 'md5', 'sha']
auth_key:
description:
- The authentication password. Password length, 8-255 characters.
required: false
default: null
priv_protocol:
description:
- Encryption protocol.
required: false
default: null
choices: ['noPriv', 'des56', '3des168', 'aes128', 'aes192', 'aes256']
priv_key:
description:
- The encryption password. Password length 8-255 characters.
required: false
default: null
'''
EXAMPLES = '''
- name: CloudEngine snmp user test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Config SNMP usm user"
ce_snmp_user:
state: present
usm_user_name: wdz_snmp
remote_engine_id: 800007DB03389222111200
acl_number: 2000
user_group: wdz_group
provider: "{{ cli }}"
- name: "Undo SNMP usm user"
ce_snmp_user:
state: absent
usm_user_name: wdz_snmp
remote_engine_id: 800007DB03389222111200
acl_number: 2000
user_group: wdz_group
provider: "{{ cli }}"
- name: "Config SNMP local user"
ce_snmp_user:
state: present
aaa_local_user: wdz_user
auth_protocol: md5
auth_key: huawei123
priv_protocol: des56
priv_key: huawei123
provider: "{{ cli }}"
- name: "Config SNMP local user"
ce_snmp_user:
state: absent
aaa_local_user: wdz_user
auth_protocol: md5
auth_key: huawei123
priv_protocol: des56
priv_key: huawei123
provider: "{{ cli }}"
'''
RETURN = '''
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {"acl_number": "2000", "remote_engine_id": "800007DB03389222111200",
"state": "present", "user_group": "wdz_group",
"usm_user_name": "wdz_snmp"}
existing:
description: k/v pairs of existing aaa server
returned: always
type: dict
sample: {"snmp local user": {"local_user_info": []},
"snmp usm user": {"usm_user_info": []}}
end_state:
description: k/v pairs of aaa params after module execution
returned: always
type: dict
sample: {"snmp local user": {"local_user_info": []},
"snmp usm user": {"usm_user_info": [{"aclNumber": "2000", "engineID": "800007DB03389222111200",
"groupName": "wdz_group", "userName": "wdz_snmp"}]}}
updates:
description: command sent to the device
returned: always
type: list
sample: ["snmp-agent remote-engineid 800007DB03389222111200 usm-user v3 wdz_snmp wdz_group acl 2000"]
'''
from xml.etree import ElementTree
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.cloudengine.ce import get_nc_config, set_nc_config, ce_argument_spec, get_config
# get snmp v3 USM user
CE_GET_SNMP_V3_USM_USER_HEADER = """
"""
CE_GET_SNMP_V3_USM_USER_TAIL = """
"""
# merge snmp v3 USM user
CE_MERGE_SNMP_V3_USM_USER_HEADER = """
%s%s%s
"""
CE_MERGE_SNMP_V3_USM_USER_TAIL = """
"""
# create snmp v3 USM user
CE_CREATE_SNMP_V3_USM_USER_HEADER = """
%s%s%s
"""
CE_CREATE_SNMP_V3_USM_USER_TAIL = """
"""
# delete snmp v3 USM user
CE_DELETE_SNMP_V3_USM_USER_HEADER = """
%s%s%s
"""
CE_DELETE_SNMP_V3_USM_USER_TAIL = """
"""
# get snmp v3 aaa local user
CE_GET_SNMP_V3_LOCAL_USER = """
"""
# merge snmp v3 aaa local user
CE_MERGE_SNMP_V3_LOCAL_USER = """
%s%s%s%s%s
"""
# create snmp v3 aaa local user
CE_CREATE_SNMP_V3_LOCAL_USER = """
%s%s%s%s%s
"""
# delete snmp v3 aaa local user
CE_DELETE_SNMP_V3_LOCAL_USER = """
%s%s%s%s%s
"""
class SnmpUser(object):
""" Manages SNMP user configuration """
def netconf_get_config(self, **kwargs):
""" Get configure by netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = get_nc_config(module, conf_str)
return xml_str
def netconf_set_config(self, **kwargs):
""" Set configure by netconf """
module = kwargs["module"]
conf_str = kwargs["conf_str"]
xml_str = set_nc_config(module, conf_str)
return xml_str
def check_snmp_v3_usm_user_args(self, **kwargs):
""" Check snmp v3 usm user invalid args """
module = kwargs["module"]
result = dict()
result["usm_user_info"] = []
need_cfg = False
state = module.params['state']
usm_user_name = module.params['usm_user_name']
remote_engine_id = module.params['remote_engine_id']
acl_number = module.params['acl_number']
user_group = module.params['user_group']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
local_user_name = module.params['aaa_local_user']
if usm_user_name:
if len(usm_user_name) > 32 or len(usm_user_name) == 0:
module.fail_json(
msg='Error: The length of usm_user_name %s is out of [1 - 32].' % usm_user_name)
if remote_engine_id:
if len(remote_engine_id) > 64 or len(remote_engine_id) < 10:
module.fail_json(
msg='Error: The length of remote_engine_id %s is out of [10 - 64].' % remote_engine_id)
conf_str = CE_GET_SNMP_V3_USM_USER_HEADER
if acl_number:
if acl_number.isdigit():
if int(acl_number) > 2999 or int(acl_number) < 2000:
module.fail_json(
msg='Error: The value of acl_number %s is out of [2000 - 2999].' % acl_number)
else:
if not acl_number[0].isalpha() or len(acl_number) > 32 or len(acl_number) < 1:
module.fail_json(
msg='Error: The length of acl_number %s is out of [1 - 32].' % acl_number)
conf_str += ""
if user_group:
if len(user_group) > 32 or len(user_group) == 0:
module.fail_json(
msg='Error: The length of user_group %s is out of [1 - 32].' % user_group)
conf_str += ""
if auth_protocol:
conf_str += ""
if auth_key:
if len(auth_key) > 255 or len(auth_key) == 0:
module.fail_json(
msg='Error: The length of auth_key %s is out of [1 - 255].' % auth_key)
conf_str += ""
if priv_protocol:
if not auth_protocol:
module.fail_json(
msg='Error: Please input auth_protocol at the same time.')
conf_str += ""
if priv_key:
if len(priv_key) > 255 or len(priv_key) == 0:
module.fail_json(
msg='Error: The length of priv_key %s is out of [1 - 255].' % priv_key)
conf_str += ""
conf_str += CE_GET_SNMP_V3_USM_USER_TAIL
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
usm_user_info = root.findall("data/snmp/usmUsers/usmUser")
if usm_user_info:
for tmp in usm_user_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["userName", "remoteEngineID", "engineID", "groupName", "authProtocol",
"authKey", "privProtocol", "privKey", "aclNumber"]:
tmp_dict[site.tag] = site.text
result["usm_user_info"].append(tmp_dict)
if result["usm_user_info"]:
for tmp in result["usm_user_info"]:
if "userName" in tmp.keys():
if state == "present":
if tmp["userName"] != usm_user_name:
need_cfg = True
else:
if tmp["userName"] == usm_user_name:
need_cfg = True
if "remoteEngineID" in tmp.keys():
if remote_engine_id:
enable = "true"
else:
enable = "false"
if state == "present":
if tmp["remoteEngineID"] != enable:
need_cfg = True
else:
if tmp["remoteEngineID"] == enable:
need_cfg = True
if remote_engine_id:
if "engineID" in tmp.keys():
if state == "present":
if tmp["engineID"] != remote_engine_id:
need_cfg = True
else:
if tmp["engineID"] == remote_engine_id:
need_cfg = True
if user_group:
if "groupName" in tmp.keys():
if state == "present":
if tmp["groupName"] != user_group:
need_cfg = True
else:
if tmp["groupName"] == user_group:
need_cfg = True
if auth_protocol:
if "authProtocol" in tmp.keys():
if state == "present":
if tmp["authProtocol"] != auth_protocol:
need_cfg = True
else:
if tmp["authProtocol"] == auth_protocol:
need_cfg = True
if auth_key:
if "authKey" in tmp.keys():
if state == "present":
if tmp["authKey"] != auth_key:
need_cfg = True
else:
if tmp["authKey"] == auth_key:
need_cfg = True
if priv_protocol:
if "privProtocol" in tmp.keys():
if state == "present":
if tmp["privProtocol"] != priv_protocol:
need_cfg = True
else:
if tmp["privProtocol"] == priv_protocol:
need_cfg = True
if priv_key:
if "privKey" in tmp.keys():
if state == "present":
if tmp["privKey"] != priv_key:
need_cfg = True
else:
if tmp["privKey"] == priv_key:
need_cfg = True
if acl_number:
if "aclNumber" in tmp.keys():
if state == "present":
if tmp["aclNumber"] != acl_number:
need_cfg = True
else:
if tmp["aclNumber"] == acl_number:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def check_snmp_v3_local_user_args(self, **kwargs):
""" Check snmp v3 local user invalid args """
module = kwargs["module"]
result = dict()
result["local_user_info"] = []
need_cfg = False
state = module.params['state']
local_user_name = module.params['aaa_local_user']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
usm_user_name = module.params['usm_user_name']
if local_user_name:
if usm_user_name:
module.fail_json(
msg='Error: Please do not input usm_user_name and local_user_name at the same time.')
if not auth_protocol or not auth_key or not priv_protocol or not priv_key:
module.fail_json(
msg='Error: Please input auth_protocol auth_key priv_protocol priv_key for local user.')
if len(local_user_name) > 32 or len(local_user_name) == 0:
module.fail_json(
msg='Error: The length of local_user_name %s is out of [1 - 32].' % local_user_name)
if len(auth_key) > 255 or len(auth_key) == 0:
module.fail_json(
msg='Error: The length of auth_key %s is out of [1 - 255].' % auth_key)
if len(priv_key) > 255 or len(priv_key) == 0:
module.fail_json(
msg='Error: The length of priv_key %s is out of [1 - 255].' % priv_key)
conf_str = CE_GET_SNMP_V3_LOCAL_USER
recv_xml = self.netconf_get_config(module=module, conf_str=conf_str)
if "" in recv_xml:
if state == "present":
need_cfg = True
else:
xml_str = recv_xml.replace('\r', '').replace('\n', '').\
replace('xmlns="urn:ietf:params:xml:ns:netconf:base:1.0"', "").\
replace('xmlns="http://www.huawei.com/netconf/vrp"', "")
root = ElementTree.fromstring(xml_str)
local_user_info = root.findall(
"data/snmp/localUsers/localUser")
if local_user_info:
for tmp in local_user_info:
tmp_dict = dict()
for site in tmp:
if site.tag in ["userName", "authProtocol", "authKey", "privProtocol", "privKey"]:
tmp_dict[site.tag] = site.text
result["local_user_info"].append(tmp_dict)
if result["local_user_info"]:
for tmp in result["local_user_info"]:
if "userName" in tmp.keys():
if state == "present":
if tmp["userName"] != local_user_name:
need_cfg = True
else:
if tmp["userName"] == local_user_name:
need_cfg = True
if auth_protocol:
if "authProtocol" in tmp.keys():
if state == "present":
if tmp["authProtocol"] != auth_protocol:
need_cfg = True
else:
if tmp["authProtocol"] == auth_protocol:
need_cfg = True
if auth_key:
if "authKey" in tmp.keys():
if state == "present":
if tmp["authKey"] != auth_key:
need_cfg = True
else:
if tmp["authKey"] == auth_key:
need_cfg = True
if priv_protocol:
if "privProtocol" in tmp.keys():
if state == "present":
if tmp["privProtocol"] != priv_protocol:
need_cfg = True
else:
if tmp["privProtocol"] == priv_protocol:
need_cfg = True
if priv_key:
if "privKey" in tmp.keys():
if state == "present":
if tmp["privKey"] != priv_key:
need_cfg = True
else:
if tmp["privKey"] == priv_key:
need_cfg = True
result["need_cfg"] = need_cfg
return result
def merge_snmp_v3_usm_user(self, **kwargs):
""" Merge snmp v3 usm user operation """
module = kwargs["module"]
usm_user_name = module.params['usm_user_name']
remote_engine_id = module.params['remote_engine_id']
acl_number = module.params['acl_number']
user_group = module.params['user_group']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
cmds = []
if remote_engine_id:
conf_str = CE_MERGE_SNMP_V3_USM_USER_HEADER % (
usm_user_name, "true", remote_engine_id)
cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
remote_engine_id, usm_user_name)
else:
if not self.local_engine_id:
module.fail_json(
msg='Error: The local engine id is null, please input remote_engine_id.')
conf_str = CE_MERGE_SNMP_V3_USM_USER_HEADER % (
usm_user_name, "false", self.local_engine_id)
cmd = "snmp-agent usm-user v3 %s" % usm_user_name
if user_group:
conf_str += "%s" % user_group
cmd += " %s" % user_group
if acl_number:
conf_str += "%s" % acl_number
cmd += " acl %s" % acl_number
cmds.append(cmd)
if remote_engine_id:
cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
remote_engine_id, usm_user_name)
else:
cmd = "snmp-agent usm-user v3 %s" % usm_user_name
if auth_protocol:
conf_str += "%s" % auth_protocol
if auth_protocol != "noAuth":
cmd += " authentication-mode %s" % auth_protocol
if auth_key:
conf_str += "%s" % auth_key
if auth_protocol != "noAuth":
cmd += " cipher %s" % "******"
cmds.append(cmd)
if remote_engine_id:
cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
remote_engine_id, usm_user_name)
else:
cmd = "snmp-agent usm-user v3 %s" % usm_user_name
if priv_protocol:
conf_str += "%s" % priv_protocol
if auth_protocol != "noAuth" and priv_protocol != "noPriv":
cmd += " privacy-mode %s" % priv_protocol
if priv_key:
conf_str += "%s" % priv_key
if auth_protocol != "noAuth" and priv_protocol != "noPriv":
cmd += " cipher %s" % "******"
cmds.append(cmd)
conf_str += CE_MERGE_SNMP_V3_USM_USER_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "" not in recv_xml:
module.fail_json(msg='Error: Merge snmp v3 usm user failed.')
return cmds
def create_snmp_v3_usm_user(self, **kwargs):
""" Create snmp v3 usm user operation """
module = kwargs["module"]
usm_user_name = module.params['usm_user_name']
remote_engine_id = module.params['remote_engine_id']
acl_number = module.params['acl_number']
user_group = module.params['user_group']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
cmds = []
if remote_engine_id:
conf_str = CE_CREATE_SNMP_V3_USM_USER_HEADER % (
usm_user_name, "true", remote_engine_id)
cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
remote_engine_id, usm_user_name)
else:
if not self.local_engine_id:
module.fail_json(
msg='Error: The local engine id is null, please input remote_engine_id.')
conf_str = CE_CREATE_SNMP_V3_USM_USER_HEADER % (
usm_user_name, "false", self.local_engine_id)
cmd = "snmp-agent usm-user v3 %s" % usm_user_name
if user_group:
conf_str += "%s" % user_group
cmd += " %s" % user_group
if acl_number:
conf_str += "%s" % acl_number
cmd += " acl %s" % acl_number
cmds.append(cmd)
if remote_engine_id:
cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
remote_engine_id, usm_user_name)
else:
cmd = "snmp-agent usm-user v3 %s" % usm_user_name
if auth_protocol:
conf_str += "%s" % auth_protocol
if auth_protocol != "noAuth":
cmd += " authentication-mode %s" % auth_protocol
if auth_key:
conf_str += "%s" % auth_key
if auth_protocol != "noAuth":
cmd += " cipher %s" % "******"
cmds.append(cmd)
if remote_engine_id:
cmd = "snmp-agent remote-engineid %s usm-user v3 %s" % (
remote_engine_id, usm_user_name)
else:
cmd = "snmp-agent usm-user v3 %s" % usm_user_name
if priv_protocol:
conf_str += "%s" % priv_protocol
if auth_protocol != "noAuth" and priv_protocol != "noPriv":
cmd += " privacy-mode %s" % priv_protocol
if priv_key:
conf_str += "%s" % priv_key
if auth_protocol != "noAuth" and priv_protocol != "noPriv":
cmd += " cipher %s" % "******"
cmds.append(cmd)
conf_str += CE_CREATE_SNMP_V3_USM_USER_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "" not in recv_xml:
module.fail_json(msg='Error: Create snmp v3 usm user failed.')
return cmds
def delete_snmp_v3_usm_user(self, **kwargs):
""" Delete snmp v3 usm user operation """
module = kwargs["module"]
usm_user_name = module.params['usm_user_name']
remote_engine_id = module.params['remote_engine_id']
acl_number = module.params['acl_number']
user_group = module.params['user_group']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
if remote_engine_id:
conf_str = CE_DELETE_SNMP_V3_USM_USER_HEADER % (
usm_user_name, "true", remote_engine_id)
cmd = "undo snmp-agent remote-engineid %s usm-user v3 %s" % (
remote_engine_id, usm_user_name)
else:
if not self.local_engine_id:
module.fail_json(
msg='Error: The local engine id is null, please input remote_engine_id.')
conf_str = CE_DELETE_SNMP_V3_USM_USER_HEADER % (
usm_user_name, "false", self.local_engine_id)
cmd = "undo snmp-agent usm-user v3 %s" % usm_user_name
if user_group:
conf_str += "%s" % user_group
if acl_number:
conf_str += "%s" % acl_number
if auth_protocol:
conf_str += "%s" % auth_protocol
if auth_key:
conf_str += "%s" % auth_key
if priv_protocol:
conf_str += "%s" % priv_protocol
if priv_key:
conf_str += "%s" % priv_key
conf_str += CE_DELETE_SNMP_V3_USM_USER_TAIL
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "" not in recv_xml:
module.fail_json(msg='Error: Delete snmp v3 usm user failed.')
return cmd
def merge_snmp_v3_local_user(self, **kwargs):
""" Merge snmp v3 local user operation """
module = kwargs["module"]
local_user_name = module.params['aaa_local_user']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
conf_str = CE_MERGE_SNMP_V3_LOCAL_USER % (
local_user_name, auth_protocol, auth_key, priv_protocol, priv_key)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "" not in recv_xml:
module.fail_json(msg='Error: Merge snmp v3 local user failed.')
cmd = "snmp-agent local-user v3 %s " % local_user_name + "authentication-mode %s " % auth_protocol + \
"cipher ****** " + "privacy-mode %s " % priv_protocol + "cipher ******"
return cmd
def create_snmp_v3_local_user(self, **kwargs):
""" Create snmp v3 local user operation """
module = kwargs["module"]
local_user_name = module.params['aaa_local_user']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
conf_str = CE_CREATE_SNMP_V3_LOCAL_USER % (
local_user_name, auth_protocol, auth_key, priv_protocol, priv_key)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "" not in recv_xml:
module.fail_json(msg='Error: Create snmp v3 local user failed.')
cmd = "snmp-agent local-user v3 %s " % local_user_name + "authentication-mode %s " % auth_protocol + \
"cipher ****** " + "privacy-mode %s " % priv_protocol + "cipher ******"
return cmd
def delete_snmp_v3_local_user(self, **kwargs):
""" Delete snmp v3 local user operation """
module = kwargs["module"]
local_user_name = module.params['aaa_local_user']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
conf_str = CE_DELETE_SNMP_V3_LOCAL_USER % (
local_user_name, auth_protocol, auth_key, priv_protocol, priv_key)
recv_xml = self.netconf_set_config(module=module, conf_str=conf_str)
if "" not in recv_xml:
module.fail_json(msg='Error: Delete snmp v3 local user failed.')
cmd = "undo snmp-agent local-user v3 %s" % local_user_name
return cmd
def get_snmp_local_engine(self, **kwargs):
""" Get snmp local engine operation """
module = kwargs["module"]
regular = "| include snmp | include local-engineid"
flags = list()
flags.append(regular)
tmp_cfg = get_config(module, flags)
if tmp_cfg:
tmp_data = tmp_cfg.split(r"snmp-agent local-engineid ")
self.local_engine_id = tmp_data[1]
def main():
""" Module main function """
argument_spec = dict(
state=dict(choices=['present', 'absent'], default='present'),
acl_number=dict(type='str'),
usm_user_name=dict(type='str'),
remote_engine_id=dict(type='str'),
user_group=dict(type='str'),
auth_protocol=dict(choices=['noAuth', 'md5', 'sha']),
auth_key=dict(type='str', no_log=True),
priv_protocol=dict(
choices=['noPriv', 'des56', '3des168', 'aes128', 'aes192', 'aes256']),
priv_key=dict(type='str', no_log=True),
aaa_local_user=dict(type='str')
)
mutually_exclusive = [("usm_user_name", "local_user_name")]
argument_spec.update(ce_argument_spec)
module = AnsibleModule(
argument_spec=argument_spec,
mutually_exclusive=mutually_exclusive,
supports_check_mode=True
)
changed = False
proposed = dict()
existing = dict()
end_state = dict()
updates = []
state = module.params['state']
acl_number = module.params['acl_number']
usm_user_name = module.params['usm_user_name']
remote_engine_id = module.params['remote_engine_id']
user_group = module.params['user_group']
auth_protocol = module.params['auth_protocol']
auth_key = module.params['auth_key']
priv_protocol = module.params['priv_protocol']
priv_key = module.params['priv_key']
aaa_local_user = module.params['aaa_local_user']
snmp_user_obj = SnmpUser()
if not snmp_user_obj:
module.fail_json(msg='Error: Init module failed.')
# get proposed
proposed["state"] = state
if acl_number:
proposed["acl_number"] = acl_number
if usm_user_name:
proposed["usm_user_name"] = usm_user_name
if remote_engine_id:
proposed["remote_engine_id"] = remote_engine_id
if user_group:
proposed["user_group"] = user_group
if auth_protocol:
proposed["auth_protocol"] = auth_protocol
if auth_key:
proposed["auth_key"] = auth_key
if priv_protocol:
proposed["priv_protocol"] = priv_protocol
if priv_key:
proposed["priv_key"] = priv_key
if aaa_local_user:
proposed["aaa_local_user"] = aaa_local_user
snmp_v3_usm_user_rst = snmp_user_obj.check_snmp_v3_usm_user_args(
module=module)
snmp_v3_local_user_rst = snmp_user_obj.check_snmp_v3_local_user_args(
module=module)
snmp_user_obj.get_snmp_local_engine(module=module)
# state exist snmp v3 user config
exist_tmp = dict()
for item in snmp_v3_usm_user_rst:
if item != "need_cfg":
exist_tmp[item] = snmp_v3_usm_user_rst[item]
if exist_tmp:
existing["snmp usm user"] = exist_tmp
exist_tmp = dict()
for item in snmp_v3_local_user_rst:
if item != "need_cfg":
exist_tmp[item] = snmp_v3_local_user_rst[item]
if exist_tmp:
existing["snmp local user"] = exist_tmp
if state == "present":
if snmp_v3_usm_user_rst["need_cfg"]:
if len(snmp_v3_usm_user_rst["usm_user_info"]) != 0:
cmd = snmp_user_obj.merge_snmp_v3_usm_user(module=module)
changed = True
updates.append(cmd)
else:
cmd = snmp_user_obj.create_snmp_v3_usm_user(module=module)
changed = True
updates.append(cmd)
if snmp_v3_local_user_rst["need_cfg"]:
if len(snmp_v3_local_user_rst["local_user_info"]) != 0:
cmd = snmp_user_obj.merge_snmp_v3_local_user(
module=module)
changed = True
updates.append(cmd)
else:
cmd = snmp_user_obj.create_snmp_v3_local_user(
module=module)
changed = True
updates.append(cmd)
else:
if snmp_v3_usm_user_rst["need_cfg"]:
cmd = snmp_user_obj.delete_snmp_v3_usm_user(module=module)
changed = True
updates.append(cmd)
if snmp_v3_local_user_rst["need_cfg"]:
cmd = snmp_user_obj.delete_snmp_v3_local_user(module=module)
changed = True
updates.append(cmd)
# state exist snmp v3 user config
snmp_v3_usm_user_rst = snmp_user_obj.check_snmp_v3_usm_user_args(
module=module)
end_tmp = dict()
for item in snmp_v3_usm_user_rst:
if item != "need_cfg":
end_tmp[item] = snmp_v3_usm_user_rst[item]
if end_tmp:
end_state["snmp usm user"] = end_tmp
snmp_v3_local_user_rst = snmp_user_obj.check_snmp_v3_local_user_args(
module=module)
end_tmp = dict()
for item in snmp_v3_local_user_rst:
if item != "need_cfg":
end_tmp[item] = snmp_v3_local_user_rst[item]
if end_tmp:
end_state["snmp local user"] = end_tmp
results = dict()
results['proposed'] = proposed
results['existing'] = existing
results['changed'] = changed
results['end_state'] = end_state
results['updates'] = updates
module.exit_json(**results)
if __name__ == '__main__':
main()
"""Exceptions used throughout package"""
class PipError(Exception):
"""Base pip exception"""
class InstallationError(PipError):
"""General exception during installation"""
class UninstallationError(PipError):
"""General exception during uninstallation"""
class DistributionNotFound(InstallationError):
"""Raised when a distribution cannot be found to satisfy a requirement"""
class BestVersionAlreadyInstalled(PipError):
"""Raised when the most up-to-date version of a package is already
installed. """
class BadCommand(PipError):
"""Raised when virtualenv or a command is not found"""
class CommandError(PipError):
"""Raised when there is an error in command-line arguments"""
class PreviousBuildDirError(PipError):
"""Raised when there's a previous conflicting build directory"""
class HashMismatch(InstallationError):
"""Distribution file hash values don't match."""
class InvalidWheelFilename(InstallationError):
"""Invalid wheel filename."""
class UnsupportedWheel(InstallationError):
"""Unsupported wheel."""
#! /usr/bin/env python
# coding=utf-8
#############################################################################
# #
# File: available_fetchserver.py #
# #
# Copyright (C) 2008 Du XiaoGang #
# #
# Home: http://gappproxy.googlecode.com #
# #
# This file is part of GAppProxy. #
# #
# GAppProxy is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as #
# published by the Free Software Foundation, either version 3 of the #
# License, or (at your option) any later version. #
# #
# GAppProxy is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with GAppProxy. If not, see <http://www.gnu.org/licenses/>. #
# #
#############################################################################
import wsgiref.handlers
from google.appengine.ext import webapp
import random
class MainHandler(webapp.RequestHandler):
def get(self):
fss = ['http://fetchserver1.appspot.com/fetch.py',
'http://fetchserver2.appspot.com/fetch.py',
'http://fetchserver3.appspot.com/fetch.py',
'http://wcm.appspot.com/fetch.py',
'http://fetchserver-nolog.appspot.com/fetch.py']
self.response.headers['Content-Type'] = 'text/plain'
self.response.out.write(fss[random.randint(0, len(fss) - 1)])
def main():
application = webapp.WSGIApplication([('/available_fetchserver.py', MainHandler)])
wsgiref.handlers.CGIHandler().run(application)
if __name__ == '__main__':
main()
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Class to represent a device."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
class DeviceSpec(object):
"""Represents a (possibly partial) specification for a TensorFlow device.
`DeviceSpec`s are used throughout TensorFlow to describe where state is stored
and computations occur. Using `DeviceSpec` allows you to parse device spec
strings to verify their validity, merge them or compose them programmatically.
Example:
```python
# Place the operations on device "GPU:0" in the "ps" job.
device_spec = DeviceSpec(job="ps", device_type="GPU", device_index=0)
with tf.device(device_spec):
# Both my_var and squared_var will be placed on /job:ps/device:GPU:0.
my_var = tf.Variable(..., name="my_variable")
squared_var = tf.square(my_var)
```
If a `DeviceSpec` is partially specified, it will be merged with other
`DeviceSpec`s according to the scope in which it is defined. `DeviceSpec`
components defined in inner scopes take precedence over those defined in
outer scopes.
```python
with tf.device(DeviceSpec(job="train", )):
with tf.device(DeviceSpec(job="ps", device_type="GPU", device_index=0):
# Nodes created here will be assigned to /job:ps/device:GPU:0.
with tf.device(DeviceSpec(device_type="GPU", device_index=1):
# Nodes created here will be assigned to /job:train/device:GPU:1.
```
A `DeviceSpec` consists of 5 components -- each of
which is optionally specified:
* Job: The job name.
* Replica: The replica index.
* Task: The task index.
* Device type: The device type string (e.g. "CPU" or "GPU").
* Device index: The device index.
"""
def __init__(self, job=None, replica=None, task=None, device_type=None,
device_index=None):
"""Create a new `DeviceSpec` object.
Args:
job: string. Optional job name.
replica: int. Optional replica index.
task: int. Optional task index.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left
unspecified, device represents 'any' device_index.
"""
self.job = job
self.replica = replica
self.task = task
if device_type == "cpu" or device_type == "gpu":
# For backwards compatibility only, we support lowercase variants of
# cpu and gpu but turn them into uppercase here.
self.device_type = device_type.upper()
else:
self.device_type = device_type
self.device_index = device_index
def _clear(self):
self._job = None
self._replica = None
self._task = None
self.device_type = None
self.device_index = None
@property
def job(self):
return self._job
@job.setter
def job(self, job):
if job is not None:
self._job = str(job)
else:
self._job = None
@property
def replica(self):
return self._replica
@replica.setter
def replica(self, replica):
if replica is not None:
self._replica = int(replica)
else:
self._replica = None
@property
def task(self):
return self._task
@task.setter
def task(self, task):
if task is not None:
self._task = int(task)
else:
self._task = None
def parse_from_string(self, spec):
"""Parse a `DeviceSpec` name into its components.
Args:
spec: a string of the form
      /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
      /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
as cpu and gpu are mutually exclusive.
All entries are optional.
Returns:
The `DeviceSpec`.
Raises:
ValueError: if the spec was not valid.
"""
self._clear()
splits = [x.split(":") for x in spec.split("/")]
for y in splits:
ly = len(y)
if y:
# NOTE(touts): we use the property getters here.
if ly == 2 and y[0] == "job":
self.job = y[1]
elif ly == 2 and y[0] == "replica":
self.replica = y[1]
elif ly == 2 and y[0] == "task":
self.task = y[1]
elif ((ly == 1 or ly == 2) and
((y[0].upper() == "GPU") or (y[0].upper() == "CPU"))):
if self.device_type is not None:
raise ValueError("Cannot specify multiple device types: %s" % spec)
self.device_type = y[0].upper()
if ly == 2 and y[1] != "*":
self.device_index = int(y[1])
elif ly == 3 and y[0] == "device":
if self.device_type is not None:
raise ValueError("Cannot specify multiple device types: %s" % spec)
self.device_type = y[1]
if y[2] != "*":
self.device_index = int(y[2])
elif ly and y[0] != "": # pylint: disable=g-explicit-bool-comparison
raise ValueError("Unknown attribute: '%s' in '%s'" % (y[0], spec))
return self
def merge_from(self, dev):
"""Merge the properties of "dev" into this `DeviceSpec`.
Args:
dev: a `DeviceSpec`.
"""
if dev.job is not None:
self.job = dev.job
if dev.replica is not None:
self.replica = dev.replica
if dev.task is not None:
self.task = dev.task
if dev.device_type is not None:
self.device_type = dev.device_type
if dev.device_index is not None:
self.device_index = dev.device_index
def to_string(self):
"""Return a string representation of this `DeviceSpec`.
Returns:
a string of the form
      /job:<name>/replica:<id>/task:<id>/device:<device_type>:<id>.
"""
dev = ""
if self.job is not None:
dev += "/job:" + self.job
if self.replica is not None:
dev += "/replica:" + str(self.replica)
if self.task is not None:
dev += "/task:" + str(self.task)
if self.device_type is not None:
device_index_string = "*"
if self.device_index is not None:
device_index_string = str(self.device_index)
dev += "/device:%s:%s" % (self.device_type, device_index_string)
return dev
@staticmethod
def from_string(spec):
"""Construct a `DeviceSpec` from a string.
Args:
spec: a string of the form
      /job:<name>/replica:<id>/task:<id>/device:CPU:<id>
      or
      /job:<name>/replica:<id>/task:<id>/device:GPU:<id>
as cpu and gpu are mutually exclusive.
All entries are optional.
Returns:
A DeviceSpec.
"""
return DeviceSpec().parse_from_string(spec)
def check_valid(spec):
"""Check that a device spec is valid.
Args:
spec: a string.
Raises:
An exception if the spec is invalid.
"""
# Construct a DeviceSpec. It will assert a failure if spec is invalid.
DeviceSpec.from_string(spec)
def canonical_name(device):
"""Returns a canonical name for the given `DeviceSpec` or device name."""
if device is None:
return ""
if isinstance(device, DeviceSpec):
return device.to_string()
else:
device = DeviceSpec.from_string(device)
return device.to_string()
def merge_device(spec):
"""Returns a device function that merges devices specifications.
This can be used to merge partial specifications of devices. The
innermost setting for a device field takes precedence. For example:
with tf.device(merge_device("/device:GPU:0"))
# Nodes created here have device "/device:GPU:0"
with tf.device(merge_device("/job:worker")):
# Nodes created here have device "/job:worker/device:GPU:0"
with tf.device(merge_device("/device:CPU:0")):
# Nodes created here have device "/job:worker/device:CPU:0"
with tf.device(merge_device("/job:ps")):
# Nodes created here have device "/job:ps/device:CPU:0"
Args:
spec: A `DeviceSpec` or a device spec string (partially) describing the
device that should be used for all nodes created in the scope of
the returned device function's with block.
Returns:
A device function with the above-described behavior.
Raises:
ValueError: if the spec was not valid.
"""
if not isinstance(spec, DeviceSpec):
spec = DeviceSpec.from_string(spec or "")
def _device_function(node_def):
current_device = DeviceSpec.from_string(node_def.device or "")
copy_spec = copy.copy(spec)
copy_spec.merge_from(current_device) # current_device takes precedence.
return copy_spec
return _device_function
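# Illustrative sketch (not part of the original module): composing partial
# device specifications with the helpers above. The job and task values are
# hypothetical.
def _example_device_spec_merge():
  outer = DeviceSpec.from_string("/job:worker/task:7")
  inner = DeviceSpec.from_string("/device:GPU:1")
  # Fields set on `inner` override the corresponding fields of `outer`.
  outer.merge_from(inner)
  return outer.to_string()  # "/job:worker/task:7/device:GPU:1"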
from __future__ import absolute_import
import contextlib
import logging
import logging.handlers
import os
try:
import threading
except ImportError:
import dummy_threading as threading
from pip.compat import WINDOWS
from pip.utils import ensure_dir
try:
from pip._vendor import colorama
# Lots of different errors can come from this, including SystemError and
# ImportError.
except Exception:
colorama = None
_log_state = threading.local()
_log_state.indentation = 0
@contextlib.contextmanager
def indent_log(num=2):
"""
A context manager which will cause the log output to be indented for any
    log messages emitted inside it.
"""
_log_state.indentation += num
try:
yield
finally:
_log_state.indentation -= num
def get_indentation():
return getattr(_log_state, 'indentation', 0)
class IndentingFormatter(logging.Formatter):
def format(self, record):
"""
Calls the standard formatter, but will indent all of the log messages
by our current indentation level.
"""
formatted = logging.Formatter.format(self, record)
formatted = "".join([
(" " * get_indentation()) + line
for line in formatted.splitlines(True)
])
return formatted
def _color_wrap(*colors):
def wrapped(inp):
return "".join(list(colors) + [inp, colorama.Style.RESET_ALL])
return wrapped
class ColorizedStreamHandler(logging.StreamHandler):
# Don't build up a list of colors if we don't have colorama
if colorama:
COLORS = [
# This needs to be in order from highest logging level to lowest.
(logging.ERROR, _color_wrap(colorama.Fore.RED)),
(logging.WARNING, _color_wrap(colorama.Fore.YELLOW)),
]
else:
COLORS = []
def __init__(self, stream=None):
logging.StreamHandler.__init__(self, stream)
if WINDOWS and colorama:
self.stream = colorama.AnsiToWin32(self.stream)
def should_color(self):
# Don't colorize things if we do not have colorama
if not colorama:
return False
real_stream = (
self.stream if not isinstance(self.stream, colorama.AnsiToWin32)
else self.stream.wrapped
)
# If the stream is a tty we should color it
if hasattr(real_stream, "isatty") and real_stream.isatty():
return True
        # If we have an ANSI term we should color it
if os.environ.get("TERM") == "ANSI":
return True
# If anything else we should not color it
return False
def format(self, record):
msg = logging.StreamHandler.format(self, record)
if self.should_color():
for level, color in self.COLORS:
if record.levelno >= level:
msg = color(msg)
break
return msg
class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler):
def _open(self):
ensure_dir(os.path.dirname(self.baseFilename))
return logging.handlers.RotatingFileHandler._open(self)
class MaxLevelFilter(logging.Filter):
def __init__(self, level):
self.level = level
def filter(self, record):
return record.levelno < self.level
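# Illustrative sketch (not part of the original module): wiring the formatter,
# handler and indent_log context manager together. The logger name and the
# messages are hypothetical.
def _example_logging_setup():
    logger = logging.getLogger("pip.example")
    handler = ColorizedStreamHandler()
    handler.setFormatter(IndentingFormatter(fmt="%(message)s"))
    logger.addHandler(handler)
    logger.setLevel(logging.INFO)
    logger.info("Collecting packages")
    with indent_log():
        # Messages emitted here are indented by two extra spaces.
        logger.info("Downloading example-package-1.0.tar.gz")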
import sys


class Sudoku():
    def __init__(self, textfile):
        # Read the puzzle rows from the given text file.
        self.puzzle = open(textfile).readlines()
class Position():
def __init__(self, row, column, box):
self.row = row
self.column = column
self.box = box
if __name__ == '__main__':
puzzle = open(sys.argv[1]).readlines()
rows = []
columns = [set() for i in range(9)]
boxes = [[set() for i in range(3)] for j in range(3)]
i = 0
for line in puzzle:
data = line.split()
data = [int(x) for x in data]
rows.append(set(data))
for j in range(9):
columns[j].add(data[j])
boxes[i//3][j // 3].add(data[j])
i += 1
row_results = [len(row_set) == 9 for row_set in rows]
column_results = [len(col_set) == 9 for col_set in columns]
if not all(row_results):
print("False, row")
sys.exit(0)
elif not all(column_results):
print("False, col")
sys.exit(0)
for box_set in boxes:
box_results = [len(box) == 9 for box in box_set]
if not all(box_results):
print(False)
sys.exit(0)
print(True)
sys.exit(0)
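# Illustrative note (not part of the original script): the checker above expects
# a plain-text file with nine whitespace-separated integers per line, one line
# per row of the (hypothetical) solved grid, e.g.:
#
#   5 3 4 6 7 8 9 1 2
#   6 7 2 1 9 5 3 4 8
#   ... (seven more rows)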
#! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
cpus = perf.cpu_map()
threads = perf.thread_map()
evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
wakeup_events = 1, sample_period = 1,
sample_id_all = 1,
sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
evsel.open(cpus = cpus, threads = threads);
evlist = perf.evlist(cpus, threads)
evlist.add(evsel)
evlist.mmap()
while True:
evlist.poll(timeout = -1)
for cpu in cpus:
event = evlist.read_on_cpu(cpu)
if not event:
continue
print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
event.sample_pid,
event.sample_tid),
print event
if __name__ == '__main__':
main()
import os
import sys
import unittest
# Bob Ippolito:
"""
Ok.. the code to find the filename for __getattr__ should look
something like:
import os
from macholib.dyld import dyld_find
def find_lib(name):
possible = ['lib'+name+'.dylib', name+'.dylib',
name+'.framework/'+name]
for dylib in possible:
try:
return os.path.realpath(dyld_find(dylib))
except ValueError:
pass
raise ValueError, "%s not found" % (name,)
It'll have output like this:
>>> find_lib('pthread')
'/usr/lib/libSystem.B.dylib'
>>> find_lib('z')
'/usr/lib/libz.1.dylib'
>>> find_lib('IOKit')
'/System/Library/Frameworks/IOKit.framework/Versions/A/IOKit'
-bob
"""
from ctypes.macholib.dyld import dyld_find
def find_lib(name):
possible = ['lib'+name+'.dylib', name+'.dylib', name+'.framework/'+name]
for dylib in possible:
try:
return os.path.realpath(dyld_find(dylib))
except ValueError:
pass
raise ValueError("%s not found" % (name,))
class MachOTest(unittest.TestCase):
if sys.platform == "darwin":
def test_find(self):
self.failUnlessEqual(find_lib('pthread'),
'/usr/lib/libSystem.B.dylib')
result = find_lib('z')
self.failUnless(result.startswith('/usr/lib/libz.1'))
self.failUnless(result.endswith('.dylib'))
self.failUnlessEqual(find_lib('IOKit'),