# If the current time is inside an allocated slot, then packets
# may be sent.
in_slot_mock = PropertyMock(return_value=True)
with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock):
self.rf_sensor._send()
calls = send_tx_frame_mock.call_args_list
# RSSI broadcast packets must be sent to all sensors in the network
# (excluding ourself). Note that we do not inspect the packet contents
# other than the specification because that is covered in the test
# for the `_create_rssi_broadcast_packet` method.
for to_id in xrange(1, self.rf_sensor.number_of_sensors + 1):
if to_id == self.rf_sensor.id:
continue
packet, to = calls.pop(0)[0]
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_broadcast")
self.assertEqual(to, to_id)
# RSSI ground station packets must be sent to the ground station (ID 0).
# The packet queue must be empty afterwards. We added one packet to the
# queue at the start of this test, so we must detect it here.
packet, to = calls.pop(0)[0]
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_broadcast")
self.assertEqual(to, 0)
self.assertEqual(self.rf_sensor._packets.qsize(), 0)
send_tx_frame_mock.reset_mock()
# If the current time is not inside an allocated slot, then no
# packets may be sent.
in_slot_mock = PropertyMock(return_value=False)
with patch.object(TDMA_Scheduler, "in_slot", new_callable=in_slot_mock):
self.rf_sensor._send()
send_tx_frame_mock.assert_not_called()
def test_send_custom_packets(self):
self.packet.set("specification", "waypoint_clear")
self.packet.set("to_id", 2)
self.rf_sensor.enqueue(self.packet, to=2)
with patch.object(RF_Sensor, "_send_tx_frame") as send_tx_frame_mock:
self.rf_sensor._send_custom_packets()
# Custom packets must be sent to their destinations.
packet, to = send_tx_frame_mock.call_args[0]
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "waypoint_clear")
self.assertEqual(packet.get("to_id"), 2)
self.assertEqual(to, 2)
self.assertEqual(self.rf_sensor._custom_packets.qsize(), 0)
def test_send_tx_frame(self):
# Having a closed connection raises an exception.
with self.assertRaises(DisabledException):
self.rf_sensor._send_tx_frame(self.packet, to=2)
with patch.object(self.rf_sensor, "_connection"):
# Providing an invalid packet raises an exception.
with self.assertRaises(TypeError):
self.rf_sensor._send_tx_frame(None, to=2)
# Providing an invalid destination raises an exception.
with self.assertRaises(TypeError):
self.rf_sensor._send_tx_frame(self.packet)
def test_receive(self):
# Verify that the interface requires subclasses to implement
# the `_receive` method.
with self.assertRaises(NotImplementedError):
self.rf_sensor._receive(packet=self.packet)
def test_create_rssi_broadcast_packet(self):
packet = self.rf_sensor._create_rssi_broadcast_packet(2)
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_broadcast")
self.assertEqual(packet.get("latitude"), 0)
self.assertEqual(packet.get("longitude"), 0)
self.assertTrue(packet.get("valid"))
self.assertEqual(packet.get("waypoint_index"), 0)
self.assertEqual(packet.get("sensor_id"), self.rf_sensor.id)
self.assertAlmostEqual(packet.get("timestamp"), time.time(), delta=0.1)
def test_create_rssi_ground_station_packet(self):
rssi_broadcast_packet = self.rf_sensor._create_rssi_broadcast_packet(2)
packet = self.rf_sensor._create_rssi_ground_station_packet(rssi_broadcast_packet)
self.assertIsInstance(packet, Packet)
self.assertEqual(packet.get("specification"), "rssi_ground_station")
self.assertEqual(packet.get("sensor_id"), self.rf_sensor.id)
self.assertEqual(packet.get("from_latitude"), rssi_broadcast_packet.get("latitude"))
self.assertEqual(packet.get("from_longitude"), rssi_broadcast_packet.get("longitude"))
self.assertEqual(packet.get("from_valid"), rssi_broadcast_packet.get("valid"))
self.assertEqual(packet.get("to_latitude"), 0)
self.assertEqual(packet.get("to_longitude"), 0)
self.assertTrue(packet.get("to_valid"))
# -*- coding: utf-8 -*-
"""
A list of Mexican states for use as `choices` in a formfield.
This exists in this standalone file so that it's only imported into memory
when explicitly needed.
"""
from django.utils.translation import ugettext_lazy as _
STATE_CHOICES = (
('AGU', _(u'Aguascalientes')),
('BCN', _(u'Baja California')),
('BCS', _(u'Baja California Sur')),
('CAM', _(u'Campeche')),
('CHH', _(u'Chihuahua')),
('CHP', _(u'Chiapas')),
('COA', _(u'Coahuila')),
('COL', _(u'Colima')),
('DIF', _(u'Distrito Federal')),
('DUR', _(u'Durango')),
('GRO', _(u'Guerrero')),
('GUA', _(u'Guanajuato')),
('HID', _(u'Hidalgo')),
('JAL', _(u'Jalisco')),
('MEX', _(u'Estado de México')),
('MIC', _(u'Michoacán')),
('MOR', _(u'Morelos')),
('NAY', _(u'Nayarit')),
('NLE', _(u'Nuevo León')),
('OAX', _(u'Oaxaca')),
('PUE', _(u'Puebla')),
('QUE', _(u'Querétaro')),
('ROO', _(u'Quintana Roo')),
('SIN', _(u'Sinaloa')),
('SLP', _(u'San Luis Potosí')),
('SON', _(u'Sonora')),
('TAB', _(u'Tabasco')),
('TAM', _(u'Tamaulipas')),
('TLA', _(u'Tlaxcala')),
('VER', _(u'Veracruz')),
('YUC', _(u'Yucatán')),
('ZAC', _(u'Zacatecas')),
)
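# A minimal usage sketch (not part of this module; assumes a standard Django
# form -- any ChoiceField accepts these pairs):
#
#     from django import forms
#
#     class MXAddressForm(forms.Form):
#         state = forms.ChoiceField(choices=STATE_CHOICES)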
# ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008 Daniel Moisset, Ricardo Quesada, Rayentray Tappa, Lucio Torre
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Interval Action
Interval Actions
================
An interval action is an action that takes place within a certain period of time.
It has a start time and a finish time; the finish time is the start time
plus the parameter ``duration``.
These `IntervalAction` objects have some interesting properties:
- They can run normally (default)
- They can run reversed with the `Reverse` action.
- They can run with the time altered with the `Accelerate`, `AccelDeccel` and
`Speed` actions.
For example, you can simulate a Ping Pong effect by running the action normally
and then running it again in Reverse mode.
Example::
ping_pong_action = action + Reverse( action )
Available IntervalActions
=========================
* `MoveTo`
* `MoveBy`
* `JumpTo`
* `JumpBy`
* `Bezier`
* `Blink`
* `RotateTo`
* `RotateBy`
* `ScaleTo`
* `ScaleBy`
* `FadeOut`
* `FadeIn`
* `FadeTo`
* `Delay`
* `RandomDelay`
Modifier actions
================
* `Accelerate`
* `AccelDeccel`
* `Speed`
Examples::
move = MoveBy( (200,0), duration=5 ) # Moves 200 pixels to the right in 5 seconds.
move = MoveTo( (320,240), duration=5) # Moves to the pixel (320,240) in 5 seconds
jump = JumpBy( (320,0), 100, 5, duration=5) # Jumps to the right 320 pixels
# doing 5 jumps of 100 pixels
# of height in 5 seconds
accel_move = Accelerate(move) # accelerates action move
'''
__docformat__ = 'restructuredtext'
import random
import copy
import math
from base_actions import *
from cocos.euclid import *
__all__ = [ 'Lerp', # interpolation
'MoveTo','MoveBy', # movement actions
'Jump', 'JumpTo', 'JumpBy',
'Bezier', # complex movement actions
'Rotate',"RotateTo", "RotateBy", # object rotation
'ScaleTo','ScaleBy', # object scale
'Delay','RandomDelay', # Delays
'FadeOut','FadeIn','FadeTo', # Fades in/out action
'Blink', # Blink action
'Accelerate','AccelDeccel','Speed', # Time alter actions
]
class Lerp( IntervalAction ):
"""
Interpolate between values for some specified attribute
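Example (a sketch; assumes the target exposes the named numeric
attribute)::

    # fade the sprite's opacity from 0 to 255 in 2 seconds
    action = Lerp( 'opacity', 0, 255, 2 )
    sprite.do( action )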
"""
def init(self, attrib, start, end, duration):
"""Init method.
:Parameters:
`attrib` : string
The name of the attribute where the value is stored
`start` : float
The start value
`end` : float
The end value
`duration` : float
Duration time in seconds
"""
self.attrib = attrib
self.duration = duration
self.start_p = start
self.end_p = end
self.delta = end-start
def update(self, t):
setattr(self.target, self.attrib,
self.start_p + self.delta * t
)
def __reversed__(self):
return Lerp(self.attrib, self.end_p, self.start_p, self.duration)
class RotateBy( IntervalAction ):
"""Rotates a `CocosNode` object clockwise a number of degrees
by modiying it's rotation attribute.
Example::
# rotates the sprite 180 degrees in 2 seconds
action = RotateBy( 180, 2 )
sprite.do( action )
"""
def init(self, angle, duration ):
"""Init method.
:Parameters:
`angle` : float
Degrees that the sprite will be rotated.
Positive degrees rotates the sprite clockwise.
`duration` : float
Duration time in seconds
"""
self.angle = angle #: Quantity of degrees to rotate
self.duration = duration #: Duration in seconds
def start( self ):
self.start_angle = self.target.rotation
def update(self, t):
self.target.rotation = (self.start_angle + self.angle * t ) % 360
def __reversed__(self):
return RotateBy(-self.angle, self.duration)
Rotate = RotateBy
class RotateTo( IntervalAction ):
"""Rotates a `CocosNode` object to a certain angle by modifying it's
rotation attribute.
The direction will be decided by the shortest angle.
Example::
# rotates the sprite to angle 180 in 2 seconds
action = RotateTo( 180, 2 )
sprite.do( action )
"""
def init(self, angle, duration ):
"""Init method.
:Parameters:
`angle` : float
Destination angle in degrees.
`duration` : float
Duration time in seconds
"""
self.angle = angle%360 #: Destination angle in degrees
self.duration = duration #: Duration in seconds
def start( self ):
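# Travel the shortest arc: fold the signed difference between the
# destination angle and the current rotation into the range [-180, 180].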
ea = self.angle
sa = self.start_angle = (self.target.rotation%360)
self.angle = ((ea%360) - (sa%360))
if self.angle > 180:
self.angle = -360+self.angle
if self.angle < -180:
self.angle = 360+self.angle
def update(self, t):
self.target.rotation = (self.start_angle + self.angle * t ) % 360
def __reversed__(self):
return RotateTo(-self.angle, self.duration)
class Speed( IntervalAction ):
"""
Changes the speed of an action, making it take less time (speed>1)
or more time (speed<1)
Example::
# rotates the sprite 180 degrees clockwise in 1 second
action = Speed( Rotate( 180, 2 ), 2 )
sprite.do( action )
"""
def init(self, other, speed ):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
`speed` : float
The speed change. 1 is no change.
2 means twice as fast, takes half the time
0.5 means half as fast, takes double the time
"""
self.other = other
self.speed = speed
self.duration = other.duration/speed
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
self.other.update( t )
def __reversed__(self):
return Speed( Reverse( self.other ), self.speed )
class Accelerate( IntervalAction ):
"""
Changes the acceleration of an action
Example::
# rotates the sprite 180 degrees in 2 seconds clockwise
# it starts slow and ends fast
action = Accelerate( Rotate( 180, 2 ), 4 )
sprite.do( action )
"""
def init(self, other, rate = 2):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
`rate` : float
The acceleration rate. 1 is linear.
the new t is t**rate
"""
self.other = other
self.rate = rate
self.duration = other.duration
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
self.other.update( t**self.rate )
def __reversed__(self):
return Accelerate(Reverse(self.other), 1.0/self.rate)
class AccelDeccel( IntervalAction ):
"""
Makes an action change the travel speed but retain near normal
speed at the beginning and ending.
Example::
# rotates the sprite 180 degrees in 2 seconds clockwise
# it starts slow, gets fast and ends slow
action = AccelDeccel( RotateBy( 180, 2 ) )
sprite.do( action )
"""
def init(self, other):
"""Init method.
:Parameters:
`other` : IntervalAction
The action that will be affected
"""
self.other = other
self.duration = other.duration
def start(self):
self.other.target = self.target
self.other.start()
def update(self, t):
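# Remap linear time t in [0, 1] through a logistic (sigmoid) curve centred
# on t=0.5, so the wrapped action starts slow, speeds up in the middle and
# slows down again near the end.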
ft = (t-0.5) * 12
nt = 1./( 1. + math.exp(-ft) )
self.other.update( nt )
def __reversed__(self):
return AccelDeccel( Reverse(self.other) )
class MoveTo( IntervalAction ):
"""Moves a `CocosNode` object to the position x,y. x and y are absolute coordinates
by modifying it's position attribute.
Example::
# Move the sprite to coords x=50, y=10 in 8 seconds
action = MoveTo( (50,10), 8 )
sprite.do( action )
"""
def init(self, dst_coords, duration=5):
"""Init method.
:Parameters:
`dst_coords` : (x,y)
Coordinates where the sprite will be placed at the end of the action
`duration` : float
Duration time in seconds
"""
self.end_position = Point2( *dst_coords )
self.duration = duration
def start( self ):
self.start_position = self.target.position
self.delta = self.end_position-self.start_position
def update(self,t):
self.target.position = self.start_position + self.delta * t
class MoveBy( MoveTo ):
"""Moves a `CocosNode` object x,y pixels by modifying it's
position attribute.
x and y are relative to the position of the object.
Duration is is seconds.
Example::
# Move the sprite 50 pixels to the left in 8 seconds
action = MoveBy( (-50,0), 8 )
sprite.do( action )
"""
def init(self, delta, duration=5):
"""Init method.
:Parameters:
`delta` : (x,y)
Delta coordinates
`duration` : float
Duration time in seconds
"""
self.delta = Point2( *delta )
self.duration = duration
def start( self ):
self.start_position = self.target.position
self.end_position = self.start_position + self.delta
def __reversed__(self):
return MoveBy(-self.delta, self.duration)
class FadeOut( IntervalAction ):
"""Fades out a `CocosNode` object by modifying it's opacity attribute.
Example::
action = FadeOut( 2 )
sprite.do( action )
"""
def init( self, duration ):
"""Init method.
:Parameters:
`duration` : float
Seconds that it will take to fade
"""
self.duration = duration
def update( self, t ):
self.target.opacity = 255 * (1-t)
def __reversed__(self):
return FadeIn( self.duration )
class FadeTo( IntervalAction ):
"""Fades a `CocosNode` object to a specific alpha value by modifying it's opacity attribute.
Example::
action = FadeTo( 128, 2 )
sprite.do( action )
"""
def init( self, alpha, duration ):
"""Init method.
:Parameters:
`alpha` : float
0-255 value of opacity
`duration` : float
Seconds that it will take to fade
"""
self.alpha = alpha
self.duration = duration
def start(self):
self.start_alpha = self.target.opacity
def update( self, t ):
self.target.opacity = self.start_alpha + (
self.alpha - self.start_alpha
) * t
class FadeIn( FadeOut):
"""Fades in a `CocosNode` object by modifying it's opacity attribute.
Example::
action = FadeIn( 2 )
sprite.do( action )
"""
def update( self, t ):
self.target.opacity = 255 * t
def __reversed__(self):
return FadeOut( self.duration )
class ScaleTo(IntervalAction):
"""Scales a `CocosNode` object to a zoom factor by modifying it's scale attribute.
Example::
# scales the sprite to 5x in 2 seconds
action = ScaleTo( 5, 2 )
sprite.do( action )
"""
def init(self, scale, duration=5 ):
"""Init method.
:Parameters:
`scale` : float
scale factor
`duration` : float
Duration time in seconds
"""
self.end_scale = scale
self.duration = duration
def start( self ):
self.start_scale = self.target.scale
self.delta = self.end_scale-self.start_scale
def update(self, t):
self.target.scale = self.start_scale + self.delta * t
class ScaleBy(ScaleTo):
"""Scales a `CocosNode` object a zoom factor by modifying it's scale attribute.
Example::
# scales the sprite by 5x in 2 seconds
action = ScaleBy( 5, 2 )
sprite.do( action )
"""
def start( self ):
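# ScaleBy is relative: the final scale is start_scale * end_scale, so the
# change covered by this action is start_scale * (end_scale - 1).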
self.start_scale = self.target.scale
self.delta = self.start_scale*self.end_scale - self.start_scale
def __reversed__(self):
return ScaleBy( 1.0/self.end_scale, self.duration )
class Blink( IntervalAction ):
"""Blinks a `CocosNode` object by modifying it's visible attribute
Example::
# Blinks 10 times in 2 seconds
action = Blink( 10, 2 )
sprite.do( action )
"""
def init(self, times, duration):
"""Init method.
:Parameters:
`times` : integer
Number of times to blink
`duration` : float
Duration time in seconds
"""
self.times = times
self.duration = duration
def update(self, t):
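# Split the action into `times` equal slices; the target is hidden during
# the first half of each slice and visible during the second half.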
slice = 1 / float( self.times )
m = t % slice
self.target.visible = (m > slice / 2.0)
def __reversed__(self):
return self
class Bezier( IntervalAction ):
"""Moves a `CocosNode` object through a bezier path by modifying it's position attribute.
Example::
action = Bezier( bezier_conf.path1, 5 ) # Moves the sprite using the
sprite.do( action ) # bezier path 'bezier_conf.path1'
# in 5 seconds
"""
def init(self, bezier, duration=5, forward=True):
"""Init method
:Parameters:
`bezier` : bezier_configuration instance
A bezier configuration
`duration` : float
Duration time in seconds
`forward` : bool
Whether the path is followed from its start (True, the default)
or from its end (False)
"""
self.duration = duration
self.bezier = bezier
self.forward = forward
def start( self ):
self.start_position = self.target.position
def update(self,t):
if self.forward:
p = self.bezier.at( t )
else:
p = self.bezier.at( 1-t )
self.target.position = self.start_position + Point2( *p )
def __reversed__(self):
return Bezier(self.bezier, self.duration, not self.forward)
class Jump(IntervalAction):
"""Moves a `CocosNode` object simulating a jump movement by modifying it's position attribute.
Example::
action = Jump(50,200, 5, 6) # Move the sprite 200 pixels to the right
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 50 pixels of height
"""
def init(self, y=150, x=120, jumps=1, duration=5):
"""Init method
:Parameters:
`y` : integer
Height of jumps
`x` : integer
horizontal movement relative to the starting position
`jumps` : integer
quantity of jumps
`duration` : float
Duration time in seconds
"""
import warnings
warnings.warn('Deprecated "Jump" action. Consider using JumpBy instead', DeprecationWarning)
self.y = y
self.x = x
self.duration = duration
self.jumps = jumps
def start( self ):
self.start_position = self.target.position
def update(self, t):
y = int( self.y * abs( math.sin( t * math.pi * self.jumps ) ) )
x = self.x * t
self.target.position = self.start_position + Point2(x,y)
def __reversed__(self):
return Jump(self.y, -self.x, self.jumps, self.duration)
class JumpBy(IntervalAction):
"""Moves a `CocosNode` object simulating a jump movement by modifying it's position attribute.
Example::
# Move the sprite 100 pixels to the right and 100 pixels up
action = JumpBy((100,100),200, 5, 6)
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 200 pixels of height
"""
def init(self, position=(0,0), height=100, jumps=1, duration=5):
"""Init method
:Parameters:
`position` : integer x integer tuple
horizontal and vertical movement relative to the
starting position
`height` : integer
Height of jumps
`jumps` : integer
quantity of jumps
`duration` : float
Duration time in seconds
"""
self.position = position
self.height = height
self.duration = duration
self.jumps = jumps
def start( self ):
self.start_position = self.target.position
self.delta = Vector2(*self.position)
def update(self, t):
y = int( self.height * abs( math.sin( t * math.pi * self.jumps ) ) )
y += self.delta[1] * t
x = self.delta[0] * t
self.target.position = self.start_position + Point2(x,y)
def __reversed__(self):
return JumpBy( (-self.position[0],-self.position[1]), self.height, self.jumps, self.duration)
class JumpTo(JumpBy):
"""Moves a `CocosNode` object to a position simulating a jump movement by modifying
it's position attribute.
Example::
action = JumpTo(50,200, 5, 6) # Move the sprite 200 pixels to the right
sprite.do( action ) # in 6 seconds, doing 5 jumps
# of 50 pixels of height
"""
def start( self ):
self.start_position = self.target.position
self.delta = Vector2(*self.position)-self.start_position
class Delay(IntervalAction):
"""Delays the action a certain amount of seconds
Example::
action = Delay(2.5)
sprite.do( action )
"""
def init(self, delay):
"""Init method
:Parameters:
`delay` : float
Seconds of delay
"""
self.duration = delay
def __reversed__(self):
return self
class RandomDelay(Delay):
"""Delays the actions between *min* and *max* seconds
Example::
action = RandomDelay(2.5, 4.5) # delays the action between 2.5 and 4.5 seconds
sprite.do( action )
"""
def init(self, low, hi):
"""Init method
:Parameters:
`low` : float
Minimum seconds of delay
`hi` : float
Maximum seconds of delay
"""
self.low = low
self.hi = hi
def __deepcopy__(self, memo):
new = copy.copy(self)
new.duration = self.low + (random.random() * (self.hi - self.low))
return new
# -*- coding: utf-8 -*-
#
# This file is part of the bliss project
#
# Copyright (c) 2016 Beamline Control Unit, ESRF
# Distributed under the GNU LGPLv3. See LICENSE for more info.
import importlib
from bliss.common.tango import DeviceProxy
from bliss.config import settings
from .bpm import Bpm
from .roi import Roi, RoiCounters
class Lima(object):
ROI_COUNTERS = 'roicounter'
BPM = 'beamviewer'
class Image(object):
ROTATION_0,ROTATION_90,ROTATION_180,ROTATION_270 = range(4)
def __init__(self,proxy):
self._proxy = proxy
@property
def proxy(self):
return self._proxy
@property
def bin(self):
return self._proxy.image_bin
@bin.setter
def bin(self,values):
self._proxy.image_bin = values
@property
def flip(self):
return self._proxy.image_flip
@flip.setter
def flip(self,values):
self._proxy.image_flip = values
@property
def roi(self):
return Roi(*self._proxy.image_roi)
@roi.setter
def roi(self,roi_values):
if len(roi_values) == 4:
self._proxy.image_roi = roi_values
elif isinstance(roi_values[0],Roi):
roi = roi_values[0]
self._proxy.image_roi = (roi.x,roi.y,
roi.width,roi.height)
else:
raise TypeError("Lima.image: set roi only accepts roi (class)"
" or (x,y,width,height) values")
@property
def rotation(self):
rot_str = self._proxy.image_rotation
return {'NONE' : self.ROTATION_0,
'90' : self.ROTATION_90,
'180' : self.ROTATION_180,
'270' : self.ROTATION_270}.get(rot_str)
@rotation.setter
def rotation(self,rotation):
if isinstance(rotation,(str,unicode)):
self._proxy.image_rotation = rotation
else:
rot_str = {self.ROTATION_0 : 'NONE',
self.ROTATION_90 : '90',
self.ROTATION_180 : '180',
self.ROTATION_270 : '270'}.get(rotation)
if rot_str is None:
raise ValueError("Lima.image: rotation can only be 0,90,180 or 270")
self._proxy.image_rotation = rot_str
class Acquisition(object):
ACQ_MODE_SINGLE,ACQ_MODE_CONCATENATION,ACQ_MODE_ACCUMULATION = range(3)
def __init__(self,proxy):
self._proxy = proxy
acq_mode = (("SINGLE",self.ACQ_MODE_SINGLE),
("CONCATENATION",self.ACQ_MODE_CONCATENATION),
("ACCUMULATION",self.ACQ_MODE_ACCUMULATION))
self.__acq_mode_from_str = dict(acq_mode)
self.__acq_mode_from_enum = dict(((y,x) for x,y in acq_mode))
@property
def exposition_time(self):
"""
Exposure time for one frame, in seconds.
"""
return self._proxy.acq_expo_time
@exposition_time.setter
def exposition_time(self,value):
self._proxy.acq_expo_time = value
@property
def mode(self):
"""
acquisition mode (SINGLE,CONCATENATION,ACCUMULATION)
"""
acq_mode = self._proxy.acq_mode
return self.__acq_mode_from_str.get(acq_mode)
@mode.setter
def mode(self,value):
mode_str = self.__acq_mode_from_enum.get(value)
if mode_str is None:
possible_modes = ','.join(('%d -> %s' % (y,x)
for x,y in self.__acq_mode_from_str.iteritems()))
raise ValueError("lima: acquisition mode can only be: %s" % possible_modes)
self._proxy.acq_mode = mode_str
@property
def trigger_mode(self):
"""
Camera trigger mode (stub: not implemented here).
"""
pass
@trigger_mode.setter
def trigger_mode(self,value):
pass
def __init__(self,name,config_tree):
"""Lima controller.
name -- the controller's name
config_tree -- controller configuration
in this dictionary we need to have:
tango_url -- tango main device url (from class LimaCCDs)
"""
self._proxy = DeviceProxy(config_tree.get("tango_url"))
self.name = name
self.__bpm = None
self.__roi_counters = None
self._camera = None
self._image = None
self._acquisition = None
@property
def proxy(self):
return self._proxy
@property
def image(self):
if self._image is None:
self._image = Lima.Image(self._proxy)
return self._image
@property
def shape(self):
return (-1, -1)
@property
def acquisition(self):
if self._acquisition is None:
self._acquisition = Lima.Acquisition(self._proxy)
return self._acquisition
@property
def roi_counters(self):
if self.__roi_counters is None:
roi_counters_proxy = self._get_proxy(self.ROI_COUNTERS)
self.__roi_counters = RoiCounters(self.name, roi_counters_proxy, self)
return self.__roi_counters
@property
def camera(self):
if self._camera is None:
camera_type = self._proxy.lima_type
proxy = self._get_proxy(camera_type)
camera_module = importlib.import_module('.%s' % camera_type,__package__)
self._camera = camera_module.Camera(self.name, proxy)
return self._camera
@property
def camera_type(self):
return self._proxy.camera_type
@property
def bpm(self):
if self.__bpm is None:
bpm_proxy = self._get_proxy(self.BPM)
self.__bpm = Bpm(self.name, bpm_proxy, self)
return self.__bpm
@property
def available_triggers(self):
"""
Return all available trigger modes for the camera.
"""
return self._proxy.getAttrStringValueList('acq_trigger_mode')
def prepareAcq(self):
self._proxy.prepareAcq()
def startAcq(self):
self._proxy.startAcq()
def _get_proxy(self,type_name):
device_name = self._proxy.getPluginDeviceNameFromType(type_name)
if not device_name:
return
if not device_name.startswith("//"):
# build 'fully qualified domain' name
# '.get_fqdn()' doesn't work
db_host = self._proxy.get_db_host()
db_port = self._proxy.get_db_port()
device_name = "//%s:%s/%s" % (db_host, db_port, device_name)
return DeviceProxy(device_name)
#!/usr/bin/python
# Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Remove strings by name from a GRD file."""
import optparse
import re
import sys
def RemoveStrings(grd_path, string_names):
"""Removes strings with the given names from a GRD file. Overwrites the file.
Args:
grd_path: path to the GRD file.
string_names: a list of string names to be removed.
"""
with open(grd_path, 'r') as f:
grd = f.read()
names_pattern = '|'.join(map(re.escape, string_names))
pattern = r'<message [^>]*name="(%s)".*?</message>\s*' % names_pattern
grd = re.sub(pattern, '', grd, flags=re.DOTALL)
with open(grd_path, 'w') as f:
f.write(grd)
def ParseArgs(args):
usage = 'usage: %prog GRD_PATH...'
parser = optparse.OptionParser(
usage=usage, description='Remove strings from GRD files. Reads string '
'names from stdin, and removes strings with those names from the listed '
'GRD files.')
options, args = parser.parse_args(args=args)
if not args:
parser.error('must provide GRD_PATH argument(s)')
return args
def main(args=None):
grd_paths = ParseArgs(args)
strings_to_remove = filter(None, map(str.strip, sys.stdin.readlines()))
for grd_path in grd_paths:
RemoveStrings(grd_path, strings_to_remove)
if __name__ == '__main__':
main()
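# Example invocation sketch (script and file names are placeholders; string
# names are read from stdin, one per line):
#
#   echo IDS_SOME_STRING | python remove_strings.py path/to/resources.grd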
import datetime
import hmac
import base64
import hashlib
import asyncio
from xml.etree.ElementTree import fromstring as parse_xml
from xml.etree.ElementTree import tostring as xml_tostring
from xml.etree.ElementTree import Element, SubElement
from functools import partial
from urllib.parse import quote
import aiohttp
from . import errors
amz_uriencode = partial(quote, safe='~')
amz_uriencode_slash = partial(quote, safe='~/')
S3_NS = 'http://s3.amazonaws.com/doc/2006-03-01/'
NS = {'s3': S3_NS}
_SIGNATURES = {}
SIGNATURE_V4 = 'v4'
class Key(object):
def __init__(self, *, key, last_modified, etag, size, storage_class):
self.key = key
self.last_modified = last_modified
self.etag = etag
self.size = size
self.storage_class = storage_class
@classmethod
def from_xml(Key, el):
return Key(
key=el.find('s3:Key', namespaces=NS).text,
last_modified=datetime.datetime.strptime(
el.find('s3:LastModified', namespaces=NS).text,
'%Y-%m-%dT%H:%M:%S.000Z'),
etag=el.find('s3:ETag', namespaces=NS).text,
size=int(el.find('s3:Size', namespaces=NS).text),
storage_class=el.find('s3:StorageClass', namespaces=NS).text)
def __repr__(self):
return '<Key {}:{}>'.format(self.key, self.size)
class Request(object):
def __init__(self, verb, resource, query, headers, payload):
self.verb = verb
self.resource = amz_uriencode_slash(resource)
self.params = query
self.query_string = '&'.join(k + '=' + v
for k, v in sorted((amz_uriencode(k), amz_uriencode(v))
for k, v in query.items()))
self.headers = headers
self.payload = payload
self.content_md5 = ''
@property
def url(self):
return 'https://{0.headers[HOST]}{0.resource}?{0.query_string}' \
.format(self)
def _hmac(key, val):
return hmac.new(key, val, hashlib.sha256).digest()
def _signkey(key, date, region, service):
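# Derive the SigV4 signing key by chaining HMAC-SHA256 over the date,
# region, service name and the literal "aws4_request", seeded with
# "AWS4" + secret key.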
date_key = _hmac(("AWS4" + key).encode('ascii'),
date.encode('ascii'))
date_region_key = _hmac(date_key, region.encode('ascii'))
svc_key = _hmac(date_region_key, service.encode('ascii'))
return _hmac(svc_key, b'aws4_request')
@partial(_SIGNATURES.setdefault, SIGNATURE_V4)
def sign_v4(req, *,
aws_key, aws_secret, aws_token, aws_service='s3', aws_region='us-east-1', **_):
time = datetime.datetime.utcnow()
date = time.strftime('%Y%m%d')
timestr = time.strftime("%Y%m%dT%H%M%SZ")
req.headers['x-amz-date'] = timestr
if isinstance(req.payload, bytes):
payloadhash = hashlib.sha256(req.payload).hexdigest()
else:
payloadhash = 'STREAMING-AWS4-HMAC-SHA256-PAYLOAD'
req.headers['x-amz-content-sha256'] = payloadhash
if aws_token:
req.headers['x-amz-security-token'] = aws_token
signing_key = _signkey(aws_secret, date, aws_region, aws_service)
headernames = ';'.join(k.lower() for k in sorted(req.headers))
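# Build the SigV4 "canonical request": verb, URI, query string, canonical
# headers, signed header names and payload hash, one field per line.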
creq = (
"{req.verb}\n"
"{req.resource}\n"
"{req.query_string}\n"
"{headers}\n\n"
"{headernames}\n"
"{payloadhash}".format(
req=req,
headers='\n'.join(k.lower() + ':' + req.headers[k].strip()
for k in sorted(req.headers)),
headernames=headernames,
payloadhash=payloadhash
))
string_to_sign = (
"AWS4-HMAC-SHA256\n{ts}\n"
"{date}/{region}/{service}/aws4_request\n"
"{reqhash}".format(
ts=timestr,
date=date,
region=aws_region,
service=aws_service,
reqhash=hashlib.sha256(creq.encode('ascii')).hexdigest(),
))
sig = hmac.new(signing_key, string_to_sign.encode('ascii'),
hashlib.sha256).hexdigest()
ahdr = ('AWS4-HMAC-SHA256 '
'Credential={key}/{date}/{region}/{service}/aws4_request, '
'SignedHeaders={headers}, Signature={sig}'.format(
key=aws_key, date=date, region=aws_region, service=aws_service,
headers=headernames,
sig=sig,
))
req.headers['Authorization'] = ahdr
def _hmac_old(key, val):
return hmac.new(key, val, hashlib.sha1).digest()
class MultipartUpload(object):
def __init__(self, bucket, key, upload_id):
self.bucket = bucket
self.key = key
self.upload_id = upload_id
self.xml = Element('CompleteMultipartUpload')
self.parts = 0
self._done = False
self._uri = '/' + self.key + '?uploadId=' + self.upload_id
@asyncio.coroutine
def add_chunk(self, data):
assert isinstance(data, (bytes, memoryview, bytearray)), data
# TODO: figure out how to check chunk size; S3 requires every part
# except the last one to be at least 5 MiB:
# assert len(data) >= 5 << 20, "Chunk must be at least 5MiB"
if self._done:
raise RuntimeError("Can't add_chunk after commit or close")
self.parts += 1
result = yield from self.bucket._request(Request("PUT",
'/' + self.key, {
'uploadId': self.upload_id,
'partNumber': str(self.parts),
}, headers={
'CONTENT-LENGTH': str(len(data)),
'HOST': self.bucket._host,
# next one aiohttp adds for us anyway, so we must put it here
# so it's added into signature
'CONTENT-TYPE': 'application/octet-stream',
}, payload=data))
try:
if result.status != 200:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
etag = result.headers['ETAG']
finally:
result.close()
chunk = SubElement(self.xml, 'Part')
SubElement(chunk, 'PartNumber').text = str(self.parts)
SubElement(chunk, 'ETag').text = etag
@asyncio.coroutine
def commit(self):
if self._done:
raise RuntimeError("Can't commit twice or after close")
self._done = True
data = xml_tostring(self.xml)
result = yield from self.bucket._request(Request("POST",
'/' + self.key, {
'uploadId': self.upload_id,
}, headers={
'CONTENT-LENGTH': str(len(data)),
'HOST': self.bucket._host,
'CONTENT-TYPE': 'application/xml',
}, payload=data))
try:
xml = yield from result.read()
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, xml)
xml = parse_xml(xml)
return xml.find('s3:ETag', namespaces=NS)
finally:
result.close()
@asyncio.coroutine
def close(self):
if self._done:
return
self._done = True
result = yield from self.bucket._request(Request("DELETE",
'/' + self.key, {
'uploadId': self.upload_id,
}, headers={'HOST': self.bucket._host}, payload=b''))
try:
xml = yield from result.read()
if result.status != 204:
raise errors.AWSException.from_bytes(result.status, xml)
finally:
result.close()
class Bucket(object):
def __init__(self, name, *,
port=80,
aws_key, aws_secret, aws_token,
aws_region='us-east-1',
aws_endpoint='s3.amazonaws.com',
signature=SIGNATURE_V4,
connector=None):
self._name = name
self._connector = connector
self._aws_sign_data = {
'aws_key': aws_key,
'aws_secret': aws_secret,
'aws_token': aws_token,
'aws_region': aws_region,
'aws_service': 's3',
'aws_bucket': name,
}
self._host = self._name + '.' + aws_endpoint
if port != 80:
self._host = self._host + ':' + str(port)
self._signature = signature
@asyncio.coroutine
def exists(self, prefix=''):
result = yield from self._request(Request(
"GET",
"/",
{'prefix': prefix,
'delimiter': '/',
'max-keys': '1'},
{'HOST': self._host},
b'',
))
data = (yield from result.read())
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, data)
x = parse_xml(data)
return any(map(Key.from_xml,
x.findall('s3:Contents', namespaces=NS)))
@asyncio.coroutine
def list(self, prefix='', max_keys=1000):
result = yield from self._request(Request(
"GET",
"/",
{'prefix': prefix,
'max-keys': str(max_keys)},
{'HOST': self._host},
b'',
))
data = (yield from result.read())
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, data)
x = parse_xml(data)
if x.find('s3:IsTruncated', namespaces=NS).text != 'false':
raise AssertionError(
"File list is truncated, use bigger max_keys")
return list(map(Key.from_xml,
x.findall('s3:Contents', namespaces=NS)))
def list_by_chunks(self, prefix='', max_keys=1000, after_filename=None):
final = False
if after_filename:
marker = after_filename
else:
marker = ''
@asyncio.coroutine
def read_next():
nonlocal final, marker
result = yield from self._request(Request(
"GET",
"/",
{'prefix': prefix,
'max-keys': str(max_keys),
'marker': marker},
{'HOST': self._host},
b'',
))
data = (yield from result.read())
if result.status != 200:
raise errors.AWSException.from_bytes(result.status, data)
x = parse_xml(data)
result = list(map(Key.from_xml,
x.findall('s3:Contents', namespaces=NS)))
if (x.find('s3:IsTruncated', namespaces=NS).text == 'false' or
len(result) == 0):
final = True
else:
marker = result[-1].key
return result
while not final:
yield read_next()
@asyncio.coroutine
def download(self, key):
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request(
"GET", '/' + key, {}, {'HOST': self._host}, b''))
if result.status != 200:
raise errors.AWSException.from_bytes(
result.status, (yield from result.read()))
return result
@asyncio.coroutine
def upload(self, key, data,
content_length=None,
content_type='application/octet-stream',
last_modified=None):
"""Upload file to S3
The `data` might be a generator or stream.
the `content_length` is unchecked so it's responsibility of user to
ensure that it matches data.
Note: Riak CS doesn't allow to upload files without content_length.
"""
if isinstance(key, Key):
key = key.key
if isinstance(data, str):
data = data.encode('utf-8')
headers = {
'HOST': self._host,
'CONTENT-TYPE': content_type,
"x-amz-server-side-encryption": "AES256",
}
if content_length is not None:
headers['CONTENT-LENGTH'] = str(content_length)
if last_modified:
    headers.update({"x-amz-last-modified": last_modified})
result = yield from self._request(Request("PUT", '/' + key, {},
headers=headers, payload=data))
try:
if result.status != 200:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
return result
finally:
result.close()
@asyncio.coroutine
def delete(self, key):
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request("DELETE", '/' + key, {},
{'HOST': self._host}, b''))
try:
if result.status != 204:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
return result
finally:
result.close()
@asyncio.coroutine
def get(self, key):
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request(
"GET", '/' + key, {}, {'HOST': self._host}, b''))
if result.status != 200:
raise errors.AWSException.from_bytes(
result.status, (yield from result.read()))
data = yield from result.read()
return data
@asyncio.coroutine
def _request(self, req):
_SIGNATURES[self._signature](req, **self._aws_sign_data)
if isinstance(req.payload, bytes):
req.headers['CONTENT-LENGTH'] = str(len(req.payload))
return (yield from aiohttp.request(req.verb, req.url,
chunked='CONTENT-LENGTH' not in req.headers,
headers=req.headers,
data=req.payload,
connector=self._connector))
@asyncio.coroutine
def upload_multipart(self, key,
content_type='application/octet-stream',
MultipartUpload=MultipartUpload):
"""Upload file to S3 by uploading multiple chunks"""
if isinstance(key, Key):
key = key.key
result = yield from self._request(Request("POST",
'/' + key, {'uploads': ''}, {
'HOST': self._host,
'CONTENT-TYPE': content_type,
}, payload=b''))
try:
if result.status != 200:
xml = yield from result.read()
raise errors.AWSException.from_bytes(result.status, xml)
xml = yield from result.read()
upload_id = parse_xml(xml).find('s3:UploadId',
namespaces=NS).text
assert upload_id, xml
return MultipartUpload(self, key, upload_id)
finally:
result.close()
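# A minimal usage sketch (bucket name and credentials are placeholders; it
# follows the coroutine style used throughout this module):
#
#     @asyncio.coroutine
#     def demo():
#         bucket = Bucket('my-bucket', aws_key='AKIA...', aws_secret='...',
#                         aws_token=None)
#         yield from bucket.upload('hello.txt', b'hello world',
#                                  content_length=11)
#         for key in (yield from bucket.list(prefix='')):
#             print(key.key, key.size)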
#!/usr/bin/env python
# portions copyright 2001, Autonomous Zones Industries, Inc., all rights...
# err... reserved and offered to the public under the terms of the
# Python 2.2 license.
# Author: Zooko O'Whielacronx
# http://zooko.com/
# mailto:zooko@zooko.com
#
# Copyright 2000, Mojam Media, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1999, Bioreason, Inc., all rights reserved.
# Author: Andrew Dalke
#
# Copyright 1995-1997, Automatrix, Inc., all rights reserved.
# Author: Skip Montanaro
#
# Copyright 1991-1995, Stichting Mathematisch Centrum, all rights reserved.
#
#
# Permission to use, copy, modify, and distribute this Python software and
# its associated documentation for any purpose without fee is hereby
# granted, provided that the above copyright notice appears in all copies,
# and that both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of neither Automatrix,
# Bioreason or Mojam Media be used in advertising or publicity pertaining to
# distribution of the software without specific, written prior permission.
#
"""program/module to trace Python program or function execution
Sample use, command line:
trace.py -c -f counts --ignore-dir '$prefix' spam.py eggs
trace.py -t --ignore-dir '$prefix' spam.py eggs
trace.py --trackcalls spam.py eggs
Sample use, programmatically
import sys
# create a Trace object, telling it what to ignore, and whether to
# do tracing or line-counting or both.
tracer = trace.Trace(ignoredirs=[sys.prefix, sys.exec_prefix,], trace=0,
count=1)
# run the new command using the given tracer
tracer.run('main()')
# make a report, placing output in /tmp
r = tracer.results()
r.write_results(show_missing=True, coverdir="/tmp")
"""
import linecache
import os
import re
import sys
import time
import token
import tokenize
import inspect
import gc
import dis
try:
import cPickle
pickle = cPickle
except ImportError:
import pickle
try:
import threading
except ImportError:
_settrace = sys.settrace
def _unsettrace():
sys.settrace(None)
else:
def _settrace(func):
threading.settrace(func)
sys.settrace(func)
def _unsettrace():
sys.settrace(None)
threading.settrace(None)
def usage(outfile):
outfile.write("""Usage: %s [OPTIONS] [ARGS]
Meta-options:
--help Display this help then exit.
--version Output version information then exit.
Otherwise, exactly one of the following three options must be given:
-t, --trace Print each line to sys.stdout before it is executed.
-c, --count Count the number of times each line is executed
and write the counts to .cover for each
module executed, in the module's directory.
See also `--coverdir', `--file', `--no-report' below.
-l, --listfuncs Keep track of which functions are executed at least
once and write the results to sys.stdout after the
program exits.
-T, --trackcalls Keep track of caller/called pairs and write the
results to sys.stdout after the program exits.
-r, --report Generate a report from a counts file; do not execute
any code. `--file' must specify the results file to
read, which must have been created in a previous run
with `--count --file=FILE'.
Modifiers:
-f, --file= File to accumulate counts over several runs.
-R, --no-report Do not generate the coverage report files.
Useful if you want to accumulate over several runs.
-C, --coverdir= Directory where the report files. The coverage
report for . is written to file
//.cover.
-m, --missing Annotate executable lines that were not executed
with '>>>>>> '.
-s, --summary Write a brief summary on stdout for each file.
(Can only be used with --count or --report.)
-g, --timing Prefix each line with the time since the program started.
Only used while tracing.
Filters, may be repeated multiple times:
--ignore-module= Ignore the given module(s) and its submodules
(if it is a package). Accepts comma separated
list of module names
--ignore-dir= Ignore files in the given directory (multiple
directories can be joined by os.pathsep).
""" % sys.argv[0])
PRAGMA_NOCOVER = "#pragma NO COVER"
# Simple rx to find lines with no code.
rx_blank = re.compile(r'^\s*(#.*)?$')
class Ignore:
def __init__(self, modules = None, dirs = None):
self._mods = modules or []
self._dirs = dirs or []
self._dirs = map(os.path.normpath, self._dirs)
self._ignore = { '': 1 }
def names(self, filename, modulename):
if modulename in self._ignore:
return self._ignore[modulename]
# haven't seen this one before, so see if the module name is
# on the ignore list. Need to take some care since ignoring
# "cmp" musn't mean ignoring "cmpcache" but ignoring
# "Spam" must also mean ignoring "Spam.Eggs".
for mod in self._mods:
if mod == modulename: # Identical names, so ignore
self._ignore[modulename] = 1
return 1
# check if the module is a proper submodule of something on
# the ignore list
n = len(mod)
# (will not overflow since if the first n characters are the
# same and the name has not already occurred, then the size
# of "name" is greater than that of "mod")
if mod == modulename[:n] and modulename[n] == '.':
self._ignore[modulename] = 1
return 1
# Now check that __file__ isn't in one of the directories
if filename is None:
# must be a built-in, so we must ignore
self._ignore[modulename] = 1
return 1
# Ignore a file when it contains one of the ignorable paths
for d in self._dirs:
# The '+ os.sep' is to ensure that d is a parent directory,
# as compared to cases like:
# d = "/usr/local"
# filename = "/usr/local.py"
# or
# d = "/usr/local.py"
# filename = "/usr/local.py"
if filename.startswith(d + os.sep):
self._ignore[modulename] = 1
return 1
# Tried the different ways, so we don't ignore this module
self._ignore[modulename] = 0
return 0
def modname(path):
"""Return a plausible module name for the patch."""
base = os.path.basename(path)
filename, ext = os.path.splitext(base)
return filename
def fullmodname(path):
"""Return a plausible module name for the path."""
# If the file 'path' is part of a package, then the filename isn't
# enough to uniquely identify it. Try to do the right thing by
# looking in sys.path for the longest matching prefix. We'll
# assume that the rest is the package name.
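# For example (a sketch, assuming '/usr/lib/python2.7' is on sys.path):
#   fullmodname('/usr/lib/python2.7/email/mime/text.py') -> 'email.mime.text'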
comparepath = os.path.normcase(path)
longest = ""
for dir in sys.path:
dir = os.path.normcase(dir)
if comparepath.startswith(dir) and comparepath[len(dir)] == os.sep:
if len(dir) > len(longest):
longest = dir
if longest:
base = path[len(longest) + 1:]
else:
base = path
# the drive letter is never part of the module name
drive, base = os.path.splitdrive(base)
base = base.replace(os.sep, ".")
if os.altsep:
base = base.replace(os.altsep, ".")
filename, ext = os.path.splitext(base)
return filename.lstrip(".")
class CoverageResults:
def __init__(self, counts=None, calledfuncs=None, infile=None,
callers=None, outfile=None):
self.counts = counts
if self.counts is None:
self.counts = {}
self.counter = self.counts.copy() # map (filename, lineno) to count
self.calledfuncs = calledfuncs
if self.calledfuncs is None:
self.calledfuncs = {}
self.calledfuncs = self.calledfuncs.copy()
self.callers = callers
if self.callers is None:
self.callers = {}
self.callers = self.callers.copy()
self.infile = infile
self.outfile = outfile
if self.infile:
# Try to merge existing counts file.
try:
counts, calledfuncs, callers = \
pickle.load(open(self.infile, 'rb'))
self.update(self.__class__(counts, calledfuncs, callers))
except (IOError, EOFError, ValueError), err:
print >> sys.stderr, ("Skipping counts file %r: %s"
% (self.infile, err))
def update(self, other):
"""Merge in the data from another CoverageResults"""
counts = self.counts
calledfuncs = self.calledfuncs
callers = self.callers
other_counts = other.counts
other_calledfuncs = other.calledfuncs
other_callers = other.callers
for key in other_counts.keys():
counts[key] = counts.get(key, 0) + other_counts[key]
for key in other_calledfuncs.keys():
calledfuncs[key] = 1
for key in other_callers.keys():
callers[key] = 1
def write_results(self, show_missing=True, summary=False, coverdir=None):
"""
@param coverdir
"""
if self.calledfuncs:
print
print "functions called:"
calls = self.calledfuncs.keys()
calls.sort()
for filename, modulename, funcname in calls:
print ("filename: %s, modulename: %s, funcname: %s"
% (filename, modulename, funcname))
if self.callers:
print
print "calling relationships:"
calls = self.callers.keys()
calls.sort()
lastfile = lastcfile = ""
for ((pfile, pmod, pfunc), (cfile, cmod, cfunc)) in calls:
if pfile != lastfile:
print
print "***", pfile, "***"
lastfile = pfile
lastcfile = ""
if cfile != pfile and lastcfile != cfile:
print " -->", cfile
lastcfile = cfile
print " %s.%s -> %s.%s" % (pmod, pfunc, cmod, cfunc)
# turn the counts data ("(filename, lineno) = count") into something
# accessible on a per-file basis
per_file = {}
for filename, lineno in self.counts.keys():
lines_hit = per_file[filename] = per_file.get(filename, {})
lines_hit[lineno] = self.counts[(filename, lineno)]
# accumulate summary info, if needed
sums = {}
for filename, count in per_file.iteritems():
# skip some "files" we don't care about...
if filename == "":
continue
if filename.startswith("> sys.stderr, "Can't save counts files because %s" % err
def write_results_file(self, path, lines, lnotab, lines_hit):
"""Return a coverage results file in path."""
try:
outfile = open(path, "w")
except IOError, err:
print >> sys.stderr, ("trace: Could not open %r for writing: %s"
"- skipping" % (path, err))
return 0, 0
n_lines = 0
n_hits = 0
for i, line in enumerate(lines):
lineno = i + 1
# do the blank/comment match to try to mark more lines
# (help the reader find stuff that hasn't been covered)
if lineno in lines_hit:
outfile.write("%5d: " % lines_hit[lineno])
n_hits += 1
n_lines += 1
elif rx_blank.match(line):
outfile.write(" ")
else:
# lines preceded by no marks weren't hit
# Highlight them if so indicated, unless the line contains
# #pragma: NO COVER
if lineno in lnotab and not PRAGMA_NOCOVER in lines[i]:
outfile.write(">>>>>> ")
n_lines += 1
else:
outfile.write(" ")
outfile.write(lines[i].expandtabs(8))
outfile.close()
return n_hits, n_lines
def find_lines_from_code(code, strs):
"""Return dict where keys are lines in the line number table."""
linenos = {}
for _, lineno in dis.findlinestarts(code):
if lineno not in strs:
linenos[lineno] = 1
return linenos
def find_lines(code, strs):
"""Return lineno dict for all code objects reachable from code."""
# get all of the lineno information from the code of this scope level
linenos = find_lines_from_code(code, strs)
# and check the constants for references to other code objects
for c in code.co_consts:
if inspect.iscode(c):
# find another code object, so recurse into it
linenos.update(find_lines(c, strs))
return linenos
def find_strings(filename):
"""Return a dict of possible docstring positions.
The dict maps line numbers to strings. There is an entry for each
line that contains only a string or a part of a triple-quoted
string.
"""
d = {}
# If the first token is a string, then it's the module docstring.
# Add this special case so that the test in the loop passes.
prev_ttype = token.INDENT
f = open(filename)
for ttype, tstr, start, end, line in tokenize.generate_tokens(f.readline):
if ttype == token.STRING:
if prev_ttype == token.INDENT:
sline, scol = start
eline, ecol = end
for i in range(sline, eline + 1):
d[i] = 1
prev_ttype = ttype
f.close()
return d
def find_executable_linenos(filename):
"""Return dict where keys are line numbers in the line number table."""
try:
prog = open(filename, "rU").read()
except IOError, err:
print >> sys.stderr, ("Not printing coverage data for %r: %s"
% (filename, err))
return {}
code = compile(prog, filename, "exec")
strs = find_strings(filename)
return find_lines(code, strs)
class Trace:
def __init__(self, count=1, trace=1, countfuncs=0, countcallers=0,
ignoremods=(), ignoredirs=(), infile=None, outfile=None,
timing=False):
"""
@param count true iff it should count number of times each
line is executed
@param trace true iff it should print out each line that is
being counted
@param countfuncs true iff it should just output a list of
(filename, modulename, funcname,) for functions
that were called at least once; This overrides
`count' and `trace'
@param ignoremods a list of the names of modules to ignore
@param ignoredirs a list of the names of directories to ignore
all of the (recursive) contents of
@param infile file from which to read stored counts to be
added into the results
@param outfile file in which to write the results
@param timing true iff timing information should be displayed
"""
self.infile = infile
self.outfile = outfile
self.ignore = Ignore(ignoremods, ignoredirs)
self.counts = {} # keys are (filename, linenumber)
self.blabbed = {} # for debugging
self.pathtobasename = {} # for memoizing os.path.basename
self.donothing = 0
self.trace = trace
self._calledfuncs = {}
self._callers = {}
self._caller_cache = {}
self.start_time = None
if timing:
self.start_time = time.time()
if countcallers:
self.globaltrace = self.globaltrace_trackcallers
elif countfuncs:
self.globaltrace = self.globaltrace_countfuncs
elif trace and count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace_and_count
elif trace:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_trace
elif count:
self.globaltrace = self.globaltrace_lt
self.localtrace = self.localtrace_count
else:
# Ahem -- do nothing? Okay.
self.donothing = 1
def run(self, cmd):
import __main__
dict = __main__.__dict__
if not self.donothing:
threading.settrace(self.globaltrace)
sys.settrace(self.globaltrace)
try:
exec cmd in dict, dict
finally:
if not self.donothing:
sys.settrace(None)
threading.settrace(None)
def runctx(self, cmd, globals=None, locals=None):
if globals is None: globals = {}
if locals is None: locals = {}
if not self.donothing:
_settrace(self.globaltrace)
try:
exec cmd in globals, locals
finally:
if not self.donothing:
_unsettrace()
def runfunc(self, func, *args, **kw):
result = None
if not self.donothing:
sys.settrace(self.globaltrace)
try:
result = func(*args, **kw)
finally:
if not self.donothing:
sys.settrace(None)
return result
def file_module_function_of(self, frame):
code = frame.f_code
filename = code.co_filename
if filename:
modulename = modname(filename)
else:
modulename = None
funcname = code.co_name
clsname = None
if code in self._caller_cache:
if self._caller_cache[code] is not None:
clsname = self._caller_cache[code]
else:
self._caller_cache[code] = None
## use of gc.get_referrers() was suggested by Michael Hudson
# all functions which refer to this code object
funcs = [f for f in gc.get_referrers(code)
if inspect.isfunction(f)]
# require len(funcs) == 1 to avoid ambiguity caused by calls to
# new.function(): "In the face of ambiguity, refuse the
# temptation to guess."
if len(funcs) == 1:
dicts = [d for d in gc.get_referrers(funcs[0])
if isinstance(d, dict)]
if len(dicts) == 0:
# PyPy may store functions directly on the class
# (more exactly: the container is not a Python object)
dicts = funcs
if len(dicts) == 1:
classes = [c for c in gc.get_referrers(dicts[0])
if hasattr(c, "__bases__")]
if len(classes) == 1:
# ditto for new.classobj()
clsname = classes[0].__name__
# cache the result - assumption is that new.* is
# not called later to disturb this relationship
# _caller_cache could be flushed if functions in
# the new module get called.
self._caller_cache[code] = clsname
if clsname is not None:
funcname = "%s.%s" % (clsname, funcname)
return filename, modulename, funcname
def globaltrace_trackcallers(self, frame, why, arg):
"""Handler for call events.
Adds information about who called who to the self._callers dict.
"""
if why == 'call':
# XXX Should do a better job of identifying methods
this_func = self.file_module_function_of(frame)
parent_func = self.file_module_function_of(frame.f_back)
self._callers[(parent_func, this_func)] = 1
def globaltrace_countfuncs(self, frame, why, arg):
"""Handler for call events.
Adds (filename, modulename, funcname) to the self._calledfuncs dict.
"""
if why == 'call':
this_func = self.file_module_function_of(frame)
self._calledfuncs[this_func] = 1
def globaltrace_lt(self, frame, why, arg):
"""Handler for call events.
If the code block being entered is to be ignored, returns `None',
else returns self.localtrace.
"""
if why == 'call':
code = frame.f_code
filename = frame.f_globals.get('__file__', None)
if filename:
# XXX modname() doesn't work right for packages, so
# the ignore support won't work right for packages
modulename = modname(filename)
if modulename is not None:
ignore_it = self.ignore.names(filename, modulename)
if not ignore_it:
if self.trace:
print (" --- modulename: %s, funcname: %s"
% (modulename, code.co_name))
return self.localtrace
else:
return None
def localtrace_trace_and_count(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
if self.start_time:
print '%.2f' % (time.time() - self.start_time),
bname = os.path.basename(filename)
print "%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)),
return self.localtrace
def localtrace_trace(self, frame, why, arg):
if why == "line":
# record the file name and line number of every trace
filename = frame.f_code.co_filename
lineno = frame.f_lineno
if self.start_time:
print '%.2f' % (time.time() - self.start_time),
bname = os.path.basename(filename)
print "%s(%d): %s" % (bname, lineno,
linecache.getline(filename, lineno)),
return self.localtrace
def localtrace_count(self, frame, why, arg):
if why == "line":
filename = frame.f_code.co_filename
lineno = frame.f_lineno
key = filename, lineno
self.counts[key] = self.counts.get(key, 0) + 1
return self.localtrace
def results(self):
return CoverageResults(self.counts, infile=self.infile,
outfile=self.outfile,
calledfuncs=self._calledfuncs,
callers=self._callers)
def _err_exit(msg):
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.exit(1)
def main(argv=None):
import getopt
if argv is None:
argv = sys.argv
try:
opts, prog_argv = getopt.getopt(argv[1:], "tcrRf:d:msC:lTg",
["help", "version", "trace", "count",
"report", "no-report", "summary",
"file=", "missing",
"ignore-module=", "ignore-dir=",
"coverdir=", "listfuncs",
"trackcalls", "timing"])
except getopt.error, msg:
sys.stderr.write("%s: %s\n" % (sys.argv[0], msg))
sys.stderr.write("Try `%s --help' for more information\n"
% sys.argv[0])
sys.exit(1)
trace = 0
count = 0
report = 0
no_report = 0
counts_file = None
missing = 0
ignore_modules = []
ignore_dirs = []
coverdir = None
summary = 0
listfuncs = False
countcallers = False
timing = False
for opt, val in opts:
if opt == "--help":
usage(sys.stdout)
sys.exit(0)
if opt == "--version":
sys.stdout.write("trace 2.0\n")
sys.exit(0)
if opt == "-T" or opt == "--trackcalls":
countcallers = True
continue
if opt == "-l" or opt == "--listfuncs":
listfuncs = True
continue
if opt == "-g" or opt == "--timing":
timing = True
continue
if opt == "-t" or opt == "--trace":
trace = 1
continue
if opt == "-c" or opt == "--count":
count = 1
continue
if opt == "-r" or opt == "--report":
report = 1
continue
if opt == "-R" or opt == "--no-report":
no_report = 1
continue
if opt == "-f" or opt == "--file":
counts_file = val
continue
if opt == "-m" or opt == "--missing":
missing = 1
continue
if opt == "-C" or opt == "--coverdir":
coverdir = val
continue
if opt == "-s" or opt == "--summary":
summary = 1
continue
if opt == "--ignore-module":
for mod in val.split(","):
ignore_modules.append(mod.strip())
continue
if opt == "--ignore-dir":
for s in val.split(os.pathsep):
s = os.path.expandvars(s)
# should I also call expanduser? (after all, could use $HOME)
s = s.replace("$prefix",
os.path.join(sys.prefix, "lib",
"python" + sys.version[:3]))
s = s.replace("$exec_prefix",
os.path.join(sys.exec_prefix, "lib",
"python" + sys.version[:3]))
s = os.path.normpath(s)
ignore_dirs.append(s)
continue
assert 0, "Should never get here"
if listfuncs and (count or trace):
_err_exit("cannot specify both --listfuncs and (--trace or --count)")
if not (count or trace or report or listfuncs or countcallers):
_err_exit("must specify one of --trace, --count, --report, "
"--listfuncs, or --trackcalls")
if report and no_report:
_err_exit("cannot specify both --report and --no-report")
if report and not counts_file:
_err_exit("--report requires a --file")
if no_report and len(prog_argv) == 0:
_err_exit("missing name of file to run")
# everything is ready
if report:
results = CoverageResults(infile=counts_file, outfile=counts_file)
results.write_results(missing, summary=summary, coverdir=coverdir)
else:
sys.argv = prog_argv
progname = prog_argv[0]
sys.path[0] = os.path.split(progname)[0]
t = Trace(count, trace, countfuncs=listfuncs,
countcallers=countcallers, ignoremods=ignore_modules,
ignoredirs=ignore_dirs, infile=counts_file,
outfile=counts_file, timing=timing)
try:
with open(progname) as fp:
code = compile(fp.read(), progname, 'exec')
# try to emulate __main__ namespace as much as possible
globs = {
'__file__': progname,
'__name__': '__main__',
'__package__': None,
'__cached__': None,
}
t.runctx(code, globs, globs)
except IOError, err:
_err_exit("Cannot run file %r because: %s" % (sys.argv[0], err))
except SystemExit:
pass
results = t.results()
if not no_report:
results.write_results(missing, summary=summary, coverdir=coverdir)
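# A usage sketch of the option handling in main(); "myprog.py" is a
# hypothetical script path. The first call counts executed lines into
# counts.out, the second produces an annotated report from that file.
def _demo_trace_cli():
    main(["trace.py", "--count", "--file", "counts.out", "myprog.py"])
    main(["trace.py", "--report", "--file", "counts.out", "--missing"])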
if __name__=='__main__':
main()
# Author: Travis Oliphant 2001
# Author: Nathan Woods 2013 (nquad &c)
from __future__ import division, print_function, absolute_import
import sys
import warnings
from functools import partial
from . import _quadpack
import numpy
from numpy import Inf
__all__ = ['quad', 'dblquad', 'tplquad', 'nquad', 'quad_explain',
'IntegrationWarning']
error = _quadpack.error
class IntegrationWarning(UserWarning):
"""
Warning on issues during integration.
"""
pass
def quad_explain(output=sys.stdout):
"""
Print extra information about integrate.quad() parameters and returns.
Parameters
----------
output : instance with "write" method, optional
Information about `quad` is passed to ``output.write()``.
Default is ``sys.stdout``.
Returns
-------
None
"""
output.write(quad.__doc__)
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
limlst=50):
"""
Compute a definite integral.
Integrate func from `a` to `b` (possibly infinite interval) using a
technique from the Fortran library QUADPACK.
Parameters
----------
func : function
A Python function or method to integrate. If `func` takes many
arguments, it is integrated along the axis corresponding to the
first argument.
If the user desires improved integration performance, then f may
instead be a ``ctypes`` function of the form:
f(int n, double args[n]),
where ``args`` is an array of function arguments and ``n`` is the
length of ``args``. ``f.argtypes`` should be set to
``(c_int, c_double)``, and ``f.restype`` should be ``(c_double,)``.
a : float
Lower limit of integration (use -numpy.inf for -infinity).
b : float
Upper limit of integration (use numpy.inf for +infinity).
args : tuple, optional
Extra arguments to pass to `func`.
full_output : int, optional
Non-zero to return a dictionary of integration information.
If non-zero, warning messages are also suppressed and the
message is appended to the output tuple.
Returns
-------
y : float
The integral of func from `a` to `b`.
abserr : float
An estimate of the absolute error in the result.
infodict : dict
A dictionary containing additional information.
Run scipy.integrate.quad_explain() for more information.
message
A convergence message.
explain
Appended only with 'cos' or 'sin' weighting and infinite
integration limits, it contains an explanation of the codes in
infodict['ierlst']
Other Parameters
----------------
epsabs : float or int, optional
Absolute error tolerance.
epsrel : float or int, optional
Relative error tolerance.
limit : float or int, optional
An upper bound on the number of subintervals used in the adaptive
algorithm.
points : (sequence of floats,ints), optional
A sequence of break points in the bounded integration interval
where local difficulties of the integrand may occur (e.g.,
singularities, discontinuities). The sequence does not have
to be sorted.
weight : float or int, optional
String indicating weighting function. Full explanation for this
and the remaining arguments can be found below.
wvar : optional
Variables for use with weighting functions.
wopts : optional
Optional input for reusing Chebyshev moments.
maxp1 : float or int, optional
An upper bound on the number of Chebyshev moments.
limlst : int, optional
Upper bound on the number of cycles (>=3) for use with a sinusoidal
weighting and an infinite end-point.
See Also
--------
dblquad : double integral
tplquad : triple integral
nquad : n-dimensional integrals (uses `quad` recursively)
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simps : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
Notes
-----
**Extra information for quad() inputs and outputs**
If full_output is non-zero, then the third output argument
(infodict) is a dictionary with entries as tabulated below. For
infinite limits, the range is transformed to (0,1) and the
optional outputs are given with respect to this transformed range.
Let M be the input argument limit and let K be infodict['last'].
The entries are:
'neval'
The number of function evaluations.
'last'
The number, K, of subintervals produced in the subdivision process.
'alist'
A rank-1 array of length M, the first K elements of which are the
left end points of the subintervals in the partition of the
integration range.
'blist'
A rank-1 array of length M, the first K elements of which are the
right end points of the subintervals.
'rlist'
A rank-1 array of length M, the first K elements of which are the
integral approximations on the subintervals.
'elist'
A rank-1 array of length M, the first K elements of which are the
moduli of the absolute error estimates on the subintervals.
'iord'
A rank-1 integer array of length M, the first L elements of
which are pointers to the error estimates over the subintervals
with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
sequence ``infodict['iord']`` and let E be the sequence
``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
decreasing sequence.
If the input argument points is provided (i.e. it is not None),
the following additional outputs are placed in the output
dictionary. Assume the points sequence is of length P.
'pts'
A rank-1 array of length P+2 containing the integration limits
and the break points of the intervals in ascending order.
This is an array giving the subintervals over which integration
will occur.
'level'
A rank-1 integer array of length M (=limit), containing the
subdivision levels of the subintervals, i.e., if (aa,bb) is a
subinterval of ``(pts[1], pts[2])`` where ``pts[1]`` and ``pts[2]``
are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
'ndin'
A rank-1 integer array of length P+2. After the first integration
over the intervals (pts[1], pts[2]), the error estimates over some
of the intervals may have been increased artificially in order to
put their subdivision forward. This array has ones in slots
corresponding to the subintervals for which this happens.
**Weighting the integrand**
The input variables, *weight* and *wvar*, are used to weight the
integrand by a select list of functions. Different integration
methods are used to compute the integral with these weighting
functions. The possible values of weight and the corresponding
weighting functions are.
========== =================================== =====================
``weight`` Weight function used ``wvar``
========== =================================== =====================
'cos' cos(w*x) wvar = w
'sin' sin(w*x) wvar = w
'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta)
'alg-loga' g(x)*log(x-a) wvar = (alpha, beta)
'alg-logb' g(x)*log(b-x) wvar = (alpha, beta)
'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta)
'cauchy' 1/(x-c) wvar = c
========== =================================== =====================
wvar holds the parameter w, (alpha, beta), or c depending on the weight
selected. In these expressions, a and b are the integration limits.
For the 'cos' and 'sin' weighting, additional inputs and outputs are
available.
For finite integration limits, the integration is performed using a
Clenshaw-Curtis method which uses Chebyshev moments. For repeated
calculations, these moments are saved in the output dictionary:
'momcom'
The maximum level of Chebyshev moments that have been computed,
i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
computed for intervals of length ``|b-a| * 2**(-l)``,
``l=0,1,...,M_c``.
'nnlog'
A rank-1 integer array of length M(=limit), containing the
subdivision levels of the subintervals, i.e., an element of this
array is equal to l if the corresponding subinterval is
``|b-a|* 2**(-l)``.
'chebmo'
A rank-2 array of shape (25, maxp1) containing the computed
Chebyshev moments. These can be passed on to an integration
over the same interval by passing this array as the second
element of the sequence wopts and passing infodict['momcom'] as
the first element.
If one of the integration limits is infinite, then a Fourier integral is
computed (assuming ``w != 0``). If full_output is 1 and a numerical error
is encountered, besides the error message attached to the output tuple,
a dictionary is also appended to the output tuple which translates the
error codes in the array ``info['ierlst']`` to English messages. The
output information dictionary contains the following entries instead of
'last', 'alist', 'blist', 'rlist', and 'elist':
'lst'
The number of subintervals needed for the integration (call it ``K_f``).
'rslst'
A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
contain the integral contribution over the interval
``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
and ``k=1,2,...,K_f``.
'erlst'
A rank-1 array of length ``M_f`` containing the error estimate
corresponding to the interval in the same position in
``infodict['rslst']``.
'ierlst'
A rank-1 integer array of length ``M_f`` containing an error flag
corresponding to the interval in the same position in
``infodict['rslst']``. See the explanation dictionary (last entry
in the output tuple) for the meaning of the codes.
Examples
--------
Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result
>>> from scipy import integrate
>>> x2 = lambda x: x**2
>>> integrate.quad(x2, 0, 4)
(21.333333333333332, 2.3684757858670003e-13)
>>> print(4**3 / 3.) # analytical result
21.3333333333
Calculate :math:`\\int^\\infty_0 e^{-x} dx`
>>> import numpy as np
>>> invexp = lambda x: np.exp(-x)
>>> integrate.quad(invexp, 0, np.inf)
(1.0, 5.842605999138044e-11)
>>> f = lambda x,a : a*x
>>> y, err = integrate.quad(f, 0, 1, args=(1,))
>>> y
0.5
>>> y, err = integrate.quad(f, 0, 1, args=(3,))
>>> y
1.5
Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
y parameter as 1::
testlib.c =>
double func(int n, double args[n]){
return args[0]*args[0] + args[1]*args[1];}
compile to library testlib.*
::
from scipy import integrate
import ctypes
lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
lib.func.restype = ctypes.c_double
lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
integrate.quad(lib.func,0,1,(1,))
#(1.3333333333333333, 1.4802973661668752e-14)
print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
# 1.3333333333333333
"""
if not isinstance(args, tuple):
args = (args,)
if (weight is None):
retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
points)
else:
retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
limlst, limit, maxp1, weight, wvar, wopts)
ier = retval[-1]
if ier == 0:
return retval[:-1]
msgs = {80: "A Python error occurred possibly while calling the function.",
1: "The maximum number of subdivisions (%d) has been achieved.\n If increasing the limit yields no improvement it is advised to analyze \n the integrand in order to determine the difficulties. If the position of a \n local difficulty can be determined (singularity, discontinuity) one will \n probably gain from splitting up the interval and calling the integrator \n on the subranges. Perhaps a special-purpose integrator should be used." % limit,
2: "The occurrence of roundoff error is detected, which prevents \n the requested tolerance from being achieved. The error may be \n underestimated.",
3: "Extremely bad integrand behavior occurs at some points of the\n integration interval.",
4: "The algorithm does not converge. Roundoff error is detected\n in the extrapolation table. It is assumed that the requested tolerance\n cannot be achieved, and that the returned result (if full_output = 1) is \n the best which can be obtained.",
5: "The integral is probably divergent, or slowly convergent.",
6: "The input is invalid.",
7: "Abnormal termination of the routine. The estimates for result\n and error are less reliable. It is assumed that the requested accuracy\n has not been achieved.",
'unknown': "Unknown error."}
if weight in ['cos','sin'] and (b == Inf or a == -Inf):
msgs[1] = "The maximum number of cycles allowed has been achieved., e.e.\n of subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n *pi/abs(omega), for k = 1, 2, ..., lst. One can allow more cycles by increasing the value of limlst. Look at info['ierlst'] with full_output=1."
msgs[4] = "The extrapolation table constructed for convergence acceleration\n of the series formed by the integral contributions over the cycles, \n does not converge to within the requested accuracy. Look at \n info['ierlst'] with full_output=1."
msgs[7] = "Bad integrand behavior occurs within one or more of the cycles.\n Location and type of the difficulty involved can be determined from \n the vector info['ierlist'] obtained with full_output=1."
explain = {1: "The maximum number of subdivisions (= limit) has been \n achieved on this cycle.",
2: "The occurrence of roundoff error is detected and prevents\n the tolerance imposed on this cycle from being achieved.",
3: "Extremely bad integrand behavior occurs at some points of\n this cycle.",
4: "The integral over this cycle does not converge (to within the required accuracy) due to roundoff in the extrapolation procedure invoked on this cycle. It is assumed that the result on this interval is the best which can be obtained.",
5: "The integral over this cycle is probably divergent or slowly convergent."}
try:
msg = msgs[ier]
except KeyError:
msg = msgs['unknown']
if ier in [1,2,3,4,5,7]:
if full_output:
if weight in ['cos','sin'] and (b == Inf or a == Inf):
return retval[:-1] + (msg, explain)
else:
return retval[:-1] + (msg,)
else:
warnings.warn(msg, IntegrationWarning)
return retval[:-1]
else:
raise ValueError(msg)
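# A short sketch (not from the docstring above): integrating across a
# known jump with `points`, then reading the `infodict` fields described
# in the Notes section via full_output.
def _demo_quad_points():
    f = lambda x: 1.0 if x < 0.5 else 0.0   # jump discontinuity at 0.5
    y, err, info = quad(f, 0, 1, points=[0.5], full_output=1)
    return y, info['neval'], info['last']   # y is approximately 0.5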
def _quad(func,a,b,args,full_output,epsabs,epsrel,limit,points):
infbounds = 0
if (b != Inf and a != -Inf):
pass # standard integration
elif (b == Inf and a != -Inf):
infbounds = 1
bound = a
elif (b == Inf and a == -Inf):
infbounds = 2
bound = 0 # ignored
elif (b != Inf and a == -Inf):
infbounds = -1
bound = b
else:
raise RuntimeError("Infinity comparisons don't work for you.")
if points is None:
if infbounds == 0:
return _quadpack._qagse(func,a,b,args,full_output,epsabs,epsrel,limit)
else:
return _quadpack._qagie(func,bound,infbounds,args,full_output,epsabs,epsrel,limit)
else:
if infbounds != 0:
raise ValueError("Infinity inputs cannot be used with break points.")
else:
nl = len(points)
the_points = numpy.zeros((nl+2,), float)
the_points[:nl] = points
return _quadpack._qagpe(func,a,b,the_points,args,full_output,epsabs,epsrel,limit)
def _quad_weight(func,a,b,args,full_output,epsabs,epsrel,limlst,limit,maxp1,weight,wvar,wopts):
if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
raise ValueError("%s not a recognized weighting function." % weight)
strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}
if weight in ['cos','sin']:
integr = strdict[weight]
if (b != Inf and a != -Inf): # finite limits
if wopts is None: # no precomputed chebyshev moments
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1,1)
else: # precomputed chebyshev moments
momcom = wopts[0]
chebcom = wopts[1]
return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
epsabs, epsrel, limit, maxp1, 2, momcom, chebcom)
elif (b == Inf and a != -Inf):
return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
epsabs,limlst,limit,maxp1)
elif (b != Inf and a == -Inf): # remap function and interval
if weight == 'cos':
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return func(*myargs)
else:
def thefunc(x,*myargs):
y = -x
func = myargs[0]
myargs = (y,) + myargs[1:]
return -func(*myargs)
args = (func,) + args
return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
full_output, epsabs, limlst, limit, maxp1)
else:
raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
else:
if a in [-Inf,Inf] or b in [-Inf,Inf]:
raise ValueError("Cannot integrate with this weight over an infinite interval.")
if weight[:3] == 'alg':
integr = strdict[weight]
return _quadpack._qawse(func, a, b, wvar, integr, args,
full_output, epsabs, epsrel, limit)
else: # weight == 'cauchy'
return _quadpack._qawce(func, a, b, wvar, args, full_output,
epsabs, epsrel, limit)
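# A sketch of the 'alg' branch above: the weight function is
# (x-a)**alpha * (b-x)**beta, so integrating f(x) = 1 with
# wvar = (-0.5, 0) evaluates int_0^1 x**-0.5 dx = 2 without the
# integrand itself ever touching the singularity at x = 0.
def _demo_quad_weight():
    y, err = quad(lambda x: 1.0, 0, 1, weight='alg', wvar=(-0.5, 0.0))
    return y    # approximately 2.0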
def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
"""
Compute a double integral.
Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
and ``y = gfun(x)..hfun(x)``.
Parameters
----------
func : callable
A Python function or method of at least two variables: y must be the
first argument and x the second argument.
a, b : float
The limits of integration in x: `a` < `b`
gfun : callable
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result: a
lambda function can be useful here.
hfun : callable
The upper boundary curve in y (same requirements as `gfun`).
args : sequence, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the inner 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See also
--------
quad : single integral
tplquad : triple integral
nquad : N-dimensional integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
odeint : ODE integrator
ode : ODE integrator
simps : integrator for sampled data
romb : integrator for sampled data
scipy.special : for coefficients and roots of orthogonal polynomials
"""
def temp_ranges(*args):
return [gfun(args[0]), hfun(args[0])]
return nquad(func, [temp_ranges, [a, b]], args=args)
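# A small usage sketch of the delegation above: integrate x*y over the
# rectangle 0 <= x <= 1, 0 <= y <= 2; the exact value is 1.0. Note the
# (y, x) argument order required of the integrand.
def _demo_dblquad():
    y, err = dblquad(lambda y, x: x * y, 0, 1, lambda x: 0, lambda x: 2)
    return y    # approximately 1.0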
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
epsrel=1.49e-8):
"""
Compute a triple (definite) integral.
Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``.
Parameters
----------
func : function
A Python function or method of at least three variables in the
order (z, y, x).
a, b : float
The limits of integration in x: `a` < `b`
gfun : function
The lower boundary curve in y which is a function taking a single
floating point argument (x) and returning a floating point result:
a lambda function can be useful here.
hfun : function
The upper boundary curve in y (same requirements as `gfun`).
qfun : function
The lower boundary surface in z. It must be a function that takes
two floats in the order (x, y) and returns a float.
rfun : function
The upper boundary surface in z. (Same requirements as `qfun`.)
args : tuple, optional
Extra arguments to pass to `func`.
epsabs : float, optional
Absolute tolerance passed directly to the innermost 1-D quadrature
integration. Default is 1.49e-8.
epsrel : float, optional
Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.
Returns
-------
y : float
The resultant integral.
abserr : float
An estimate of the error.
See Also
--------
quad: Adaptive quadrature using QUADPACK
quadrature: Adaptive Gaussian quadrature
fixed_quad: Fixed-order Gaussian quadrature
dblquad: Double integrals
nquad : N-dimensional integrals
romb: Integrators for sampled data
simps: Integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
scipy.special: For coefficients and roots of orthogonal polynomials
"""
# f(z, y, x)
# qfun/rfun (x, y)
# gfun/hfun(x)
# nquad will hand (y, x, t0, ...) to ranges0
# nquad will hand (x, t0, ...) to ranges1
# Stupid different API...
def ranges0(*args):
return [qfun(args[1], args[0]), rfun(args[1], args[0])]
def ranges1(*args):
return [gfun(args[0]), hfun(args[0])]
ranges = [ranges0, ranges1, [a, b]]
return nquad(func, ranges, args=args)
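# A usage sketch mirroring the (z, y, x) argument order documented
# above: integrating x*y*z over the unit cube gives (1/2)**3 = 0.125.
def _demo_tplquad():
    y, err = tplquad(lambda z, y, x: x * y * z,
                     0, 1,                             # x limits
                     lambda x: 0, lambda x: 1,         # y = gfun(x)..hfun(x)
                     lambda x, y: 0, lambda x, y: 1)   # z = qfun(x,y)..rfun(x,y)
    return y    # approximately 0.125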
def nquad(func, ranges, args=None, opts=None, full_output=False):
"""
Integration over multiple variables.
Wraps `quad` to enable integration over multiple variables.
Various options allow improved integration of discontinuous functions, as
well as the use of weighted integration, and generally finer control of the
integration process.
Parameters
----------
func : callable
The function to be integrated. Has arguments of ``x0, ... xn``,
``t0, ... tm``, where integration is carried out over ``x0, ... xn``, which
must be floats. Function signature should be
``func(x0, x1, ..., xn, t0, t1, ..., tm)``. Integration is carried out
in order. That is, integration over ``x0`` is the innermost integral,
and ``xn`` is the outermost.
If performance is a concern, this function may be a ctypes function of
the form::
f(int n, double args[n])
where ``n`` is the number of extra parameters and args is an array
of doubles of the additional parameters. This function may then
be compiled to a dynamic/shared library then imported through
``ctypes``, setting the function's argtypes to ``(c_int, c_double)``,
and the function's restype to ``(c_double)``. Its pointer may then be
passed into `nquad` normally.
This allows the underlying Fortran library to evaluate the function in
the innermost integration calls without callbacks to Python, and also
speeds up the evaluation of the function itself.
ranges : iterable object
Each element of ranges may be either a sequence of 2 numbers, or else
a callable that returns such a sequence. ``ranges[0]`` corresponds to
integration over x0, and so on. If an element of ranges is a callable,
then it will be called with all of the integration arguments available,
as well as any parametric arguments. e.g. if
``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as
either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``.
args : iterable object, optional
Additional arguments ``t0, ... tm``, required by `func`, `ranges`, and
``opts``.
opts : iterable object or dict, optional
Options to be passed to `quad`. May be empty, a dict, or
a sequence of dicts or functions that return a dict. If empty, the
default options from scipy.integrate.quad are used. If a dict, the same
options are used for all levels of integration. If a sequence, then each
element of the sequence corresponds to a particular integration. e.g.
opts[0] corresponds to integration over x0, and so on. If a callable,
the signature must be the same as for ``ranges``. The available
options together with their default values are:
- epsabs = 1.49e-08
- epsrel = 1.49e-08
- limit = 50
- points = None
- weight = None
- wvar = None
- wopts = None
For more information on these options, see `quad` and `quad_explain`.
full_output : bool, optional
Partial implementation of ``full_output`` from scipy.integrate.quad.
The number of integrand function evaluations ``neval`` can be obtained
by setting ``full_output=True`` when calling nquad.
Returns
-------
result : float
The result of the integration.
abserr : float
The maximum of the estimates of the absolute error in the various
integration results.
out_dict : dict, optional
A dict containing additional information on the integration.
See Also
--------
quad : 1-dimensional numerical integration
dblquad, tplquad : double and triple integrals
fixed_quad : fixed-order Gaussian quadrature
quadrature : adaptive Gaussian quadrature
Examples
--------
>>> from scipy import integrate
>>> import numpy as np
>>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
... 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
>>> points = [[lambda x1,x2,x3 : 0.2*x3 + 0.5 + 0.25*x1], [], [], []]
>>> def opts0(*args, **kwargs):
... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
>>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
... opts=[opts0,{},{},{}], full_output=True)
(1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
>>> scale = .1
>>> def func2(x0, x1, x2, x3, t0, t1):
... return x0*x1*x3**2 + np.sin(x2) + 1 + (1 if x0+t1*x1-t0>0 else 0)
>>> def lim0(x1, x2, x3, t0, t1):
... return [scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) - 1,
... scale * (x1**2 + x2 + np.cos(x3)*t0*t1 + 1) + 1]
>>> def lim1(x2, x3, t0, t1):
... return [scale * (t0*x2 + t1*x3) - 1,
... scale * (t0*x2 + t1*x3) + 1]
>>> def lim2(x3, t0, t1):
... return [scale * (x3 + t0**2*t1**3) - 1,
... scale * (x3 + t0**2*t1**3) + 1]
>>> def lim3(t0, t1):
... return [scale * (t0+t1) - 1, scale * (t0+t1) + 1]
>>> def opts0(x1, x2, x3, t0, t1):
... return {'points' : [t0 - t1*x1]}
>>> def opts1(x2, x3, t0, t1):
... return {}
>>> def opts2(x3, t0, t1):
... return {}
>>> def opts3(t0, t1):
... return {}
>>> integrate.nquad(func2, [lim0, lim1, lim2, lim3], args=(0,0),
... opts=[opts0, opts1, opts2, opts3])
(25.066666666666666, 2.7829590483937256e-13)
"""
depth = len(ranges)
ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
if args is None:
args = ()
if opts is None:
opts = [dict([])] * depth
if isinstance(opts, dict):
opts = [_OptFunc(opts)] * depth
else:
opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
return _NQuad(func, ranges, opts, full_output).integrate(*args)
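# A compact sketch of the two range styles accepted above: a constant
# (a, b) pair and a callable that receives the outer variables. Here the
# inner variable x0 runs from 0 to x1, so the result is the area of a
# triangle, 0.5.
def _demo_nquad_ranges():
    value, abserr = nquad(lambda x0, x1: 1.0,
                          [lambda x1: (0, x1), (0, 1)])
    return value    # approximately 0.5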
class _RangeFunc(object):
def __init__(self, range_):
self.range_ = range_
def __call__(self, *args):
"""Return stored value.
*args needed because range_ can be float or func, and is called with
variable number of parameters.
"""
return self.range_
class _OptFunc(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, *args):
"""Return stored dict."""
return self.opt
class _NQuad(object):
def __init__(self, func, ranges, opts, full_output):
self.abserr = 0
self.func = func
self.ranges = ranges
self.opts = opts
self.maxdepth = len(ranges)
self.full_output = full_output
if self.full_output:
self.out_dict = {'neval': 0}
def integrate(self, *args, **kwargs):
depth = kwargs.pop('depth', 0)
if kwargs:
raise ValueError('unexpected kwargs')
# Get the integration range and options for this depth.
ind = -(depth + 1)
fn_range = self.ranges[ind]
low, high = fn_range(*args)
fn_opt = self.opts[ind]
opt = dict(fn_opt(*args))
if 'points' in opt:
opt['points'] = [x for x in opt['points'] if low <= x <= high]
if depth + 1 == self.maxdepth:
f = self.func
else:
f = partial(self.integrate, depth=depth+1)
quad_r = quad(f, low, high, args=args, full_output=self.full_output,
**opt)
value = quad_r[0]
abserr = quad_r[1]
if self.full_output:
infodict = quad_r[2]
# The 'neval' parameter in full_output returns the total
# number of times the integrand function was evaluated.
# Therefore, only the innermost integration loop counts.
if depth + 1 == self.maxdepth:
self.out_dict['neval'] += infodict['neval']
self.abserr = max(self.abserr, abserr)
if depth > 0:
return value
else:
# Final result of n-D integration with error
if self.full_output:
return value, self.abserr, self.out_dict
else:
return value, self.abserr
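# A sketch of the full_output plumbing above: only the innermost quad
# calls contribute to 'neval', which _NQuad accumulates in out_dict.
def _demo_nquad_neval():
    value, abserr, out = nquad(lambda x0, x1: x0 * x1,
                               [(0, 1), (0, 1)], full_output=True)
    return value, out['neval']    # value is approximately 0.25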
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
A source to be used in testing.
"""
import pytz
from six.moves import filter
from datetime import datetime, timedelta
import itertools
from six.moves import range
from zipline.protocol import (
Event,
DATASOURCE_TYPE
)
from zipline.gens.utils import hash_args
def create_trade(sid, price, amount, datetime, source_id="test_factory"):
trade = Event()
trade.source_id = source_id
trade.type = DATASOURCE_TYPE.TRADE
trade.sid = sid
trade.dt = datetime
trade.price = price
trade.close_price = price
trade.open_price = price
trade.low = price * .95
trade.high = price * 1.05
trade.volume = amount
return trade
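# A quick sketch of the factory above: the event carries synthetic OHLC
# fields derived from the single price, plus the supplied volume.
def _demo_create_trade():
    trade = create_trade(sid=1, price=10.0, amount=100,
                         datetime=datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
    return trade.high, trade.low    # (10.5, 9.5)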
def date_gen(start,
end,
env,
delta=timedelta(minutes=1),
repeats=None):
"""
Utility to generate a stream of dates.
"""
daily_delta = not (delta.total_seconds()
% timedelta(days=1).total_seconds())
cur = start
if daily_delta:
# if we are producing daily timestamps, we
# use midnight
cur = cur.replace(hour=0, minute=0, second=0,
microsecond=0)
def advance_current(cur):
"""
Advances the current dt skipping non market days and minutes.
"""
cur = cur + delta
if not (env.is_trading_day
if daily_delta
else env.is_market_hours)(cur):
if daily_delta:
return env.next_trading_day(cur)
else:
return env.next_open_and_close(cur)[0]
else:
return cur
# yield count trade events, all on trading days, and
# during trading hours.
while cur < end:
if repeats:
for j in range(repeats):
yield cur
else:
yield cur
cur = advance_current(cur)
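# A sketch driving date_gen with a stub environment (the real zipline
# env exposes the same hooks); every minute counts as market hours here,
# so the generator simply walks the interval in delta-sized steps.
def _demo_date_gen():
    class _StubEnv(object):
        @staticmethod
        def is_market_hours(dt):
            return True

        @staticmethod
        def is_trading_day(dt):
            return True

    start = datetime(2008, 6, 6, 15, tzinfo=pytz.utc)
    return list(date_gen(start, start + timedelta(minutes=3), _StubEnv()))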
class SpecificEquityTrades(object):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
count : integer representing number of trades
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
filter : iterable of sids to keep; only events whose sid is in it are yielded
"""
def __init__(self, env, *args, **kwargs):
# We shouldn't get any positional arguments.
assert len(args) == 0
self.env = env
# Default to None for event_list and filter.
self.event_list = kwargs.get('event_list')
self.filter = kwargs.get('filter')
if self.event_list is not None:
# If event_list is provided, extract parameters from there
# This isn't really clean and ultimately I think this
# class should serve a single purpose (either take an
# event_list or autocreate events).
self.count = kwargs.get('count', len(self.event_list))
self.start = kwargs.get('start', self.event_list[0].dt)
self.end = kwargs.get('end', self.event_list[-1].dt)
self.delta = kwargs.get(
'delta',
self.event_list[1].dt - self.event_list[0].dt)
self.concurrent = kwargs.get('concurrent', False)
self.identifiers = kwargs.get(
'sids',
set(event.sid for event in self.event_list)
)
assets_by_identifier = {}
for identifier in self.identifiers:
assets_by_identifier[identifier] = env.asset_finder.\
lookup_generic(identifier, datetime.now())[0]
self.sids = [asset.sid for asset in assets_by_identifier.values()]
for event in self.event_list:
event.sid = assets_by_identifier[event.sid].sid
else:
# Unpack config dictionary with default values.
self.count = kwargs.get('count', 500)
self.start = kwargs.get(
'start',
datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
self.end = kwargs.get(
'end',
datetime(2008, 6, 6, 15, tzinfo=pytz.utc))
self.delta = kwargs.get(
'delta',
timedelta(minutes=1))
self.concurrent = kwargs.get('concurrent', False)
self.identifiers = kwargs.get('sids', [1, 2])
assets_by_identifier = {}
for identifier in self.identifiers:
assets_by_identifier[identifier] = env.asset_finder.\
lookup_generic(identifier, datetime.now())[0]
self.sids = [asset.sid for asset in assets_by_identifier.values()]
# Hash_value for downstream sorting.
self.arg_string = hash_args(*args, **kwargs)
self.generator = self.create_fresh_generator()
def __iter__(self):
return self
def next(self):
return self.generator.next()
def __next__(self):
return next(self.generator)
def rewind(self):
self.generator = self.create_fresh_generator()
def get_hash(self):
return self.__class__.__name__ + "-" + self.arg_string
def update_source_id(self, gen):
for event in gen:
event.source_id = self.get_hash()
yield event
def create_fresh_generator(self):
if self.event_list:
event_gen = (event for event in self.event_list)
unfiltered = self.update_source_id(event_gen)
# Set up iterators for each expected field.
else:
if self.concurrent:
# in this context the count is the number of
# trades per sid, not the total.
date_generator = date_gen(
start=self.start,
end=self.end,
delta=self.delta,
repeats=len(self.sids),
env=self.env,
)
else:
date_generator = date_gen(
start=self.start,
end=self.end,
delta=self.delta,
env=self.env,
)
source_id = self.get_hash()
unfiltered = (
create_trade(
sid=sid,
price=float(i % 10) + 1.0,
amount=(i * 50) % 900 + 100,
datetime=date,
source_id=source_id,
) for (i, date), sid in itertools.product(
enumerate(date_generator), self.sids
)
)
# If we specified a sid filter, filter out elements that don't
# match the filter.
if self.filter:
filtered = filter(
lambda event: event.sid in self.filter, unfiltered)
# Otherwise just use all events.
else:
filtered = unfiltered
# Return the filtered event stream.
return filtered
# Copyright 2021 Northern.tech AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import subprocess
from .base import BaseContainerManagerNamespace
class DockerNamespace(BaseContainerManagerNamespace):
def __init__(self, name):
BaseContainerManagerNamespace.__init__(self, name)
def setup(self):
pass
def teardown(self):
pass
def execute(self, container_id, cmd):
cmd = ["docker", "exec", "{}".format(container_id)] + cmd
ret = subprocess.check_output(cmd).decode("utf-8").strip()
return ret
def cmd(self, container_id, docker_cmd, cmd=None):
    # avoid sharing a mutable default list between calls
    cmd = ["docker", docker_cmd] + [str(container_id)] + (cmd or [])
ret = subprocess.run(
cmd, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
return ret.stdout.decode("utf-8").strip()
def getid(self, filters):
filters.append(self.name)
filters = ["grep {}".format(f) for f in filters]
cmd = "docker ps | " + " | ".join(filters) + " | awk '{print $1}'"
ret = subprocess.check_output(cmd, shell=True).decode("utf-8").strip()
if ret == "":
raise RuntimeError("container id for {} not found".format(str(filters)))
return ret
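# A usage sketch (namespace, filter, and command are hypothetical):
# resolve a container id by grepping `docker ps`, then run a command
# inside that container.
def _demo_docker_namespace():
    ns = DockerNamespace("mytestenv")
    container_id = ns.getid(["mender-api-gateway"])
    return ns.execute(container_id, ["cat", "/etc/hostname"])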
"""The dsmr component."""
import asyncio
from asyncio import CancelledError
from contextlib import suppress
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DATA_LISTENER, DATA_TASK, DOMAIN, PLATFORMS
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up DSMR from a config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = {}
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
listener = entry.add_update_listener(async_update_options)
hass.data[DOMAIN][entry.entry_id][DATA_LISTENER] = listener
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
task = hass.data[DOMAIN][entry.entry_id][DATA_TASK]
listener = hass.data[DOMAIN][entry.entry_id][DATA_LISTENER]
# Cancel the reconnect task
task.cancel()
with suppress(CancelledError):
await task
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
listener()
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def async_update_options(hass: HomeAssistant, config_entry: ConfigEntry):
"""Update options."""
await hass.config_entries.async_reload(config_entry.entry_id)
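# A sketch of the contract async_unload_entry relies on: the sensor
# platform is expected to park its reconnect task under DATA_TASK so it
# can be cancelled on unload (`reconnect_coro` is a hypothetical
# coroutine standing in for the platform's reconnect loop).
async def _demo_register_task(hass: HomeAssistant, entry: ConfigEntry, reconnect_coro):
    task = asyncio.create_task(reconnect_coro)
    hass.data[DOMAIN][entry.entry_id][DATA_TASK] = task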
# Copyright (C) 2008 Andi Albrecht, albrecht.andi@gmail.com
#
# This module is part of python-sqlparse and is released under
# the BSD License: http://www.opensource.org/licenses/bsd-license.php.
"""SQL formatter"""
from sqlparse import filters
from sqlparse.exceptions import SQLParseError
def validate_options(options):
"""Validates options."""
kwcase = options.get('keyword_case', None)
if kwcase not in [None, 'upper', 'lower', 'capitalize']:
raise SQLParseError('Invalid value for keyword_case: %r' % kwcase)
idcase = options.get('identifier_case', None)
if idcase not in [None, 'upper', 'lower', 'capitalize']:
raise SQLParseError('Invalid value for identifier_case: %r' % idcase)
ofrmt = options.get('output_format', None)
if ofrmt not in [None, 'sql', 'python', 'php']:
raise SQLParseError('Unknown output format: %r' % ofrmt)
strip_comments = options.get('strip_comments', False)
if strip_comments not in [True, False]:
raise SQLParseError('Invalid value for strip_comments: %r'
% strip_comments)
strip_ws = options.get('strip_whitespace', False)
if strip_ws not in [True, False]:
raise SQLParseError('Invalid value for strip_whitespace: %r'
% strip_ws)
truncate_strings = options.get('truncate_strings', None)
if truncate_strings is not None:
try:
truncate_strings = int(truncate_strings)
except (ValueError, TypeError):
raise SQLParseError('Invalid value for truncate_strings: %r'
% truncate_strings)
if truncate_strings <= 1:
raise SQLParseError('Invalid value for truncate_strings: %r'
% truncate_strings)
options['truncate_strings'] = truncate_strings
options['truncate_char'] = options.get('truncate_char', '[...]')
reindent = options.get('reindent', False)
if reindent not in [True, False]:
raise SQLParseError('Invalid value for reindent: %r'
% reindent)
elif reindent:
options['strip_whitespace'] = True
indent_tabs = options.get('indent_tabs', False)
if indent_tabs not in [True, False]:
raise SQLParseError('Invalid value for indent_tabs: %r' % indent_tabs)
elif indent_tabs:
options['indent_char'] = '\t'
else:
options['indent_char'] = ' '
indent_width = options.get('indent_width', 2)
try:
indent_width = int(indent_width)
except (TypeError, ValueError):
raise SQLParseError('indent_width requires an integer')
if indent_width < 1:
raise SQLParseError('indent_width requires a positive integer')
options['indent_width'] = indent_width
right_margin = options.get('right_margin', None)
if right_margin is not None:
try:
right_margin = int(right_margin)
except (TypeError, ValueError):
raise SQLParseError('right_margin requires an integer')
if right_margin < 10:
raise SQLParseError('right_margin requires an integer >= 10')
options['right_margin'] = right_margin
return options
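# A small sketch of the normalization above: validated options gain
# derived keys such as indent_char, and reindent forces strip_whitespace.
def _demo_validate_options():
    opts = validate_options({'keyword_case': 'upper', 'reindent': True})
    # (' ', 2, True): reindent switched strip_whitespace on
    return opts['indent_char'], opts['indent_width'], opts['strip_whitespace']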
def build_filter_stack(stack, options):
"""Setup and return a filter stack.
Args:
stack: :class:`~sqlparse.filters.FilterStack` instance
options: Dictionary with options validated by validate_options.
"""
# Token filter
if options.get('keyword_case', None):
stack.preprocess.append(
filters.KeywordCaseFilter(options['keyword_case']))
if options.get('identifier_case', None):
stack.preprocess.append(
filters.IdentifierCaseFilter(options['identifier_case']))
if options.get('truncate_strings', None) is not None:
stack.preprocess.append(filters.TruncateStringFilter(
width=options['truncate_strings'], char=options['truncate_char']))
# After grouping
if options.get('strip_comments', False):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripCommentsFilter())
if (options.get('strip_whitespace', False)
or options.get('reindent', False)):
stack.enable_grouping()
stack.stmtprocess.append(filters.StripWhitespaceFilter())
if options.get('reindent', False):
stack.enable_grouping()
stack.stmtprocess.append(
filters.ReindentFilter(char=options['indent_char'],
width=options['indent_width']))
if options.get('right_margin', False):
stack.enable_grouping()
stack.stmtprocess.append(
filters.RightMarginFilter(width=options['right_margin']))
# Serializer
if options.get('output_format'):
frmt = options['output_format']
if frmt.lower() == 'php':
fltr = filters.OutputPHPFilter()
elif frmt.lower() == 'python':
fltr = filters.OutputPythonFilter()
else:
fltr = None
if fltr is not None:
stack.postprocess.append(fltr)
return stack
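# A sketch of how these two helpers are typically combined (this mirrors
# sqlparse.format(); FilterStack lives in sqlparse.engine, and
# SerializerUnicode renders the processed statements back to text):
def _demo_format(sql):
    from sqlparse.engine import FilterStack
    stack = FilterStack()
    options = validate_options({'keyword_case': 'upper', 'reindent': True})
    stack = build_filter_stack(stack, options)
    stack.postprocess.append(filters.SerializerUnicode())
    return ''.join(stack.run(sql))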
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import sys
from pants.reporting.report import Report
class MockLogger(object):
"""A standalone logger that writes to stderr.
:API: public
Useful for testing without requiring the full RunTracker reporting framework.
"""
def __init__(self, level=Report.INFO):
self._level = level
def _maybe_log(self, level, *msg_elements):
if level <= self._level:
sys.stderr.write(''.join(msg_elements))
def debug(self, *msg_elements):
"""
:API: public
"""
self._maybe_log(Report.DEBUG, *msg_elements)
def info(self, *msg_elements):
"""
:API: public
"""
self._maybe_log(Report.INFO, *msg_elements)
def warn(self, *msg_elements):
"""
:API: public
"""
self._maybe_log(Report.WARN, *msg_elements)
def error(self, *msg_elements):
"""
:API: public
"""
self._maybe_log(Report.ERROR, *msg_elements)
def fatal(self, *msg_elements):
"""
:API: public
"""
self._maybe_log(Report.FATAL, *msg_elements)
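# A short usage sketch, assuming Report numbers its levels FATAL(0)
# through DEBUG(4) as the `<=` comparison in _maybe_log implies: at the
# default INFO level, debug() is filtered out and info() reaches stderr.
def _demo_mock_logger():
    logger = MockLogger()
    logger.debug('not written\n')        # DEBUG is past the INFO threshold
    logger.info('written to stderr\n')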
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL ().
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see .
#
##############################################################################
{
'name': 'Point of Sale',
'version': '1.0.2',
'category': 'Point Of Sale',
'sequence': 6,
'summary': 'Touchscreen Interface for Shops',
'description': """
Quick and Easy sale process
===========================
This module allows you to manage your shop sales very easily with a fully web based touchscreen interface.
It is compatible with all PC tablets and the iPad, offering multiple payment methods.
Product selection can be done in several ways:
* Using a barcode reader
* Browsing through categories of products or via a text search.
Main Features
-------------
* Fast encoding of the sale
* Choose one payment method (the quick way) or split the payment between several payment methods
* Computation of the amount of money to return
* Create and confirm the picking list automatically
* Allows the user to create an invoice automatically
* Refund previous sales
""",
'author': 'OpenERP SA',
'images': ['images/pos_touch_screen.jpeg', 'images/pos_session.jpeg', 'images/pos_analysis.jpeg','images/sale_order_pos.jpeg','images/product_pos.jpeg'],
'depends': ['sale_stock','product_images'],
'data': [
'security/point_of_sale_security.xml',
'security/ir.model.access.csv',
'wizard/pos_details.xml',
'wizard/pos_confirm.xml',
'wizard/pos_discount.xml',
'wizard/pos_open_statement.xml',
'wizard/pos_payment_report_user_view.xml',
'wizard/pos_sales_user.xml',
'wizard/pos_receipt_view.xml',
'wizard/pos_payment_report_user.xml',
'wizard/pos_payment_report.xml',
'wizard/pos_payment.xml',
'wizard/pos_box.xml',
'wizard/pos_session_opening.xml',
'point_of_sale_report.xml',
'point_of_sale_view.xml',
'point_of_sale_data.xml',
'report/pos_order_report_view.xml',
'point_of_sale_sequence.xml',
'point_of_sale_workflow.xml',
'account_statement_view.xml',
'account_statement_report.xml',
'res_users_view.xml',
'res_partner_view.xml',
],
'demo': [
'point_of_sale_demo.xml',
'account_statement_demo.xml',
'test/00_register_open.yml'
],
'test': [
'test/01_order_to_payment.yml',
'test/02_order_to_invoice.yml',
'test/point_of_sale_report.yml'
],
'installable': True,
'application': True,
'js': [
'static/lib/mousewheel/jquery.mousewheel-3.0.6.js',
'static/src/js/db.js',
'static/src/js/models.js',
'static/src/js/widget_base.js',
'static/src/js/widget_keyboard.js',
'static/src/js/widget_scrollbar.js',
'static/src/js/widgets.js',
'static/src/js/devices.js',
'static/src/js/screens.js',
'static/src/js/main.js',
],
'css': [
'static/src/css/pos.css', # this is the default css with hover effects
#'static/src/css/pos_nohover.css', # this css has no hover effects (for resistive touchscreens)
'static/src/css/keyboard.css'
],
'qweb': ['static/src/xml/pos.xml'],
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
"""
Base file for testing email sending functionality
"""
import datetime
import logging
from collections import namedtuple
from copy import deepcopy
from unittest.mock import Mock, patch
import attr
import ddt
import pytz
from django.conf import settings
from django.contrib.auth.models import User # lint-amnesty, pylint: disable=imported-auth-user
from django.db.models import Max
from edx_ace.channel import ChannelMap, ChannelType
from edx_ace.test_utils import StubPolicy, patch_policies
from edx_ace.utils.date import serialize
from freezegun import freeze_time
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.course_modes.models import CourseMode
from common.djangoapps.course_modes.tests.factories import CourseModeFactory
from lms.djangoapps.courseware.models import DynamicUpgradeDeadlineConfiguration
from lms.djangoapps.commerce.models import CommerceConfiguration
from openedx.core.djangoapps.schedules import resolvers, tasks
from openedx.core.djangoapps.schedules.resolvers import _get_datetime_beginning_of_day
from openedx.core.djangoapps.schedules.tests.factories import ScheduleConfigFactory, ScheduleFactory
from openedx.core.djangoapps.site_configuration.tests.factories import SiteConfigurationFactory, SiteFactory
from openedx.core.djangoapps.theming.tests.test_util import with_comprehensive_theme
from openedx.core.djangoapps.waffle_utils.testutils import WAFFLE_TABLES
from openedx.core.djangolib.testing.utils import FilteredQueryCountMixin
from common.djangoapps.student.models import CourseEnrollment
from common.djangoapps.student.tests.factories import UserFactory
SITE_QUERY = 1 # django_site
SITE_CONFIG_QUERY = 1 # site_configuration_siteconfiguration
SCHEDULES_QUERY = 1 # schedules_schedule
COURSE_MODES_QUERY = 1 # course_modes_coursemode
GLOBAL_DEADLINE_QUERY = 1 # courseware_dynamicupgradedeadlineconfiguration
ORG_DEADLINE_QUERY = 1 # courseware_orgdynamicupgradedeadlineconfiguration
COURSE_DEADLINE_QUERY = 1 # courseware_coursedynamicupgradedeadlineconfiguration
COMMERCE_CONFIG_QUERY = 1 # commerce_commerceconfiguration
USER_QUERY = 1 # auth_user
THEME_PREVIEW_QUERY = 1
THEME_QUERY = 1 # theming_sitetheme
SCHEDULE_CONFIG_QUERY = 1 # schedules_scheduleconfig
NUM_QUERIES_SITE_SCHEDULES = (
SITE_QUERY +
SITE_CONFIG_QUERY +
THEME_QUERY +
SCHEDULES_QUERY
)
NUM_QUERIES_FIRST_MATCH = (
NUM_QUERIES_SITE_SCHEDULES
+ GLOBAL_DEADLINE_QUERY
+ ORG_DEADLINE_QUERY
+ COURSE_DEADLINE_QUERY
+ COMMERCE_CONFIG_QUERY
)
NUM_QUERIES_PER_MESSAGE_DELIVERY = (
SITE_QUERY +
SCHEDULE_CONFIG_QUERY +
USER_QUERY +
THEME_PREVIEW_QUERY +
THEME_QUERY
)
LOG = logging.getLogger(__name__)
ExperienceTest = namedtuple('ExperienceTest', 'experience offset email_sent')
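# A quick sketch of the bin arithmetic used by the mixin below:
# _calculate_bin_for_user maps user.id % num_bins, and _next_user_id
# returns the next multiple of num_bins above the current max id, so
# freshly created users land deterministically in bin 0.
def _demo_bin_arithmetic(max_user_id=17, num_bins=4):
    next_id = max_user_id + num_bins - (max_user_id % num_bins)
    return next_id, next_id % num_bins    # (20, 0)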
@ddt.ddt
@freeze_time('2017-08-01 00:00:00', tz_offset=0, tick=True)
class ScheduleSendEmailTestMixin(FilteredQueryCountMixin): # lint-amnesty, pylint: disable=missing-class-docstring
__test__ = False
ENABLED_CACHES = ['default']
queries_deadline_for_each_course = False
consolidates_emails_for_learner = False
def setUp(self):
super().setUp()
site = SiteFactory.create()
self.site_config = SiteConfigurationFactory.create(site=site)
ScheduleConfigFactory.create(site=self.site_config.site)
DynamicUpgradeDeadlineConfiguration.objects.create(enabled=True)
CommerceConfiguration.objects.create(checkout_on_ecommerce_service=True)
self._courses_with_verified_modes = set()
def _calculate_bin_for_user(self, user):
return user.id % self.task.num_bins
def _next_user_id(self):
"""
Get the next user ID which is a multiple of the bin count and greater
than the current largest user ID. Avoids intermittent ID collisions
with the user created in ModuleStoreTestCase.setUp().
"""
max_user_id = User.objects.aggregate(Max('id'))['id__max']
if max_user_id is None:
max_user_id = 0
num_bins = self.task.num_bins
return max_user_id + num_bins - (max_user_id % num_bins)
def _get_dates(self, offset=None): # lint-amnesty, pylint: disable=missing-function-docstring
current_day = _get_datetime_beginning_of_day(datetime.datetime.now(pytz.UTC))
offset = offset or self.expected_offsets[0]
target_day = current_day + datetime.timedelta(days=offset)
if self.resolver.schedule_date_field == 'upgrade_deadline':
upgrade_deadline = target_day
else:
upgrade_deadline = current_day + datetime.timedelta(days=7)
return current_day, offset, target_day, upgrade_deadline
def _get_template_overrides(self):
templates_override = deepcopy(settings.TEMPLATES)
templates_override[0]['OPTIONS']['string_if_invalid'] = "TEMPLATE WARNING - MISSING VARIABLE [%s]"
return templates_override
def _schedule_factory(self, offset=None, **factory_kwargs): # lint-amnesty, pylint: disable=missing-function-docstring
_, _, target_day, upgrade_deadline = self._get_dates(offset=offset)
factory_kwargs.setdefault('start_date', target_day)
factory_kwargs.setdefault('upgrade_deadline', upgrade_deadline)
factory_kwargs.setdefault('enrollment__course__self_paced', True)
# Make all schedules in the same course
factory_kwargs.setdefault('enrollment__course__run', '2012_Fall')
if hasattr(self, 'experience_type'):
factory_kwargs.setdefault('experience__experience_type', self.experience_type)
schedule = ScheduleFactory(**factory_kwargs)
course_id = schedule.enrollment.course_id
if course_id not in self._courses_with_verified_modes:
CourseModeFactory(
course_id=course_id,
mode_slug=CourseMode.VERIFIED,
expiration_datetime=datetime.datetime.now(pytz.UTC) + datetime.timedelta(days=30),
)
self._courses_with_verified_modes.add(course_id)
return schedule
def _update_schedule_config(self, schedule_config_kwargs):
"""
Updates the schedule config model by making sure the new entry
has a later timestamp.
"""
later_time = datetime.datetime.now(pytz.UTC) + datetime.timedelta(minutes=1)
with freeze_time(later_time):
ScheduleConfigFactory.create(**schedule_config_kwargs)
def test_command_task_binding(self):
assert self.command.async_send_task == self.task
def test_handle(self):
with patch.object(self.command, 'async_send_task') as mock_send:
test_day = datetime.datetime(2017, 8, 1, tzinfo=pytz.UTC)
self.command().handle(date='2017-08-01', site_domain_name=self.site_config.site.domain)
for offset in self.expected_offsets:
mock_send.enqueue.assert_any_call(
self.site_config.site,
test_day,
offset,
None
)
@patch.object(tasks, 'ace')
def test_resolver_send(self, mock_ace):
current_day, offset, target_day, _ = self._get_dates()
with patch.object(self.task, 'apply_async') as mock_apply_async:
self.task.enqueue(self.site_config.site, current_day, offset)
mock_apply_async.assert_any_call(
(self.site_config.site.id, serialize(target_day), offset, 0, None),
retry=False,
)
mock_apply_async.assert_any_call(
(self.site_config.site.id, serialize(target_day), offset, self.task.num_bins - 1, None),
retry=False,
)
assert not mock_ace.send.called
@ddt.data(1, 10, 100)
@patch.object(tasks, 'ace')
@patch.object(resolvers, 'set_custom_attribute')
def test_schedule_bin(self, schedule_count, mock_attribute, mock_ace):
with patch.object(self.task, 'async_send_task') as mock_schedule_send:
current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable
schedules = [
self._schedule_factory() for _ in range(schedule_count)
]
bins_in_use = frozenset((self._calculate_bin_for_user(s.enrollment.user)) for s in schedules)
is_first_match = True
target_day_str = serialize(target_day)
for b in range(self.task.num_bins):
LOG.debug('Checking bin %d', b)
expected_queries = NUM_QUERIES_SITE_SCHEDULES
if b in bins_in_use:
if is_first_match:
expected_queries = (
# Since this is the first match, we need to cache all of the config models, so we run a
# query for each of those...
NUM_QUERIES_FIRST_MATCH
+ COURSE_MODES_QUERY # to cache the course modes for this course
)
is_first_match = False
with self.assertNumQueries(expected_queries, table_blacklist=WAFFLE_TABLES):
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id, target_day_str=target_day_str, day_offset=offset, bin_num=b,
))
num_schedules = mock_attribute.call_args[0][1]
if b in bins_in_use:
assert num_schedules > 0
else:
assert num_schedules == 0
assert mock_schedule_send.apply_async.call_count == schedule_count
assert not mock_ace.send.called
def test_no_course_overview(self):
current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable
# Don't use CourseEnrollmentFactory since it creates a course overview
enrollment = CourseEnrollment.objects.create(
course_id=CourseKey.from_string('edX/toy/Not_2012_Fall'),
user=UserFactory.create(),
)
self._schedule_factory(enrollment=enrollment)
with patch.object(self.task, 'async_send_task') as mock_schedule_send:
for bin_num in range(self.task().num_bins):
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id,
target_day_str=serialize(target_day),
day_offset=offset,
bin_num=bin_num,
))
            # There is no database constraint that enforces that enrollment.course_id
            # points to a valid CourseOverview object. When it doesn't, the schedules
            # app makes no attempt to address it and simply skips those users. This
            # happens 'transparently' because django generates an inner join between
            # enrollment and course_overview, and thus skips any rows where
            # course_overview is null.
assert mock_schedule_send.apply_async.call_count == 0
@ddt.data(True, False)
@patch.object(tasks, 'ace')
@patch.object(tasks, 'Message')
def test_deliver_config(self, is_enabled, mock_message, mock_ace):
user = UserFactory.create()
schedule_config_kwargs = {
'site': self.site_config.site,
self.deliver_config: is_enabled,
}
self._update_schedule_config(schedule_config_kwargs)
mock_message.from_string.return_value.recipient.lms_user_id = user.id
mock_msg = Mock()
self.deliver_task(self.site_config.site.id, mock_msg)
if is_enabled:
assert mock_ace.send.called
else:
assert not mock_ace.send.called
@ddt.data(True, False)
def test_enqueue_config(self, is_enabled):
schedule_config_kwargs = {
'site': self.site_config.site,
self.enqueue_config: is_enabled,
}
self._update_schedule_config(schedule_config_kwargs)
current_datetime = datetime.datetime(2017, 8, 1, tzinfo=pytz.UTC)
with patch.object(self.task, 'apply_async') as mock_apply_async:
self.task.enqueue(self.site_config.site, current_datetime, 3)
if is_enabled:
assert mock_apply_async.called
else:
assert not mock_apply_async.called
@patch.object(tasks, 'ace')
@ddt.data(
((['filtered_org'], [], 1)),
(([], ['filtered_org'], 2))
)
@ddt.unpack
def test_site_config(self, this_org_list, other_org_list, expected_message_count, mock_ace):
filtered_org = 'filtered_org'
unfiltered_org = 'unfiltered_org'
this_config = SiteConfigurationFactory.create(
site_values={'course_org_filter': this_org_list}
)
other_config = SiteConfigurationFactory.create(
site_values={'course_org_filter': other_org_list}
)
for config in (this_config, other_config):
ScheduleConfigFactory.create(site=config.site)
user1 = UserFactory.create(id=self._next_user_id())
user2 = UserFactory.create(id=user1.id + self.task.num_bins)
current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable
self._schedule_factory(
enrollment__course__org=filtered_org,
enrollment__user=user1,
)
self._schedule_factory(
enrollment__course__org=unfiltered_org,
enrollment__user=user1,
)
self._schedule_factory(
enrollment__course__org=unfiltered_org,
enrollment__user=user2,
)
with patch.object(self.task, 'async_send_task') as mock_schedule_send:
self.task().apply(kwargs=dict(
site_id=this_config.site.id, target_day_str=serialize(target_day), day_offset=offset, bin_num=0
))
assert mock_schedule_send.apply_async.call_count == expected_message_count
assert not mock_ace.send.called
@ddt.data(True, False)
def test_course_end(self, has_course_ended):
user1 = UserFactory.create(id=self._next_user_id())
current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable
end_date_offset = -2 if has_course_ended else 2
self._schedule_factory(
enrollment__user=user1,
enrollment__course__start=current_day - datetime.timedelta(days=30),
enrollment__course__end=current_day + datetime.timedelta(days=end_date_offset)
)
with patch.object(self.task, 'async_send_task') as mock_schedule_send:
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset, bin_num=0,
))
if has_course_ended:
assert not mock_schedule_send.apply_async.called
else:
assert mock_schedule_send.apply_async.called
@patch.object(tasks, 'ace')
def test_multiple_target_schedules(self, mock_ace):
user = UserFactory.create()
current_day, offset, target_day, upgrade_deadline = self._get_dates() # lint-amnesty, pylint: disable=unused-variable
num_courses = 3
for course_index in range(num_courses):
self._schedule_factory(
enrollment__user=user,
enrollment__course__id=CourseKey.from_string(f'edX/toy/course{course_index}')
)
        # Two queries per course: one for the course opt-out and one for the
        # course modes. If we aren't checking the deadline for each course, there
        # is just one course-modes query, for the first schedule.
additional_course_queries = (num_courses * 2) - 1 if self.queries_deadline_for_each_course else 1
expected_query_count = NUM_QUERIES_FIRST_MATCH + additional_course_queries
with self.assertNumQueries(expected_query_count, table_blacklist=WAFFLE_TABLES):
with patch.object(self.task, 'async_send_task') as mock_schedule_send:
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset,
bin_num=self._calculate_bin_for_user(user),
))
expected_call_count = 1 if self.consolidates_emails_for_learner else num_courses
assert mock_schedule_send.apply_async.call_count == expected_call_count
assert not mock_ace.send.called
@ddt.data(
1, 10
)
def test_templates(self, message_count):
for offset in self.expected_offsets:
self._assert_template_for_offset(offset, message_count)
self.clear_caches()
def _assert_template_for_offset(self, offset, message_count): # lint-amnesty, pylint: disable=missing-function-docstring
current_day, offset, target_day, upgrade_deadline = self._get_dates(offset) # lint-amnesty, pylint: disable=unused-variable
user = UserFactory.create()
for course_index in range(message_count):
self._schedule_factory(
offset=offset,
enrollment__user=user,
enrollment__course__id=CourseKey.from_string(f'edX/toy/course{course_index}')
)
patch_policies(self, [StubPolicy([ChannelType.PUSH])])
mock_channel = Mock(
channel_type=ChannelType.EMAIL,
action_links=[],
tracker_image_sources=[],
)
channel_map = ChannelMap([
['sailthru', mock_channel],
])
sent_messages = []
with self.settings(TEMPLATES=self._get_template_overrides()):
with patch.object(self.task, 'async_send_task') as mock_schedule_send:
mock_schedule_send.apply_async = lambda args, *_a, **_kw: sent_messages.append(args)
num_expected_queries = NUM_QUERIES_FIRST_MATCH
if self.queries_deadline_for_each_course:
# one query per course for opt-out and one for course modes
num_expected_queries += (message_count * 2) - 1
else:
num_expected_queries += 1
with self.assertNumQueries(num_expected_queries, table_blacklist=WAFFLE_TABLES):
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset,
bin_num=self._calculate_bin_for_user(user),
))
num_expected_messages = 1 if self.consolidates_emails_for_learner else message_count
assert len(sent_messages) == num_expected_messages
with self.assertNumQueries(NUM_QUERIES_PER_MESSAGE_DELIVERY):
with patch('openedx.core.djangoapps.schedules.tasks.segment.track') as mock_segment_track:
with patch('edx_ace.channel.channels', return_value=channel_map):
self.deliver_task(*sent_messages[0])
assert mock_segment_track.call_count == 1
assert mock_channel.deliver.call_count == 1
for (_name, (_msg, email), _kwargs) in mock_channel.deliver.mock_calls:
for template in attr.astuple(email):
assert 'TEMPLATE WARNING' not in template
assert '{{' not in template
assert '}}' not in template
return mock_channel.deliver.mock_calls
def _check_if_email_sent_for_experience(self, test_config): # lint-amnesty, pylint: disable=missing-function-docstring
current_day, offset, target_day, _ = self._get_dates(offset=test_config.offset) # lint-amnesty, pylint: disable=unused-variable
kwargs = {
'offset': offset
}
if test_config.experience is None:
kwargs['experience'] = None
else:
kwargs['experience__experience_type'] = test_config.experience
schedule = self._schedule_factory(**kwargs)
with patch.object(tasks, 'ace') as mock_ace:
self.task().apply(kwargs=dict(
site_id=self.site_config.site.id, target_day_str=serialize(target_day), day_offset=offset,
bin_num=self._calculate_bin_for_user(schedule.enrollment.user),
))
assert mock_ace.send.called == test_config.email_sent
@with_comprehensive_theme('red-theme')
def test_templates_with_theme(self):
calls_to_deliver = self._assert_template_for_offset(self.expected_offsets[0], 1)
_name, (_msg, email), _kwargs = calls_to_deliver[0]
assert 'TEST RED THEME MARKER' in email.body_html
'''This module contains various I/O helper methods'''
import numpy
def get_coord(filename):
    f = open(filename, 'r')
# first count number of atoms to instantiate coord array
n = 0
for line in f:
if 'ATOM' in line:
n += 1
coord = numpy.empty((n,3))
pdb_text = [] # will hold the pdb file text
i = 0
f.seek(0)
for line in f:
if 'ATOM' in line:
pdb_text.append(line)
coord[i,0] = float(line[31:38])
coord[i,1] = float(line[39:46])
coord[i,2] = float(line[47:54])
i += 1
f.close()
    assert i == n
return coord, pdb_text
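# Hedged usage sketch for get_coord (not part of the original module;
# 'protein.pdb' is a hypothetical filename): coord is an (n_atoms, 3)
# numpy array and pdb_text holds the matching ATOM records.
def _get_coord_example():
    coord, pdb_text = get_coord('protein.pdb')
    assert coord.shape == (len(pdb_text), 3)
    return coord, pdb_text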
def writeseqpdb(mpos,text,posline,move):
'''Deprecated'''
# multi file pdb output
j=0
write=''
for i in posline:
        words = text[i][0:30]
        coordstr = ''
        coordstr += '%8.3f' % mpos[j, 0]
        coordstr += '%8.3f' % mpos[j, 1]
        coordstr += '%8.3f' % mpos[j, 2]
        coordstr += '\r\n'
j+=1
text[i]=words+coordstr
    f = open(str(move) + '.pdb', 'w')
    for k in range(len(text)):  # don't want 'END'
        write = write + text[k]
    f.write(write)
    f.close()
def writepdb(mpos,text,posline,move,filename):
'''Deprecated'''
# 1 file pdb output
j=0
for i in posline:
words=[text[i][0:30],'%8.3f' %(mpos[j,0]),'%8.3f' %(mpos[j,1]),'%8.3f'%(mpos[j,2]),'\r\n']
j=j+1
text[i]="".join(words)
    f = open(filename, 'w')
    f.write('MODEL %i\r\n' % (move))  # check moves here
    write = "".join(text[0:-1])
    f.write(write)
    f.write('ENDMDL\r\n')
    f.close()
def addtopdb(mpos,coordtext,move,filename):
'''Deprecated'''
# 1 file pdb output
for i in range(len(coordtext)):
words=[coordtext[i][0:30],'%8.3f' %(mpos[i,0]),'%8.3f' %(mpos[i,1]),'%8.3f'%(mpos[i,2]),'\r\n']
coordtext[i]="".join(words)
    f = open(filename, 'a')
    f.write('MODEL %i\r\n' % (move))
    write = "".join(coordtext)
    f.write(write)
    f.write('ENDMDL\r\n')
    f.close()
def addconnect(filename,k):
#filename = .pdb file for linear chain polymer/protein without bonds
#k = number of beads in polymer/protein
f=open(filename,'a')
text=''
text=text+'CONECT 1 2\r\n'
for i in range(2,k):
        text = text + 'CONECT '
        text = text + '%3.0f' % i
        text = text + ' '
        text = text + '%3.0f' % (i - 1)
        text = text + ' '
        text = text + '%3.0f' % (i + 1)
        text = text + '\r\n'
text=text+'CONECT '+str(k)+' '+str(k-1)+'\r\nEND\r\n'
f.write(text)
f.close()
def getmovietransform(nativecoord):
'''Deprecated'''
nc=nativecoord.copy()
translate= nc[0,:]
nc -= translate
BC = nc[1,:]
x1 = BC/numpy.dot(BC,BC)**.5
    AB = numpy.array([.5, .5, .5])  # random, but constant for all simulations
y1 = AB-numpy.dot(AB,BC)/numpy.dot(BC,BC)*BC
y1 = y1/numpy.sum(y1**2)**.5
z1 = numpy.cross(x1,y1)
return numpy.array([x1,y1,z1])
def getmovietransform_old(nativecoord):
'''Deprecated'''
nc = nativecoord.copy()
center = len(nc)/2
translate = nc[center,:]
translate = translate.copy()
for i in range(len(nc)):
nc[i,:] -= translate
BC = nc[center+1,:]
x1 = BC/numpy.dot(BC,BC)**.5
    AB = [.5, .5, .5]  # random, but constant for all simulations
y1 = AB-numpy.dot(AB,BC)/numpy.dot(BC,BC)*BC
y1 = y1/numpy.dot(y1,y1)**.5
z1 = numpy.cross(x1,y1)
return [x1,y1,z1]
def moviecoord(mpos123,transform):
'''Deprecated'''
mpos = mpos123.copy()
mpos[0,:] = numpy.zeros(3)
bond = mpos123[1:len(mpos123),:]-mpos123[0:-1,:]
bond = numpy.dot(bond,transform)
for i in xrange(len(mpos)-1):
mpos[i+1,:] = mpos[i,:]+bond[i,:]
return mpos
def moviecoord_old(mpos123,transform):
'''Deprecated'''
mpos=mpos123.copy()
center=len(mpos)/2
translate=mpos[center,:]
mpos-=translate
for i in range(center,len(mpos)-1):
BC=mpos123[i+1,:]-mpos123[i,:]
        BCnew = numpy.dot(transform, BC.transpose())
mpos[i+1,:]=mpos[i,:]+BCnew
for i in range(center,0,-1):
BC=mpos123[i-1,:]-mpos123[i,:]
        BCnew = numpy.dot(transform, BC.transpose())
mpos[i-1,:]=mpos[i,:]+BCnew
return mpos
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Python source expertise for coverage.py"""
import os.path
import types
import zipimport
from coverage import env, files
from coverage.misc import (
contract, CoverageException, expensive, NoSource, join_regex, isolate_module,
)
from coverage.parser import PythonParser
from coverage.phystokens import source_token_lines, source_encoding
from coverage.plugin import FileReporter
os = isolate_module(os)
@contract(returns='bytes')
def read_python_source(filename):
"""Read the Python source text from `filename`.
Returns bytes.
"""
with open(filename, "rb") as f:
return f.read().replace(b"\r\n", b"\n").replace(b"\r", b"\n")
@contract(returns='unicode')
def get_python_source(filename):
"""Return the source code, as unicode."""
base, ext = os.path.splitext(filename)
if ext == ".py" and env.WINDOWS:
exts = [".py", ".pyw"]
else:
exts = [ext]
for ext in exts:
try_filename = base + ext
if os.path.exists(try_filename):
# A regular text file: open it.
source = read_python_source(try_filename)
break
# Maybe it's in a zip file?
source = get_zip_bytes(try_filename)
if source is not None:
break
else:
# Couldn't find source.
exc_msg = "No source for code: '%s'.\n" % (filename,)
exc_msg += "Aborting report output, consider using -i."
raise NoSource(exc_msg)
# Replace \f because of http://bugs.python.org/issue19035
source = source.replace(b'\f', b' ')
source = source.decode(source_encoding(source), "replace")
# Python code should always end with a line with a newline.
if source and source[-1] != '\n':
source += '\n'
return source
@contract(returns='bytes|None')
def get_zip_bytes(filename):
"""Get data from `filename` if it is a zip file path.
Returns the bytestring data read from the zip file, or None if no zip file
could be found or `filename` isn't in it. The data returned will be
an empty string if the file is empty.
"""
markers = ['.zip'+os.sep, '.egg'+os.sep]
for marker in markers:
if marker in filename:
parts = filename.split(marker)
try:
zi = zipimport.zipimporter(parts[0]+marker[:-1])
except zipimport.ZipImportError:
continue
try:
data = zi.get_data(parts[1])
except IOError:
continue
return data
return None
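# Worked example (a sketch, not original text): for
# filename = '/path/pkg.egg/pkg/mod.py', the '.egg' + os.sep marker splits
# it into the importer root '/path/pkg.egg' and the archive-relative name
# 'pkg/mod.py' that is handed to zipimporter.get_data().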
class PythonFileReporter(FileReporter):
"""Report support for a Python file."""
def __init__(self, morf, coverage=None):
self.coverage = coverage
if hasattr(morf, '__file__'):
filename = morf.__file__
elif isinstance(morf, types.ModuleType):
# A module should have had .__file__, otherwise we can't use it.
# This could be a PEP-420 namespace package.
raise CoverageException("Module {0} has no file".format(morf))
else:
filename = morf
filename = files.unicode_filename(filename)
# .pyc files should always refer to a .py instead.
if filename.endswith(('.pyc', '.pyo')):
filename = filename[:-1]
elif filename.endswith('$py.class'): # Jython
filename = filename[:-9] + ".py"
super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
if hasattr(morf, '__name__'):
name = morf.__name__
name = name.replace(".", os.sep) + ".py"
name = files.unicode_filename(name)
else:
name = files.relative_filename(filename)
self.relname = name
self._source = None
self._parser = None
self._statements = None
self._excluded = None
@contract(returns='unicode')
def relative_filename(self):
return self.relname
@property
def parser(self):
"""Lazily create a :class:`PythonParser`."""
if self._parser is None:
self._parser = PythonParser(
filename=self.filename,
exclude=self.coverage._exclude_regex('exclude'),
)
self._parser.parse_source()
return self._parser
def lines(self):
"""Return the line numbers of statements in the file."""
return self.parser.statements
def excluded_lines(self):
"""Return the line numbers of statements in the file."""
return self.parser.excluded
def translate_lines(self, lines):
return self.parser.translate_lines(lines)
def translate_arcs(self, arcs):
return self.parser.translate_arcs(arcs)
@expensive
def no_branch_lines(self):
no_branch = self.parser.lines_matching(
join_regex(self.coverage.config.partial_list),
join_regex(self.coverage.config.partial_always_list)
)
return no_branch
@expensive
def arcs(self):
return self.parser.arcs()
@expensive
def exit_counts(self):
return self.parser.exit_counts()
def missing_arc_description(self, start, end, executed_arcs=None):
return self.parser.missing_arc_description(start, end, executed_arcs)
@contract(returns='unicode')
def source(self):
if self._source is None:
self._source = get_python_source(self.filename)
return self._source
def should_be_python(self):
"""Does it seem like this file should contain Python?
This is used to decide if a file reported as part of the execution of
a program was really likely to have contained Python in the first
place.
"""
# Get the file extension.
_, ext = os.path.splitext(self.filename)
# Anything named *.py* should be Python.
if ext.startswith('.py'):
return True
# A file with no extension should be Python.
if not ext:
return True
# Everything else is probably not Python.
return False
def source_token_lines(self):
return source_token_lines(self.source())
# -*- coding: utf-8 -*-
# Open Source Initiative OSI - The MIT License (MIT):Licensing
#
# The MIT License (MIT)
# Copyright (c) 2012 DotCloud Inc (opensource@dotcloud.com)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import msgpack
import gevent.pool
import gevent.queue
import gevent.event
import gevent.local
import gevent.lock
import gevent_zmq as zmq
from .context import Context
class Sender(object):
def __init__(self, socket):
self._socket = socket
self._send_queue = gevent.queue.Channel()
self._send_task = gevent.spawn(self._sender)
def __del__(self):
self.close()
def close(self):
if self._send_task:
self._send_task.kill()
def _sender(self):
running = True
for parts in self._send_queue:
for i in xrange(len(parts) - 1):
try:
self._socket.send(parts[i], flags=zmq.SNDMORE)
except gevent.GreenletExit:
if i == 0:
return
running = False
self._socket.send(parts[i], flags=zmq.SNDMORE)
self._socket.send(parts[-1])
if not running:
return
def __call__(self, parts):
self._send_queue.put(parts)
class Receiver(object):
def __init__(self, socket):
self._socket = socket
self._recv_queue = gevent.queue.Channel()
self._recv_task = gevent.spawn(self._recver)
def __del__(self):
self.close()
def close(self):
if self._recv_task:
self._recv_task.kill()
def _recver(self):
running = True
while True:
parts = []
while True:
try:
part = self._socket.recv()
except gevent.GreenletExit:
running = False
if len(parts) == 0:
return
part = self._socket.recv()
parts.append(part)
if not self._socket.getsockopt(zmq.RCVMORE):
break
if not running:
break
self._recv_queue.put(parts)
def __call__(self):
return self._recv_queue.get()
class Event(object):
__slots__ = [ '_name', '_args', '_header' ]
def __init__(self, name, args, context, header=None):
self._name = name
self._args = args
if header is None:
self._header = {
'message_id': context.new_msgid(),
'v': 3
}
else:
self._header = header
@property
def header(self):
return self._header
@property
def name(self):
return self._name
@name.setter
def name(self, v):
self._name = v
@property
def args(self):
return self._args
def pack(self):
return msgpack.Packer().pack((self._header, self._name, self._args))
@staticmethod
def unpack(blob):
unpacker = msgpack.Unpacker()
unpacker.feed(blob)
unpacked_msg = unpacker.unpack()
try:
(header, name, args) = unpacked_msg
except Exception as e:
raise Exception('invalid msg format "{0}": {1}'.format(
unpacked_msg, e))
# Backward compatibility
if not isinstance(header, dict):
header = {}
return Event(name, args, None, header)
def __str__(self, ignore_args=False):
if ignore_args:
args = '[...]'
else:
args = self._args
try:
args = '<<{0}>>'.format(str(self.unpack(self._args)))
except:
pass
return '{0} {1} {2}'.format(self._name, self._header,
args)
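# Round-trip sketch for Event.pack/Event.unpack (not part of the original
# module). Supplying a header up front avoids needing a Context to mint a
# message id.
def _event_roundtrip_example():
    ev = Event('ping', ('hello',), None, header={'message_id': 1, 'v': 3})
    ev2 = Event.unpack(ev.pack())
    assert ev2.name == 'ping'
    assert list(ev2.args) == ['hello']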
class Events(object):
def __init__(self, zmq_socket_type, context=None):
self._zmq_socket_type = zmq_socket_type
self._context = context or Context.get_instance()
self._socket = zmq.Socket(self._context, zmq_socket_type)
self._send = self._socket.send_multipart
self._recv = self._socket.recv_multipart
if zmq_socket_type in (zmq.PUSH, zmq.PUB, zmq.DEALER, zmq.ROUTER):
self._send = Sender(self._socket)
if zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER):
self._recv = Receiver(self._socket)
@property
def recv_is_available(self):
return self._zmq_socket_type in (zmq.PULL, zmq.SUB, zmq.DEALER, zmq.ROUTER)
def __del__(self):
try:
if not self._socket.closed:
self.close()
except AttributeError:
pass
def close(self):
try:
self._send.close()
except AttributeError:
pass
try:
self._recv.close()
except AttributeError:
pass
self._socket.close()
def _resolve_endpoint(self, endpoint, resolve=True):
if resolve:
endpoint = self._context.hook_resolve_endpoint(endpoint)
if isinstance(endpoint, (tuple, list)):
r = []
for sub_endpoint in endpoint:
r.extend(self._resolve_endpoint(sub_endpoint, resolve))
return r
return [endpoint]
def connect(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.connect(endpoint_))
return r
def bind(self, endpoint, resolve=True):
r = []
for endpoint_ in self._resolve_endpoint(endpoint, resolve):
r.append(self._socket.bind(endpoint_))
return r
def create_event(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = Event(name, args, context=self._context)
for k, v in xheader.items():
if k == 'zmqid':
continue
event.header[k] = v
return event
def emit_event(self, event, identity=None):
if identity is not None:
parts = list(identity)
parts.extend(['', event.pack()])
elif self._zmq_socket_type in (zmq.DEALER, zmq.ROUTER):
parts = ('', event.pack())
else:
parts = (event.pack(),)
self._send(parts)
def emit(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = self.create_event(name, args, xheader)
identity = xheader.get('zmqid', None)
return self.emit_event(event, identity)
def recv(self):
parts = self._recv()
if len(parts) == 1:
identity = None
blob = parts[0]
else:
identity = parts[0:-2]
blob = parts[-1]
event = Event.unpack(blob)
if identity is not None:
event.header['zmqid'] = identity
return event
def setsockopt(self, *args):
return self._socket.setsockopt(*args)
@property
def context(self):
return self._context
class WrappedEvents(object):
def __init__(self, channel):
self._channel = channel
def close(self):
pass
@property
def recv_is_available(self):
return self._channel.recv_is_available
def create_event(self, name, args, xheader=None):
xheader = {} if xheader is None else xheader
event = Event(name, args, self._channel.context)
event.header.update(xheader)
return event
def emit_event(self, event, identity=None):
event_payload = (event.header, event.name, event.args)
wrapper_event = self._channel.create_event('w', event_payload)
self._channel.emit_event(wrapper_event)
def emit(self, name, args, xheader=None):
wrapper_event = self.create_event(name, args, xheader)
self.emit_event(wrapper_event)
def recv(self, timeout=None):
wrapper_event = self._channel.recv()
(header, name, args) = wrapper_event.args
return Event(name, args, None, header)
@property
def context(self):
return self._channel.context
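# Hedged usage sketch for Events (endpoint and peer are hypothetical):
#
#     events = Events(zmq.PUSH)
#     events.connect('tcp://127.0.0.1:4242')
#     events.emit('ping', ('hello',))
#
# A matching Events(zmq.PULL) instance bound on the other side would call
# recv() and get back an Event whose .name is 'ping'.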
"""
GVM
===========
This module provides tools for installing `GVM`_ : the Groovy enVironment Manager
.. _GVM: http://gvmtool.net/
"""
from fabric.api import run
from fabric.contrib.files import sed
from fabtools.system import UnsupportedFamily, distrib_family
from fabtools.require.deb import packages as require_deb_packages
from fabtools.require.oracle_jdk import installed as java
from fabtools.require.pkg import packages as require_pkg_packages
from fabtools.require.rpm import packages as require_rpm_packages
def install(java_version=None):
"""
    Install dependencies (curl and unzip) and install GVM
::
import fabtools
# Install GVM
fabtools.gvm.install()
"""
res = run('gvm help', quiet=True)
if res.failed:
family = distrib_family()
packages = ['curl', 'unzip']
if family == 'debian':
require_deb_packages(packages)
elif family == 'redhat':
require_rpm_packages(packages)
elif family == 'sun':
require_pkg_packages(packages)
else:
raise UnsupportedFamily(supported=['debian', 'redhat', 'sun'])
if java_version is None:
java()
else:
java(version=java_version)
run('curl -s get.gvmtool.net | bash')
user = run('whoami')
run('source "/home/%s/.gvm/bin/gvm-init.sh"' % user)
configFile = "/home/%s/.gvm/etc/config" % user
sed(configFile, 'gvm_auto_answer=false', 'gvm_auto_answer=true')
def install_candidate(candidate, version=None, java_version=None):
"""
Install a candidate
::
import fabtools
# Install a GVM candidate (For example Groovy)
fabtools.gvm.install_candidate('groovy')
"""
install(java_version)
if version is None:
cmd = 'gvm install %s' % candidate
else:
cmd = 'gvm install %s %s' % (candidate, version)
run(cmd)
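# Hedged usage sketch (run from within a fabric task against a prepared
# host; the candidate name and version are examples):
#
#     from fabtools import gvm
#     gvm.install_candidate('groovy')            # latest version
#     gvm.install_candidate('groovy', '2.3.6')   # pinned version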
# coding: utf-8
from msgpack._version import version
from msgpack.exceptions import *
from collections import namedtuple
class ExtType(namedtuple('ExtType', 'code data')):
"""ExtType represents ext type in msgpack."""
def __new__(cls, code, data):
if not isinstance(code, int):
raise TypeError("code must be int")
if not isinstance(data, bytes):
raise TypeError("data must be bytes")
if not 0 <= code <= 127:
raise ValueError("code must be 0~127")
return super(ExtType, cls).__new__(cls, code, data)
import os
if os.environ.get('MSGPACK_PUREPYTHON'):
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
else:
try:
from msgpack._packer import Packer
from msgpack._unpacker import unpack, unpackb, Unpacker
except ImportError:
from msgpack.fallback import Packer, unpack, unpackb, Unpacker
def pack(o, stream, **kwargs):
"""
Pack object `o` and write it to `stream`
See :class:`Packer` for options.
"""
packer = Packer(**kwargs)
stream.write(packer.pack(o))
def packb(o, **kwargs):
"""
Pack object `o` and return packed bytes
See :class:`Packer` for options.
"""
return Packer(**kwargs).pack(o)
# alias for compatibility to simplejson/marshal/pickle.
load = unpack
loads = unpackb
dump = pack
dumps = packb
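# Round-trip sketch for the helpers above (not part of the original module):
def _roundtrip_example():
    payload = {'compact': True, 'schema': 0}
    assert unpackb(packb(payload)) == payload
    # ExtType values survive a round trip as well.
    ext = ExtType(42, b'raw')
    assert unpackb(packb(ext)) == ext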
"""
Copyright (c) 2003-2007 Gustavo Niemeyer
This module offers extensions to the standard python 2.3+
datetime module.
"""
__author__ = "Gustavo Niemeyer "
__license__ = "PSF License"
import datetime
__all__ = ["easter", "EASTER_JULIAN", "EASTER_ORTHODOX", "EASTER_WESTERN"]
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
def easter(year, method=EASTER_WESTERN):
"""
This method was ported from the work done by GM Arts,
on top of the algorithm by Claus Tondering, which was
    based in part on the algorithm of Oudin (1940), as
quoted in "Explanatory Supplement to the Astronomical
Almanac", P. Kenneth Seidelmann, editor.
This algorithm implements three different easter
calculation methods:
1 - Original calculation in Julian calendar, valid in
dates after 326 AD
2 - Original method, with date converted to Gregorian
calendar, valid in years 1583 to 4099
3 - Revised method, in Gregorian calendar, valid in
years 1583 to 4099 as well
These methods are represented by the constants:
EASTER_JULIAN = 1
EASTER_ORTHODOX = 2
EASTER_WESTERN = 3
The default method is method 3.
More about the algorithm may be found at:
http://users.chariot.net.au/~gmarts/eastalg.htm
and
http://www.tondering.dk/claus/calendar.html
"""
if not (1 <= method <= 3):
raise ValueError, "invalid method"
# g - Golden year - 1
# c - Century
# h - (23 - Epact) mod 30
# i - Number of days from March 21 to Paschal Full Moon
# j - Weekday for PFM (0=Sunday, etc)
# p - Number of days from March 21 to Sunday on or before PFM
# (-6 to 28 methods 1 & 3, to 56 for method 2)
# e - Extra days to add for method 2 (converting Julian
# date to Gregorian date)
y = year
g = y % 19
e = 0
if method < 3:
# Old method
i = (19*g+15)%30
j = (y+y//4+i)%7
if method == 2:
# Extra dates to convert Julian to Gregorian date
e = 10
if y > 1600:
e = e+y//100-16-(y//100-16)//4
else:
# New method
c = y//100
h = (c-c//4-(8*c+13)//25+19*g+15)%30
i = h-(h//28)*(1-(h//28)*(29//(h+1))*((21-g)//11))
j = (y+y//4+i+2-c+c//4)%7
# p can be from -6 to 56 corresponding to dates 22 March to 23 May
# (later dates apply to method 2, although 23 May never actually occurs)
p = i-j+e
d = 1+(p+27+(p+6)//40)%31
m = 3+(p+26)//30
return datetime.date(int(y),int(m),int(d))
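# Sanity-check sketch (not part of the original module). Western Easter
# 2000 fell on 23 April; the Julian and Orthodox variants return different
# dates for the same year (see the docstring above for validity ranges).
def _easter_example():
    assert easter(2000) == datetime.date(2000, 4, 23)
    return easter(2000, EASTER_JULIAN), easter(2000, EASTER_ORTHODOX)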
#!/usr/bin/env python
# -*- coding: utf-8 -*- #
from pelican import signals
import html5lib
import six
RAW_FOOTNOTE_CONTAINERS = ["code"]
def getText(node, recursive=False):
"""Get all the text associated with this node.
With recursive == True, all text from child nodes is retrieved."""
L = [u'']
for n in node.childNodes:
if n.nodeType in (node.TEXT_NODE, node.CDATA_SECTION_NODE):
L.append(n.data)
else:
if not recursive:
return None
L.append(getText(n))
return u''.join(L)
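# Reading aid (not original text): with recursive=False, getText returns
# None as soon as it meets a non-text child, which lets callers detect
# mixed content; with recursive=True it descends into child elements and
# joins all of their text.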
def sequence_gen(genlist):
for gen in genlist:
for elem in gen:
yield elem
def parse_for_footnotes(article_or_page_generator):
all_content = [
getattr(article_or_page_generator, attr, None) \
for attr in [u'articles', u'drafts', u'pages']]
all_content = [x for x in all_content if x is not None]
for article in sequence_gen(all_content):
if u"[ref]" in article._content and u"[/ref]" in article._content:
content = article._content.replace(u"[ref]", u"").replace(u"[/ref]",
u"")
parser = html5lib.HTMLParser(tree=html5lib.getTreeBuilder(u"dom"))
dom = parser.parse(content)
endnotes = []
count = 0
for footnote in dom.getElementsByTagName(u"x-simple-footnote"):
pn = footnote
leavealone = False
while pn:
if pn.nodeName in RAW_FOOTNOTE_CONTAINERS:
leavealone = True
break
pn = pn.parentNode
if leavealone:
continue
count += 1
fnid = u"sf-%s-%s" % (article.slug, count)
fnbackid = u"%s-back" % (fnid,)
endnotes.append((footnote, fnid, fnbackid))
number = dom.createElement(u"sup")
number.setAttribute(u"id", fnbackid)
numbera = dom.createElement(u"a")
numbera.setAttribute(u"href", u"#%s" % fnid)
numbera.setAttribute(u"class", u"simple-footnote")
numbera.appendChild(dom.createTextNode(six.text_type(count)))
txt = getText(footnote, recursive=True).replace(u"\n", u" ")
numbera.setAttribute(u"title", txt)
number.appendChild(numbera)
footnote.parentNode.insertBefore(number, footnote)
if endnotes:
ol = dom.createElement(u"ol")
ol.setAttribute(u"class", u"simple-footnotes")
for e, fnid, fnbackid in endnotes:
li = dom.createElement(u"li")
li.setAttribute(u"id", fnid)
while e.firstChild:
li.appendChild(e.firstChild)
backlink = dom.createElement(u"a")
backlink.setAttribute(u"href", u"#%s" % fnbackid)
backlink.setAttribute(u"class", u"simple-footnote-back")
backlink.appendChild(dom.createTextNode(u'\u21a9'))
li.appendChild(dom.createTextNode(u" "))
li.appendChild(backlink)
ol.appendChild(li)
e.parentNode.removeChild(e)
dom.getElementsByTagName(u"body")[0].appendChild(ol)
s = html5lib.serializer.HTMLSerializer(omit_optional_tags=False, quote_attr_values='legacy')
output_generator = s.serialize(
html5lib.treewalkers.getTreeWalker(u"dom")(dom.getElementsByTagName(u"body")[0]))
article._content = u"".join(list(output_generator)).replace(
u"", u"[ref]").replace(u"", u"[/ref]").replace(
u"", u"").replace(u"", u"")
def register():
signals.article_generator_finalized.connect(parse_for_footnotes)
signals.page_generator_finalized.connect(parse_for_footnotes)
from __future__ import division, print_function
import multiprocessing as mp
from multiprocessing.managers import SyncManager
class Gecko(object):
"""
Base class that handles setup/teardown processes
['_Popen', '__class__', '__delattr__', '__dict__', '__doc__', '__format__',
'__getattribute__', '__hash__', '__init__', '__module__', '__new__',
'__reduce__', '__reduce_ex__', '__repr__', '__setattr__', '__sizeof__',
'__str__', '__subclasshook__', '__weakref__', '_authkey', '_bootstrap',
'_daemonic', '_identity', '_name', '_parent_pid', '_popen', '_tempdir',
'authkey', 'daemon', 'exitcode', 'ident', 'is_alive', 'join', 'name', 'pid',
'run', 'start', 'terminate']
address=('127.0.0.1', 5000), authkey='abc'
"""
def __init__(self, ps, address=('127.0.0.1', 8888), authkey='hi', to=1.0):
print('CPU:', mp.cpu_count())
self.ps = ps
self.mgr = mp.Manager()
# self.mgr = SyncManager(address=address, authkey=authkey)
# self.mgr.start()
self.namespace = self.mgr.Namespace()
self.event = mp.Event()
self.timeout = to
def start(self):
self.event.set()
plist = []
# for i, (mod, fun, args) in enumerate(ps['processes']):
for (mod, fun, args) in self.ps['processes']:
m = __import__(mod)
ff = getattr(m, fun)
if args is None:
p = mp.Process(name=fun, target=ff, args=(self.namespace, self.event))
else:
p = mp.Process(name=fun, target=ff, args=(self.namespace, self.event, args))
p.start()
print('> Started:', mod + '.' + fun)
plist.append(p)
self.plist = plist
def end(self):
print('Main loop killing processes')
# self.mgr.shutdown()
for p in self.plist:
p.join(timeout=self.timeout)
if p.is_alive():
print('had to kill a process:', p.name)
p.terminate()
else:
print('clean exit:', p.name)
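# Hedged usage sketch: ps is a dict with a 'processes' list of
# (module_name, function_name, args) triples; the names below are
# hypothetical placeholders. Each target function is expected to accept
# (namespace, event[, args]) and to exit once the event is cleared.
#
#     ps = {'processes': [('sensor_mod', 'read_loop', None),
#                         ('logger_mod', 'log_loop', ('out.log',))]}
#     g = Gecko(ps)
#     g.start()
#     ...
#     g.event.clear()  # ask the children to stop, then
#     g.end()          # join them, terminating any stragglers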
# -*- coding: utf-8 -*-
import logging
import simplejson
import os
import openerp
import time
import random
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import module_boot, login_redirect
_logger = logging.getLogger(__name__)
class PosController(http.Controller):
@http.route('/pos/web', type='http', auth='user')
def a(self, debug=False, **k):
cr, uid, context, session = request.cr, request.uid, request.context, request.session
if not session.uid:
return login_redirect()
PosSession = request.registry['pos.session']
pos_session_ids = PosSession.search(cr, uid, [('state','=','opened'),('user_id','=',session.uid)], context=context)
PosSession.login(cr,uid,pos_session_ids,context=context)
modules = simplejson.dumps(module_boot(request.db))
init = """
var wc = new s.web.WebClient();
wc.show_application = function(){
wc.action_manager.do_action("pos.ui");
};
wc.setElement($(document.body));
wc.start();
"""
html = request.registry.get('ir.ui.view').render(cr, session.uid,'point_of_sale.index',{
'modules': modules,
'init': init,
})
return html
#!/usr/bin/python
# (c) Vincent Van de Kussen
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: rhn_channel
short_description: Adds or removes Red Hat software channels
description:
- Adds or removes Red Hat software channels
version_added: "1.1"
author: "Vincent Van der Kussen (@vincentvdk)"
notes:
    - This module fetches the system id from RHN.
requirements:
- none
options:
name:
description:
- name of the software channel
required: true
default: null
sysname:
description:
- name of the system as it is known in RHN/Satellite
required: true
default: null
state:
description:
- whether the channel should be present or not
required: false
default: present
url:
description:
- The full url to the RHN/Satellite api
required: true
user:
description:
- RHN/Satellite user
required: true
password:
description:
- "the user's password"
required: true
'''
EXAMPLES = '''
- rhn_channel: name=rhel-x86_64-server-v2vwin-6 sysname=server01 url=https://rhn.redhat.com/rpc/api user=rhnuser password=guessme
'''
import xmlrpclib
from operator import itemgetter
import re
# ------------------------------------------------------- #
def get_systemid(client, session, sysname):
systems = client.system.listUserSystems(session)
for system in systems:
if system.get('name') == sysname:
idres = system.get('id')
idd = int(idres)
return idd
# ------------------------------------------------------- #
# unused:
#
#def get_localsystemid():
# f = open("/etc/sysconfig/rhn/systemid", "r")
# content = f.read()
# loc_id = re.search(r'\b(ID-)(\d{10})' ,content)
# return loc_id.group(2)
# ------------------------------------------------------- #
def subscribe_channels(channelname, client, session, sysname, sys_id):
channels = base_channels(client, session, sys_id)
channels.append(channelname)
return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
def unsubscribe_channels(channelname, client, session, sysname, sys_id):
channels = base_channels(client, session, sys_id)
channels.remove(channelname)
return client.system.setChildChannels(session, sys_id, channels)
# ------------------------------------------------------- #
def base_channels(client, session, sys_id):
basechan = client.channel.software.listSystemChannels(session, sys_id)
try:
chans = [item['label'] for item in basechan]
except KeyError:
chans = [item['channel_label'] for item in basechan]
return chans
# ------------------------------------------------------- #
def main():
module = AnsibleModule(
argument_spec = dict(
state = dict(default='present', choices=['present', 'absent']),
name = dict(required=True),
sysname = dict(required=True),
url = dict(required=True),
user = dict(required=True),
password = dict(required=True, aliases=['pwd']),
)
# supports_check_mode=True
)
state = module.params['state']
channelname = module.params['name']
systname = module.params['sysname']
saturl = module.params['url']
user = module.params['user']
password = module.params['password']
#initialize connection
client = xmlrpclib.Server(saturl, verbose=0)
session = client.auth.login(user, password)
# get systemid
sys_id = get_systemid(client, session, systname)
# get channels for system
chans = base_channels(client, session, sys_id)
if state == 'present':
if channelname in chans:
module.exit_json(changed=False, msg="Channel %s already exists" % channelname)
else:
subscribe_channels(channelname, client, session, systname, sys_id)
module.exit_json(changed=True, msg="Channel %s added" % channelname)
if state == 'absent':
        if channelname not in chans:
module.exit_json(changed=False, msg="Not subscribed to channel %s." % channelname)
else:
unsubscribe_channels(channelname, client, session, systname, sys_id)
module.exit_json(changed=True, msg="Channel %s removed" % channelname)
client.auth.logout(session)
# import module snippets
from ansible.module_utils.basic import *
main()
#!/usr/bin/env python
#
# Copyright (c) 2015 Intel Corporation.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of works must retain the original copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the original copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of Intel Corporation nor the names of its contributors
# may be used to endorse or promote products derived from this work without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY INTEL CORPORATION "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL INTEL CORPORATION BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors:
# Li, Hao
import unittest
import os
import sys
import commands
import comm
from TestApp import *
app_name = "Spacedodgegame"
package_name = "org.xwalk." + app_name.lower()
active_name = app_name + "Activity"
sample_src = comm.sample_src_pref + "space-dodge-game/screen-orientation-resize/"
testapp = None
comm.setUp()
class Spacedodgegame(unittest.TestCase):
def test_1_pack(self):
#clean up old apk
commands.getstatusoutput("rm %s%s*" % (comm.build_app_dest, "org.xwalk." + app_name.lower()))
cmd = "%s --crosswalk=%s --platforms=android --android=%s --targets=%s --enable-remote-debugging %s" % \
(comm.apptools,
comm.crosswalkzip,
comm.MODE,
comm.ARCH,
sample_src)
comm.pack(cmd, app_name.lower(), self)
def test_2_install(self):
apk_file = commands.getstatusoutput("ls %s| grep %s" % (comm.build_app_dest, app_name.lower()))[1]
if apk_file.endswith(".apk"):
global testapp
testapp = TestApp(comm.device, comm.build_app_dest + apk_file, package_name, active_name)
if testapp.isInstalled():
testapp.uninstall()
self.assertTrue(testapp.install())
else:
print("-->> No packed %s apk in %s" % (app_name, comm.build_app_dest))
self.assertTrue(False)
def test_3_launch(self):
if testapp is not None:
self.assertTrue(testapp.launch())
else:
print("-->> Fail to pack %s apk" % app_name)
self.assertTrue(False)
def test_4_switch(self):
if testapp is not None:
self.assertTrue(testapp.switch())
else:
print("-->> Fail to pack %s apk" % app_name)
self.assertTrue(False)
def test_5_stop(self):
if testapp is not None:
self.assertTrue(testapp.stop())
else:
print("-->> Fail to pack %s apk" % app_name)
self.assertTrue(False)
def test_6_uninstall(self):
if testapp is not None:
self.assertTrue(testapp.uninstall())
else:
print("-->> Fail to pack %s apk" % app_name)
self.assertTrue(False)
def test_7_uninstall_when_app_running(self):
if testapp is not None:
if not testapp.isInstalled():
testapp.install()
if not testapp.isRunning():
testapp.launch()
self.assertTrue(testapp.uninstall())
else:
print("-->> Fail to pack %s apk" % app_name)
self.assertTrue(False)
if __name__ == '__main__':
unittest.main()
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
import pwd
from django.contrib.auth.models import User
from django.core import management
from django.core.management.base import BaseCommand
from desktop.models import Document, Document2, SAMPLE_USER_OWNERS
from useradmin.models import install_sample_user
LOG = logging.getLogger(__name__)
class Command(BaseCommand):
args = ''
help = 'Install examples but do not overwrite them.'
def handle(self, *args, **options):
if not options.get('user'):
user = User.objects.get(username=pwd.getpwuid(os.getuid()).pw_name)
else:
user = options['user']
if not Document2.objects.filter(type='notebook', owner__username__in=SAMPLE_USER_OWNERS).exists():
install_sample_user()
management.call_command('loaddata', 'initial_notebook_examples.json', verbosity=2)
Document.objects.sync()
from beeswax.management.commands.beeswax_install_examples import Command
app_name = 'beeswax'
Command().handle(app_name=app_name, user=user, tables='tables.json')
"""Simple HTTP Server.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
"""
__version__ = "0.6"
__all__ = ["SimpleHTTPRequestHandler"]
import os
import posixpath
import BaseHTTPServer
import urllib
import cgi
import sys
import shutil
import mimetypes
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
class SimpleHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
"""Simple HTTP request handler with GET and HEAD commands.
This serves files from the current directory and any of its
subdirectories. The MIME type for files is determined by
calling the .guess_type() method.
The GET and HEAD requests are identical except that the HEAD
request omits the actual contents of the file.
"""
server_version = "SimpleHTTP/" + __version__
def do_GET(self):
"""Serve a GET request."""
f = self.send_head()
if f:
self.copyfile(f, self.wfile)
f.close()
def do_HEAD(self):
"""Serve a HEAD request."""
f = self.send_head()
if f:
f.close()
def send_head(self):
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
try:
# Always read in binary mode. Opening files in text mode may cause
# newline translations, making the actual size of the content
# transmitted *less* than the content-length!
f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
def list_directory(self, path):
"""Helper to produce a directory listing (absent index.html).
Return value is either a file object, or None (indicating an
error). In either case, the headers are sent, making the
interface the same as for send_head().
"""
try:
list = os.listdir(path)
except os.error:
self.send_error(404, "No permission to list directory")
return None
list.sort(key=lambda a: a.lower())
f = StringIO()
displaypath = cgi.escape(urllib.unquote(self.path))
        f.write('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">')
        f.write("<html>\n<title>Directory listing for %s</title>\n" % displaypath)
        f.write("<body>\n<h2>Directory listing for %s</h2>\n" % displaypath)
        f.write("<hr>\n<ul>\n")
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
            f.write('<li><a href="%s">%s</a>\n'
                    % (urllib.quote(linkname), cgi.escape(displayname)))
        f.write("</ul>\n<hr>\n</body>\n</html>\n")
length = f.tell()
f.seek(0)
self.send_response(200)
encoding = sys.getfilesystemencoding()
self.send_header("Content-type", "text/html; charset=%s" % encoding)
self.send_header("Content-Length", str(length))
self.end_headers()
return f
def translate_path(self, path):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
path = posixpath.normpath(urllib.unquote(path))
words = path.split('/')
words = filter(None, words)
path = os.getcwd()
for word in words:
drive, word = os.path.splitdrive(word)
head, word = os.path.split(word)
if word in (os.curdir, os.pardir): continue
path = os.path.join(path, word)
return path
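    # Worked example (a sketch): with the process cwd as the document root,
    #   '/docs/index.html?x=1' -> os.path.join(os.getcwd(), 'docs', 'index.html')
    #   '/../etc/passwd'       -> os.path.join(os.getcwd(), 'etc', 'passwd')
    # The query string is dropped, and normpath plus the os.curdir/os.pardir
    # filter keep the result inside the current directory.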
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
else:
return self.extensions_map['']
if not mimetypes.inited:
mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
'': 'application/octet-stream', # Default
'.py': 'text/plain',
'.c': 'text/plain',
'.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler,
ServerClass = BaseHTTPServer.HTTPServer):
BaseHTTPServer.test(HandlerClass, ServerClass)
if __name__ == '__main__':
test()
"""An NNTP client class based on RFC 977: Network News Transfer Protocol.
Example:
>>> from nntplib import NNTP
>>> s = NNTP('news')
>>> resp, count, first, last, name = s.group('comp.lang.python')
>>> print 'Group', name, 'has', count, 'articles, range', first, 'to', last
Group comp.lang.python has 51 articles, range 5770 to 5821
>>> resp, subs = s.xhdr('subject', first + '-' + last)
>>> resp = s.quit()
>>>
Here 'resp' is the server response line.
Error responses are turned into exceptions.
To post an article from a file:
>>> f = open(filename, 'r') # file containing article, including header
>>> resp = s.post(f)
>>>
For descriptions of all methods, read the comments in the code below.
Note that all arguments and return values representing article numbers
are strings, not numbers, since they are rarely used for calculations.
"""
# RFC 977 by Brian Kantor and Phil Lapsley.
# xover, xgtitle, xpath, date methods by Kevan Heydon
# Imports
import re
import socket
__all__ = ["NNTP","NNTPReplyError","NNTPTemporaryError",
"NNTPPermanentError","NNTPProtocolError","NNTPDataError",
"error_reply","error_temp","error_perm","error_proto",
"error_data",]
# Exceptions raised when an error or invalid response is received
class NNTPError(Exception):
"""Base class for all nntplib exceptions"""
def __init__(self, *args):
Exception.__init__(self, *args)
try:
self.response = args[0]
except IndexError:
self.response = 'No response given'
class NNTPReplyError(NNTPError):
"""Unexpected [123]xx reply"""
pass
class NNTPTemporaryError(NNTPError):
"""4xx errors"""
pass
class NNTPPermanentError(NNTPError):
"""5xx errors"""
pass
class NNTPProtocolError(NNTPError):
"""Response does not begin with [1-5]"""
pass
class NNTPDataError(NNTPError):
"""Error in response data"""
pass
# for backwards compatibility
error_reply = NNTPReplyError
error_temp = NNTPTemporaryError
error_perm = NNTPPermanentError
error_proto = NNTPProtocolError
error_data = NNTPDataError
# Standard port used by NNTP servers
NNTP_PORT = 119
# Response numbers that are followed by additional text (e.g. article)
LONGRESP = ['100', '215', '220', '221', '222', '224', '230', '231', '282']
# Line terminators (we always output CRLF, but accept any of CRLF, CR, LF)
CRLF = '\r\n'
# The class itself
class NNTP:
def __init__(self, host, port=NNTP_PORT, user=None, password=None,
readermode=None, usenetrc=True):
"""Initialize an instance. Arguments:
- host: hostname to connect to
- port: port to connect to (default the standard NNTP port)
- user: username to authenticate with
- password: password to use with username
- readermode: if true, send 'mode reader' command after
connecting.
readermode is sometimes necessary if you are connecting to an
NNTP server on the local machine and intend to call
reader-specific commands, such as `group'. If you get
unexpected NNTPPermanentErrors, you might need to set
readermode.
"""
self.host = host
self.port = port
self.sock = socket.create_connection((host, port))
self.file = self.sock.makefile('rb')
self.debugging = 0
self.welcome = self.getresp()
# 'mode reader' is sometimes necessary to enable 'reader' mode.
# However, the order in which 'mode reader' and 'authinfo' need to
# arrive differs between some NNTP servers. Try to send
# 'mode reader', and if it fails with an authorization failed
# error, try again after sending authinfo.
readermode_afterauth = 0
if readermode:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
            except NNTPTemporaryError as e:
if user and e.response[:3] == '480':
# Need authorization before 'mode reader'
readermode_afterauth = 1
else:
raise
# If no login/password was specified, try to get them from ~/.netrc
        # Presume that if .netrc has an entry, NNRP authentication is required.
try:
if usenetrc and not user:
import netrc
credentials = netrc.netrc()
auth = credentials.authenticators(host)
if auth:
user = auth[0]
password = auth[2]
except IOError:
pass
# Perform NNRP authentication if needed.
if user:
resp = self.shortcmd('authinfo user '+user)
if resp[:3] == '381':
if not password:
raise NNTPReplyError(resp)
else:
resp = self.shortcmd(
'authinfo pass '+password)
if resp[:3] != '281':
raise NNTPPermanentError(resp)
if readermode_afterauth:
try:
self.welcome = self.shortcmd('mode reader')
except NNTPPermanentError:
# error 500, probably 'not implemented'
pass
# Get the welcome message from the server
# (this is read and squirreled away by __init__()).
# If the response code is 200, posting is allowed;
    # if it is 201, posting is not allowed
def getwelcome(self):
"""Get the welcome message from the server
(this is read and squirreled away by __init__()).
If the response code is 200, posting is allowed;
        if it is 201, posting is not allowed."""
if self.debugging: print '*welcome*', repr(self.welcome)
return self.welcome
def set_debuglevel(self, level):
"""Set the debugging level. Argument 'level' means:
0: no debugging output (default)
1: print commands and responses but not body text etc.
2: also print raw lines read and sent before stripping CR/LF"""
self.debugging = level
debug = set_debuglevel
def putline(self, line):
"""Internal: send one line to the server, appending CRLF."""
line = line + CRLF
if self.debugging > 1: print '*put*', repr(line)
self.sock.sendall(line)
def putcmd(self, line):
"""Internal: send one command to the server (through putline())."""
if self.debugging: print '*cmd*', repr(line)
self.putline(line)
def getline(self):
"""Internal: return one line from the server, stripping CRLF.
Raise EOFError if the connection is closed."""
line = self.file.readline()
if self.debugging > 1:
print '*get*', repr(line)
if not line: raise EOFError
if line[-2:] == CRLF: line = line[:-2]
elif line[-1:] in CRLF: line = line[:-1]
return line
def getresp(self):
"""Internal: get a response from the server.
Raise various errors if the response indicates an error."""
resp = self.getline()
if self.debugging: print '*resp*', repr(resp)
c = resp[:1]
if c == '4':
raise NNTPTemporaryError(resp)
if c == '5':
raise NNTPPermanentError(resp)
if c not in '123':
raise NNTPProtocolError(resp)
return resp
def getlongresp(self, file=None):
"""Internal: get a response plus following text from the server.
Raise various errors if the response indicates an error."""
openedFile = None
try:
# If a string was passed then open a file with that name
if isinstance(file, str):
openedFile = file = open(file, "w")
resp = self.getresp()
if resp[:3] not in LONGRESP:
raise NNTPReplyError(resp)
list = []
while 1:
line = self.getline()
if line == '.':
break
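# Undo NNTP "dot-stuffing": a leading '.' in article text is doubled
# on the wire, so '..' at the start of a line decodes to '.'.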
if line[:2] == '..':
line = line[1:]
if file:
file.write(line + "\n")
else:
list.append(line)
finally:
# If this method created the file, then it must close it
if openedFile:
openedFile.close()
return resp, list
def shortcmd(self, line):
"""Internal: send a command and get the response."""
self.putcmd(line)
return self.getresp()
def longcmd(self, line, file=None):
"""Internal: send a command and get the response plus following text."""
self.putcmd(line)
return self.getlongresp(file)
def newgroups(self, date, time, file=None):
"""Process a NEWGROUPS command. Arguments:
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of newsgroup names"""
return self.longcmd('NEWGROUPS ' + date + ' ' + time, file)
def newnews(self, group, date, time, file=None):
"""Process a NEWNEWS command. Arguments:
- group: group name or '*'
- date: string 'yymmdd' indicating the date
- time: string 'hhmmss' indicating the time
Return:
- resp: server response if successful
- list: list of message ids"""
cmd = 'NEWNEWS ' + group + ' ' + date + ' ' + time
return self.longcmd(cmd, file)
def list(self, file=None):
"""Process a LIST command. Return:
- resp: server response if successful
- list: list of (group, last, first, flag) (strings)"""
resp, list = self.longcmd('LIST', file)
for i in range(len(list)):
# Parse lines into "group last first flag"
list[i] = tuple(list[i].split())
return resp, list
def description(self, group):
"""Get a description for a single group. If more than one
group matches ('group' is a pattern), return the first. If no
group matches, return an empty string.
This elides the response code from the server, since it can
only be '215' or '285' (for xgtitle) anyway. If the response
code is needed, use the 'descriptions' method.
NOTE: This neither checks for a wildcard in 'group' nor does
it check whether the group actually exists."""
resp, lines = self.descriptions(group)
if len(lines) == 0:
return ""
else:
return lines[0][1]
def descriptions(self, group_pattern):
"""Get descriptions for a range of groups."""
line_pat = re.compile("^(?P[^ \t]+)[ \t]+(.*)$")
# Try the more standard (according to RFC 2980) LIST NEWSGROUPS first.
resp, raw_lines = self.longcmd('LIST NEWSGROUPS ' + group_pattern)
if resp[:3] != "215":
# Now the deprecated XGTITLE. This either raises an error
# or succeeds with the same output structure as LIST
# NEWSGROUPS.
resp, raw_lines = self.longcmd('XGTITLE ' + group_pattern)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def group(self, name):
"""Process a GROUP command. Argument:
- name: the group name
Returns:
- resp: server response if successful
- count: number of articles (string)
- first: first article number (string)
- last: last article number (string)
- name: the group name"""
resp = self.shortcmd('GROUP ' + name)
if resp[:3] != '211':
raise NNTPReplyError(resp)
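# A typical response looks like '211 1234 3000234 3002322 misc.test'
# (count, first article number, last article number, group name).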
words = resp.split()
count = first = last = 0
n = len(words)
if n > 1:
count = words[1]
if n > 2:
first = words[2]
if n > 3:
last = words[3]
if n > 4:
name = words[4].lower()
return resp, count, first, last, name
def help(self, file=None):
"""Process a HELP command. Returns:
- resp: server response if successful
- list: list of strings"""
return self.longcmd('HELP', file)
def statparse(self, resp):
"""Internal: parse the response of a STAT, NEXT or LAST command."""
if resp[:2] != '22':
raise NNTPReplyError(resp)
words = resp.split()
nr = 0
id = ''
n = len(words)
if n > 1:
nr = words[1]
if n > 2:
id = words[2]
return resp, nr, id
def statcmd(self, line):
"""Internal: process a STAT, NEXT or LAST command."""
resp = self.shortcmd(line)
return self.statparse(resp)
def stat(self, id):
"""Process a STAT command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: the article number
- id: the message id"""
return self.statcmd('STAT ' + id)
def next(self):
"""Process a NEXT command. No arguments. Return as for STAT."""
return self.statcmd('NEXT')
def last(self):
"""Process a LAST command. No arguments. Return as for STAT."""
return self.statcmd('LAST')
def artcmd(self, line, file=None):
"""Internal: process a HEAD, BODY or ARTICLE command."""
resp, list = self.longcmd(line, file)
resp, nr, id = self.statparse(resp)
return resp, nr, id, list
def head(self, id):
"""Process a HEAD command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's header"""
return self.artcmd('HEAD ' + id)
def body(self, id, file=None):
"""Process a BODY command. Argument:
- id: article number or message id
- file: Filename string or file object to store the article in
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article's body or an empty list
if file was used"""
return self.artcmd('BODY ' + id, file)
def article(self, id):
"""Process an ARTICLE command. Argument:
- id: article number or message id
Returns:
- resp: server response if successful
- nr: article number
- id: message id
- list: the lines of the article"""
return self.artcmd('ARTICLE ' + id)
def slave(self):
"""Process a SLAVE command. Returns:
- resp: server response if successful"""
return self.shortcmd('SLAVE')
def xhdr(self, hdr, str, file=None):
"""Process an XHDR command (optional server extension). Arguments:
- hdr: the header type (e.g. 'subject')
- str: an article nr, a message id, or a range nr1-nr2
Returns:
- resp: server response if successful
- list: list of (nr, value) strings"""
pat = re.compile('^([0-9]+) ?(.*)\n?')
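# XHDR lines have the form '<nr> <value>', e.g. '2738 Re: The subject'.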
resp, lines = self.longcmd('XHDR ' + hdr + ' ' + str, file)
for i in range(len(lines)):
line = lines[i]
m = pat.match(line)
if m:
lines[i] = m.group(1, 2)
return resp, lines
def xover(self, start, end, file=None):
"""Process an XOVER command (optional server extension) Arguments:
- start: start of range
- end: end of range
Returns:
- resp: server response if successful
- list: list of (art-nr, subject, poster, date,
id, references, size, lines)"""
resp, lines = self.longcmd('XOVER ' + start + '-' + end, file)
xover_lines = []
for line in lines:
elem = line.split("\t")
try:
xover_lines.append((elem[0],
elem[1],
elem[2],
elem[3],
elem[4],
elem[5].split(),
elem[6],
elem[7]))
except IndexError:
raise NNTPDataError(line)
return resp, xover_lines
def xgtitle(self, group, file=None):
"""Process an XGTITLE command (optional server extension) Arguments:
- group: group name wildcard (i.e. news.*)
Returns:
- resp: server response if successful
- list: list of (name,title) strings"""
line_pat = re.compile("^([^ \t]+)[ \t]+(.*)$")
resp, raw_lines = self.longcmd('XGTITLE ' + group, file)
lines = []
for raw_line in raw_lines:
match = line_pat.search(raw_line.strip())
if match:
lines.append(match.group(1, 2))
return resp, lines
def xpath(self, id):
"""Process an XPATH command (optional server extension) Arguments:
- id: Message id of article
Returns:
resp: server response if successful
path: directory path to article"""
resp = self.shortcmd("XPATH " + id)
if resp[:3] != '223':
raise NNTPReplyError(resp)
try:
[resp_num, path] = resp.split()
except ValueError:
raise NNTPReplyError(resp)
else:
return resp, path
def date(self):
"""Process the DATE command. Arguments:
None
Returns:
resp: server response if successful
date: Date suitable for newnews/newgroups commands etc.
time: Time suitable for newnews/newgroups commands etc."""
resp = self.shortcmd("DATE")
if resp[:3] != '111':
raise NNTPReplyError(resp)
elem = resp.split()
if len(elem) != 2:
raise NNTPDataError(resp)
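# The response has the form '111 YYYYMMDDhhmmss'; slice it down to the
# 'yymmdd' and 'hhmmss' formats that newnews/newgroups expect.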
date = elem[1][2:8]
time = elem[1][-6:]
if len(date) != 6 or len(time) != 6:
raise NNTPDataError(resp)
return resp, date, time
def post(self, f):
"""Process a POST command. Arguments:
- f: file containing the article
Returns:
- resp: server response if successful"""
resp = self.shortcmd('POST')
# Raises NNTPReplyError if posting is not allowed.
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
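# Dot-stuff lines that start with '.' so the server does not mistake
# them for the end-of-article marker.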
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def ihave(self, id, f):
"""Process an IHAVE command. Arguments:
- id: message-id of the article
- f: file containing the article
Returns:
- resp: server response if successful
Note that if the server refuses the article an exception is raised."""
resp = self.shortcmd('IHAVE ' + id)
# Raises NNTPReplyError if the server already has it.
if resp[0] != '3':
raise NNTPReplyError(resp)
while 1:
line = f.readline()
if not line:
break
if line[-1] == '\n':
line = line[:-1]
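# Dot-stuff, as in post() above.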
if line[:1] == '.':
line = '.' + line
self.putline(line)
self.putline('.')
return self.getresp()
def quit(self):
"""Process a QUIT command and close the socket. Returns:
- resp: server response if successful"""
resp = self.shortcmd('QUIT')
self.file.close()
self.sock.close()
del self.file, self.sock
return resp
# Test retrieval when run as a script.
# Assumption: if there's a local news server, it's called 'news'.
# Assumption: if user queries a remote news server, it's named
# in the environment variable NNTPSERVER (used by slrn and kin)
# and we want readermode off.
if __name__ == '__main__':
import os
newshost = os.environ.get("NNTPSERVER", "news")
if newshost.find('.') == -1:
mode = 'readermode'
else:
mode = None
s = NNTP(newshost, readermode=mode)
resp, count, first, last, name = s.group('comp.lang.python')
print resp
print 'Group', name, 'has', count, 'articles, range', first, 'to', last
resp, subs = s.xhdr('subject', first + '-' + last)
print resp
for item in subs:
print "%7s %s" % item
resp = s.quit()
print resp
# -*- coding: utf-8 -*-
"""Implements indexer for building STAR references."""
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
from typing import Any
import toolz
from imfusion.external.star import star_index
from .base import Indexer, Reference, register_indexer
class StarIndexer(Indexer):
"""Indexer that builds references for the STAR aligner.
Performs the same steps as the base ``Indexer`` class, but additionally
generates an index for alignment with STAR using STAR's ``genomeGenerate``
command. Special attention should be paid to the ``overhang`` parameter,
which defines the overhang used by STAR in the build reference
(see the ``sjdbOverhang`` parameter in the STAR documentation for more
details). Ideally, the value for this parameter should be one less than the
length of the used reads.
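For example, for 100 bp reads the ideal overhang value is 99 (the
default used here is 100).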
"""
def __init__(self, logger=None, skip_index=False, overhang=100, threads=1):
# type: (Any, bool, int, int) -> None
super().__init__(logger=logger, skip_index=skip_index)
self._overhang = overhang
self._threads = threads
@property
def _reference_class(self):
"""Reference class to use for this indexer."""
return StarReference
@property
def dependencies(self):
"""External dependencies required by this indexer."""
return ['STAR']
def _build_indices(self, reference):
# type: (StarReference) -> None
self._logger.info('Building STAR index')
star_index(
fasta_path=reference.fasta_path,
gtf_path=reference.gtf_path,
output_dir=reference.index_path,
overhang=self._overhang,
threads=self._threads,
log_path=reference.base_path / 'star.log')
@classmethod
def configure_args(cls, parser):
"""Configures an argument parser for the Indexer.
Used by ``imfusion-build`` to configure the sub-command for
this indexer (if registered as an Indexer using the
``register_indexer`` function).
Parameters
----------
parser : argparse.ArgumentParser
Argument parser to configure.
"""
super().configure_args(parser)
star_group = parser.add_argument_group('STAR arguments')
star_group.add_argument('--star_overhang', type=int, default=100,
help="Overhang used for the STAR index (see STAR's sjdbOverhang parameter).")
star_group.add_argument('--star_threads', type=int, default=1,
help='Number of threads to use when building the index.')
@classmethod
def _parse_args(cls, args):
super_args = super()._parse_args(args)
return toolz.merge(super_args, {
'overhang': args.star_overhang,
'threads': args.star_threads
})
class StarReference(Reference):
"""Star Reference class.
Defines paths to files within the STAR reference. Currently the same
as the base ``Reference`` class.
"""
pass
register_indexer('star', StarIndexer)
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import hashlib
import json
import os
import sys
import buildbot_common
import build_version
from build_paths import SCRIPT_DIR
GS_MANIFEST_PATH = 'gs://nativeclient-mirror/nacl/nacl_sdk/'
SDK_MANIFEST = 'naclsdk_manifest2.json'
MONO_MANIFEST = 'naclmono_manifest.json'
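# Bundle entries in these manifests are dicts of roughly this shape
# (illustrative, based on the fields accessed below):
#   {'name': 'pepper_19', 'version': 19, 'revision': 1234,
#    'stability': 'stable', 'archives': [{'host_os': 'linux', 'url': '...'}]}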
def build_and_upload_mono(sdk_revision, pepper_revision, sdk_url,
upload_path, args):
install_dir = 'naclmono'
buildbot_common.RemoveDir(install_dir)
revision_opt = ['--sdk-revision', sdk_revision] if sdk_revision else []
url_opt = ['--sdk-url', sdk_url] if sdk_url else []
buildbot_common.Run([sys.executable, 'nacl-mono-builder.py',
'--arch', 'x86-32', '--install-dir', install_dir] +
revision_opt + url_opt + args)
buildbot_common.Run([sys.executable, 'nacl-mono-builder.py',
'--arch', 'x86-64', '--install-dir', install_dir] +
revision_opt + url_opt + args)
buildbot_common.Run([sys.executable, 'nacl-mono-builder.py',
'--arch', 'arm', '--install-dir', install_dir] +
revision_opt + url_opt + args)
buildbot_common.Run([sys.executable, 'nacl-mono-archive.py',
'--upload-path', upload_path,
'--pepper-revision', pepper_revision,
'--install-dir', install_dir] + args)
def get_sdk_build_info():
'''Returns a list of dictionaries describing the versions of NaCl Mono
to build, i.e. those that are out of date compared to the SDKs available
to naclsdk.'''
# Get a copy of the naclsdk manifest file
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp',
GS_MANIFEST_PATH + SDK_MANIFEST, '.'])
manifest_file = open(SDK_MANIFEST, 'r')
sdk_manifest = json.loads(manifest_file.read())
manifest_file.close()
pepper_infos = []
for key, value in sdk_manifest.items():
if key == 'bundles':
stabilities = ['stable', 'beta', 'dev', 'post_stable']
# Pick pepper_* bundles, need pepper_19 or greater to build Mono
bundles = filter(lambda b: (b['stability'] in stabilities
and 'pepper_' in b['name'])
and b['version'] >= 19, value)
for b in bundles:
newdict = {}
newdict['pepper_revision'] = str(b['version'])
linux_arch = filter(lambda u: u['host_os'] == 'linux', b['archives'])
newdict['sdk_url'] = linux_arch[0]['url']
newdict['sdk_revision'] = b['revision']
newdict['stability'] = b['stability']
newdict['naclmono_name'] = 'naclmono_' + newdict['pepper_revision']
pepper_infos.append(newdict)
# Get a copy of the naclmono manifest file
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp',
GS_MANIFEST_PATH + MONO_MANIFEST, '.'])
manifest_file = open(MONO_MANIFEST, 'r')
mono_manifest = json.loads(manifest_file.read())
manifest_file.close()
ret = []
mono_manifest_dirty = False
# Check to see if we need to rebuild mono based on sdk revision
for key, value in mono_manifest.items():
if key == 'bundles':
for info in pepper_infos:
bundle = filter(lambda b: b['name'] == info['naclmono_name'], value)
if len(bundle) == 0:
info['naclmono_rev'] = '1'
ret.append(info)
else:
if info['sdk_revision'] != bundle[0]['sdk_revision']:
# This bundle exists in the mono manifest, bump the revision
# for the new build we're about to make.
info['naclmono_rev'] = str(bundle[0]['revision'] + 1)
ret.append(info)
elif info['stability'] != bundle[0]['stability']:
# If all that happened was the SDK bundle was promoted in stability,
# change only that and re-write the manifest
mono_manifest_dirty = True
bundle[0]['stability'] = info['stability']
# re-write the manifest here because there are no bundles to build but
# the manifest has changed
if mono_manifest_dirty and len(ret) == 0:
manifest_file = open(MONO_MANIFEST, 'w')
manifest_file.write(json.dumps(mono_manifest, sort_keys=False, indent=2))
manifest_file.close()
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', '-a', 'public-read',
MONO_MANIFEST, GS_MANIFEST_PATH + MONO_MANIFEST])
return ret
def update_mono_sdk_json(infos):
'''Update the naclmono manifest with the newly built packages'''
if len(infos) == 0:
return
manifest_file = open(MONO_MANIFEST, 'r')
mono_manifest = json.loads(manifest_file.read())
manifest_file.close()
for info in infos:
bundle = {}
bundle['name'] = info['naclmono_name']
bundle['description'] = 'Mono for Native Client'
bundle['stability'] = info['stability']
bundle['recommended'] = 'no'
bundle['version'] = 'experimental'
archive = {}
sha1_hash = hashlib.sha1()
f = open(info['naclmono_name'] + '.bz2', 'rb')
sha1_hash.update(f.read())
archive['size'] = f.tell()
f.close()
archive['checksum'] = { 'sha1': sha1_hash.hexdigest() }
archive['host_os'] = 'all'
archive['url'] = ('https://storage.googleapis.com/'
'nativeclient-mirror/nacl/nacl_sdk/%s/%s/%s.bz2'
% (info['naclmono_name'], info['naclmono_rev'],
info['naclmono_name']))
bundle['archives'] = [archive]
bundle['revision'] = int(info['naclmono_rev'])
bundle['sdk_revision'] = int(info['sdk_revision'])
# Insert this new bundle into the manifest,
# probably overwriting an existing bundle.
for key, value in mono_manifest.items():
if key == 'bundles':
existing = filter(lambda b: b['name'] == info['naclmono_name'], value)
if len(existing) > 0:
loc = value.index(existing[0])
value[loc] = bundle
else:
value.append(bundle)
# Write out the file locally, then upload to its known location.
manifest_file = open(MONO_MANIFEST, 'w')
manifest_file.write(json.dumps(mono_manifest, sort_keys=False, indent=2))
manifest_file.close()
buildbot_common.Run([buildbot_common.GetGsutil(), 'cp', '-a', 'public-read',
MONO_MANIFEST, GS_MANIFEST_PATH + MONO_MANIFEST])
def main(args):
args = args[1:]
# Delete global configs that would override the mono builders' configuration.
if 'AWS_CREDENTIAL_FILE' in os.environ:
del os.environ['AWS_CREDENTIAL_FILE']
if 'BOTO_CONFIG' in os.environ:
del os.environ['BOTO_CONFIG']
buildbot_revision = os.environ.get('BUILDBOT_REVISION', '')
buildername = os.environ.get('BUILDBOT_BUILDERNAME', '')
os.chdir(SCRIPT_DIR)
if buildername == 'linux-sdk-mono32':
assert buildbot_revision
sdk_revision = buildbot_revision.split(':')[0]
pepper_revision = build_version.ChromeMajorVersion()
build_and_upload_mono(sdk_revision, pepper_revision, None,
'trunk.' + sdk_revision, args)
elif buildername == 'linux-sdk-mono64':
infos = get_sdk_build_info()
for info in infos:
# This will put the file in naclmono_19/1/naclmono_19.bz2 for example.
upload_path = info['naclmono_name'] + '/' + info['naclmono_rev']
build_and_upload_mono(None, info['pepper_revision'], info['sdk_url'],
upload_path, args)
update_mono_sdk_json(infos)
if __name__ == '__main__':
sys.exit(main(sys.argv))
from __future__ import unicode_literals
import unittest
from django.contrib.admindocs import views
from django.db import models
from django.db.models import fields
from django.utils.translation import ugettext as _
class CustomField(models.Field):
description = "A custom field type"
class DescriptionLackingField(models.Field):
pass
class TestFieldType(unittest.TestCase):
def setUp(self):
pass
def test_field_name(self):
self.assertRaises(
AttributeError,
views.get_readable_field_data_type, "NotAField"
)
def test_builtin_fields(self):
self.assertEqual(
views.get_readable_field_data_type(fields.BooleanField()),
_('Boolean (Either True or False)')
)
def test_custom_fields(self):
self.assertEqual(
views.get_readable_field_data_type(CustomField()),
'A custom field type'
)
self.assertEqual(
views.get_readable_field_data_type(DescriptionLackingField()),
_('Field of type: %(field_type)s') % {
'field_type': 'DescriptionLackingField'
}
)
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, _dict
from frappe.utils import (flt, getdate, get_first_day, get_last_day,
add_months, add_days, formatdate)
def get_period_list(fiscal_year, periodicity, from_beginning=False):
"""Get a list of dict {"to_date": to_date, "key": key, "label": label}
Periodicity can be (Yearly, Half-yearly, Quarterly, Monthly)"""
fy_start_end_date = frappe.db.get_value("Fiscal Year", fiscal_year, ["year_start_date", "year_end_date"])
if not fy_start_end_date:
frappe.throw(_("Fiscal Year {0} not found.").format(fiscal_year))
start_date = getdate(fy_start_end_date[0])
end_date = getdate(fy_start_end_date[1])
if periodicity == "Yearly":
period_list = [_dict({"to_date": end_date, "key": fiscal_year, "label": fiscal_year})]
else:
months_to_add = {
"Half-yearly": 6,
"Quarterly": 3,
"Monthly": 1
}[periodicity]
period_list = []
# start from the first day of the month, so as to avoid odd period end dates like 2-April
to_date = get_first_day(start_date)
for i in xrange(12 / months_to_add):
to_date = add_months(to_date, months_to_add)
if to_date == get_first_day(to_date):
# if to_date is the first day, get the last day of previous month
to_date = add_days(to_date, -1)
else:
# to_date should be the last day of the new to_date's month
to_date = get_last_day(to_date)
if to_date <= end_date:
# the normal case
period_list.append(_dict({ "to_date": to_date }))
# if it ends before a full year
if to_date == end_date:
break
else:
# if a fiscal year ends before a 12 month period
period_list.append(_dict({ "to_date": end_date }))
break
# common processing
for opts in period_list:
key = opts["to_date"].strftime("%b_%Y").lower()
label = formatdate(opts["to_date"], "MMM YYYY")
opts.update({
"key": key.replace(" ", "_").replace("-", "_"),
"label": label,
"year_start_date": start_date,
"year_end_date": end_date
})
if from_beginning:
# set start date as None for all fiscal periods, used in case of Balance Sheet
opts["from_date"] = None
else:
opts["from_date"] = start_date
return period_list
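# Illustrative example: for an April-March fiscal year and "Quarterly"
# periodicity, the generated to_dates are 30 Jun, 30 Sep, 31 Dec and
# 31 Mar, with keys like "jun_2013" and labels like "Jun 2013".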
def get_data(company, root_type, balance_must_be, period_list, ignore_closing_entries=False):
accounts = get_accounts(company, root_type)
if not accounts:
return None
accounts, accounts_by_name = filter_accounts(accounts)
gl_entries_by_account = get_gl_entries(company, period_list[0]["from_date"], period_list[-1]["to_date"],
accounts[0].lft, accounts[0].rgt, ignore_closing_entries=ignore_closing_entries)
calculate_values(accounts, gl_entries_by_account, period_list)
accumulate_values_into_parents(accounts, accounts_by_name, period_list)
out = prepare_data(accounts, balance_must_be, period_list)
if out:
add_total_row(out, balance_must_be, period_list)
return out
def calculate_values(accounts, gl_entries_by_account, period_list):
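# Note that periods are cumulative: an entry contributes to every period
# whose to_date falls on or after its posting date.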
for d in accounts:
for name in ([d.name] + (d.collapsed_children or [])):
for entry in gl_entries_by_account.get(name, []):
for period in period_list:
entry.posting_date = getdate(entry.posting_date)
# check if posting date is within the period
if entry.posting_date <= period.to_date:
d[period.key] = d.get(period.key, 0.0) + flt(entry.debit) - flt(entry.credit)
def accumulate_values_into_parents(accounts, accounts_by_name, period_list):
"""accumulate children's values in parent accounts"""
for d in reversed(accounts):
if d.parent_account:
for period in period_list:
accounts_by_name[d.parent_account][period.key] = accounts_by_name[d.parent_account].get(period.key, 0.0) + \
d.get(period.key, 0.0)
def prepare_data(accounts, balance_must_be, period_list):
out = []
year_start_date = period_list[0]["year_start_date"].strftime("%Y-%m-%d")
year_end_date = period_list[-1]["year_end_date"].strftime("%Y-%m-%d")
for d in accounts:
# add to output
has_value = False
row = {
"account_name": d.account_name,
"account": d.name,
"parent_account": d.parent_account,
"indent": flt(d.indent),
"from_date": year_start_date,
"to_date": year_end_date
}
for period in period_list:
if d.get(period.key):
# change sign based on Debit or Credit, since calculation is done using (debit - credit)
d[period.key] *= (1 if balance_must_be=="Debit" else -1)
row[period.key] = flt(d.get(period.key, 0.0), 3)
# treat values below 0.005 as zero when deciding whether the row has data
if abs(row[period.key]) >= 0.005:
has_value = True
if has_value:
out.append(row)
return out
def add_total_row(out, balance_must_be, period_list):
row = {
"account_name": _("Total ({0})").format(balance_must_be),
"account": None
}
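# out[0] is the root account; child values have already been accumulated
# into it, so its totals are moved into the total row.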
for period in period_list:
row[period.key] = out[0].get(period.key, 0.0)
out[0][period.key] = ""
out.append(row)
# blank row after Total
out.append({})
def get_accounts(company, root_type):
# root lft, rgt
root_account = frappe.db.sql("""select lft, rgt from tabAccount
where company=%s and root_type=%s order by lft limit 1""",
(company, root_type), as_dict=True)
if not root_account:
return None
lft, rgt = root_account[0].lft, root_account[0].rgt
accounts = frappe.db.sql("""select * from tabAccount
where company=%(company)s and lft >= %(lft)s and rgt <= %(rgt)s order by lft""",
{ "company": company, "lft": lft, "rgt": rgt }, as_dict=True)
return accounts
def filter_accounts(accounts, depth=10):
parent_children_map = {}
accounts_by_name = {}
for d in accounts:
accounts_by_name[d.name] = d
parent_children_map.setdefault(d.parent_account or None, []).append(d)
filtered_accounts = []
def add_to_list(parent, level):
if level < depth:
for child in (parent_children_map.get(parent) or []):
child.indent = level
filtered_accounts.append(child)
add_to_list(child.name, level + 1)
else:
# collapse all deeper descendants into this parent account
parent_account = accounts_by_name[parent]
parent_account["collapsed_children"] = []
for d in accounts:
if d.lft > parent_account.lft and d.rgt < parent_account.rgt:
parent_account["collapsed_children"].append(d.name)
add_to_list(None, 0)
return filtered_accounts, accounts_by_name
def get_gl_entries(company, from_date, to_date, root_lft, root_rgt, ignore_closing_entries=False):
"""Returns a dict like { "account": [gl entries], ... }"""
additional_conditions = []
if ignore_closing_entries:
additional_conditions.append("and ifnull(voucher_type, '')!='Period Closing Voucher'")
if from_date:
additional_conditions.append("and posting_date >= %(from_date)s")
gl_entries = frappe.db.sql("""select * from tabGL_Entry
where company=%(company)s
{additional_conditions}
and posting_date <= %(to_date)s
and account in (select name from tabAccount
where lft >= %(lft)s and rgt <= %(rgt)s)
order by account, posting_date""".format(additional_conditions="\n".join(additional_conditions)),
{
"company": company,
"from_date": from_date,
"to_date": to_date,
"lft": root_lft,
"rgt": root_rgt
},
as_dict=True)
gl_entries_by_account = {}
for entry in gl_entries:
gl_entries_by_account.setdefault(entry.account, []).append(entry)
return gl_entries_by_account
def get_columns(period_list):
columns = [{
"fieldname": "account",
"label": _("Account"),
"fieldtype": "Link",
"options": "Account",
"width": 300
}]
for period in period_list:
columns.append({
"fieldname": period.key,
"label": period.label,
"fieldtype": "Currency",
"width": 150
})
return columns
#!/usr/bin/env python3
"""
To run: python3 nb2to3.py notebook-or-directory
"""
# Authors: Thomas Kluyver, Fernando Perez
# See: https://gist.github.com/takluyver/c8839593c615bb2f6e80
# found at https://stackoverflow.com/questions/20651502/ipython-code-migration-from-python-2-to-python-3
import argparse
import pathlib
from nbformat import read, write
import lib2to3
from lib2to3.refactor import RefactoringTool, get_fixers_from_package
def refactor_notebook_inplace(rt, path):
def refactor_cell(src):
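# refactor_string() requires a trailing newline, so one is added here and
# stripped from the result again below. `i` is the cell number from the
# enclosing loop.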
try:
tree = rt.refactor_string(src+'\n', str(path) + '/cell-%d' % i)
except (lib2to3.pgen2.parse.ParseError,
lib2to3.pgen2.tokenize.TokenError):
return src
else:
return str(tree)[:-1]
print("Refactoring:", path)
nb = read(str(path), as_version=4)
# Run 2to3 on code
for i, cell in enumerate(nb.cells, start=1):
if cell.cell_type == 'code':
if cell.execution_count in (' ', '*'):
cell.execution_count = None
if cell.source.startswith('%%'):
# For cell magics, try to refactor the body, in case it's
# valid python
head, source = cell.source.split('\n', 1)
cell.source = head + '\n' + refactor_cell(source)
else:
cell.source = refactor_cell(cell.source)
# Update notebook metadata
nb.metadata.kernelspec = {
'display_name': 'Python 3',
'name': 'python3',
'language': 'python',
}
if 'language_info' in nb.metadata:
nb.metadata.language_info.codemirror_mode = {
'name': 'ipython',
'version': 3,
}
nb.metadata.language_info.pygments_lexer = 'ipython3'
nb.metadata.language_info.pop('version', None)
write(nb, str(path))
def main(argv=None):
ap = argparse.ArgumentParser()
ap.add_argument('path', type=pathlib.Path,
help="Notebook or directory containing notebooks")
options = ap.parse_args(argv)
avail_fixes = set(get_fixers_from_package('lib2to3.fixes'))
rt = RefactoringTool(avail_fixes)
if options.path.is_dir():
for nb_path in options.path.rglob('*.ipynb'):
refactor_notebook_inplace(rt, nb_path)
else:
refactor_notebook_inplace(rt, options.path)
if __name__ == '__main__':
main()
__author__ = 'watson-parris'
from cis.data_io.products.HadGEM import HadGEM_PP
import logging
class HadGEM_unknown_vars(HadGEM_PP):
def get_variable_names(self, filenames, data_type=None):
import iris
import cf_units as unit
from cis.utils import single_warnings_only
# Removes warnings and prepares for future Iris change
iris.FUTURE.netcdf_promote = True
variables = []
# Filter the warnings so that they only appear once - otherwise you get lots of repeated warnings
with single_warnings_only():
cubes = iris.load(filenames)
for cube in cubes:
is_time_lat_lon_pressure_altitude_or_has_only_1_point = True
for dim in cube.dim_coords:
units = dim.units
if dim.points.size > 1 and \
not units.is_time() and \
not units.is_time_reference() and \
not units.is_vertical() and \
not units.is_convertible(unit.Unit('degrees')):
is_time_lat_lon_pressure_altitude_or_has_only_1_point = False
break
if is_time_lat_lon_pressure_altitude_or_has_only_1_point:
name = cube.var_name or cube.name()
if name == 'unknown' and 'STASH' in cube.attributes:
name = '{}'.format(cube.attributes['STASH'])
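# e.g. a STASH code such as 'm01s03i236'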
variables.append(name)
return set(variables)
@staticmethod
def load_multiple_files_callback(cube, field, filename):
# This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be quite
# long for some HadGEM variables, but most commands allow the user to override this field on output.
var_name = cube.name()
if var_name == 'unknown' and 'STASH' in cube.attributes:
var_name = '{}'.format(cube.attributes['STASH'])
try:
cube.var_name = var_name
except ValueError as e:
logging.info("Unable to set var_name due to error: {}".format(e))
@staticmethod
def load_single_file_callback(cube, field, filename):
# This method sets the var_name (used for outputting the cube to NetCDF) to the cube name. This can be quite
# long for some HadGEM variables, but most commands allow the user to override this field on output.
var_name = cube.name()
if var_name == 'unknown' and 'STASH' in cube.attributes:
var_name = '{}'.format(cube.attributes['STASH'])
try:
cube.var_name = var_name
except ValueError as e:
logging.info("Unable to set var_name due to error: {}".format(e))
# Copyright 2008-2015 Nokia Solutions and Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from contextlib import contextmanager
import inspect
import re
import struct
import telnetlib
import time
try:
import pyte
except ImportError:
pyte = None
from robot.api import logger
from robot.utils import (ConnectionCache, is_bytes, is_string, is_truthy,
is_unicode, secs_to_timestr, seq2str, timestr_to_secs)
from robot.version import get_version
class Telnet(object):
"""A test library providing communication over Telnet connections.
``Telnet`` is Robot Framework's standard library that makes it possible to
connect to Telnet servers and execute commands on the opened connections.
== Table of contents ==
- `Connections`
- `Writing and reading`
- `Configuration`
- `Terminal emulation`
- `Logging`
- `Time string format`
- `Boolean arguments`
- `Importing`
- `Shortcuts`
- `Keywords`
= Connections =
The first step of using ``Telnet`` is opening a connection with `Open
Connection` keyword. Typically the next step is logging in with `Login`
keyword, and in the end the opened connection can be closed with `Close
Connection`.
It is possible to open multiple connections and switch the active one
using `Switch Connection`. `Close All Connections` can be used to close
all the connections, which is especially useful in suite teardowns to
guarantee that all connections are always closed.
= Writing and reading =
After opening a connection and possibly logging in, commands can be
executed or text written to the connection for other reasons using `Write`
and `Write Bare` keywords. The main difference between these two is that
the former adds a [#Configuration|configurable newline] after the text
automatically.
After writing something to the connection, the resulting output can be
read using `Read`, `Read Until`, `Read Until Regexp`, and `Read Until
Prompt` keywords. Which one to use depends on the context, but the latest
one is often the most convenient.
As a convenience when running a command, it is possible to use `Execute
Command` that simply uses `Write` and `Read Until Prompt` internally.
`Write Until Expected Output` is useful if you need to wait until writing
something produces a desired output.
Written and read text is automatically encoded/decoded using a
[#Configuration|configured encoding].
The ANSI escape codes, like cursor movement and color codes, are
normally returned as part of the read operation. If an escape code occurs
in the middle of a search pattern, it may also prevent finding the searched
string. `Terminal emulation` can be used to process these
escape codes as a real terminal would.
= Configuration =
Many aspects related to the connections can be easily configured either
globally or on a per-connection basis. Global configuration is done when
[#Importing|library is imported], and these values can be overridden per
connection by `Open Connection` or with setting specific keywords
`Set Timeout`, `Set Newline`, `Set Prompt`, `Set Encoding`,
`Set Default Log Level` and `Set Telnetlib Log Level`.
Values of ``environ_user``, ``window_size``, ``terminal_emulation``, and
``terminal_type`` can not be changed after opening the connection.
== Timeout ==
Timeout defines the maximum time to wait when reading output. It is
used internally by `Read Until`, `Read Until Regexp`,
`Read Until Prompt`, and `Login` keywords. The default value is 3 seconds.
== Newline ==
Newline defines which line separator `Write` keyword should use. The
default value is ``CRLF`` that is typically used by Telnet connections.
Newline can be given either in escaped format using ``\\n`` and ``\\r`` or
with special ``LF`` and ``CR`` syntax.
Examples:
| `Set Newline` | \\n |
| `Set Newline` | CRLF |
== Prompt ==
Often the easiest way to read the output of a command is reading all
the output until the next prompt with `Read Until Prompt`. It also makes
it easier, and faster, to verify whether `Login` succeeded.
Prompt can be specified either as a normal string or a regular expression.
The latter is especially useful if the prompt changes as a result of
the executed commands. Prompt can be set to be a regular expression
by giving ``prompt_is_regexp`` argument a true value (see `Boolean
arguments`).
Examples:
| `Open Connection` | lolcathost | prompt=$ |
| `Set Prompt` | (> |# ) | prompt_is_regexp=true |
== Encoding ==
To ease handling text containing non-ASCII characters, all written text is
encoded and read text decoded by default. The default encoding is UTF-8,
which also works with ASCII. Encoding can be disabled by using a special
encoding value ``NONE``. This is mainly useful if you need to get the bytes
received from the connection as-is.
Notice that when writing to the connection, only Unicode strings are
encoded using the defined encoding. Byte strings are expected to be already
encoded correctly. Notice also that normal text in test data is passed to
the library as Unicode and you need to use variables to use bytes.
It is also possible to configure the error handler to use if encoding or
decoding characters fails. Accepted values are the same that encode/decode
functions in Python strings accept. In practice the following values are
the most useful:
- ``ignore``: ignore characters that cannot be encoded (default)
- ``strict``: fail if characters cannot be encoded
- ``replace``: replace characters that cannot be encoded with a replacement
character
Examples:
| `Open Connection` | lolcathost | encoding=Latin1 | encoding_errors=strict |
| `Set Encoding` | ISO-8859-15 |
| `Set Encoding` | errors=ignore |
Using UTF-8 encoding by default and being able to configure the encoding
are new features in Robot Framework 2.7.6. In earlier versions only ASCII
was supported and encoding errors were silently ignored. Robot Framework
2.7.7 added a possibility to specify the error handler, changed the
default behavior back to ignoring encoding errors, and added the
possibility to disable encoding.
== Default log level ==
Default log level specifies the log level keywords use for `logging` unless
they are given an explicit log level. The default value is ``INFO``, and
changing it, for example, to ``DEBUG`` can be a good idea if there is a lot
of unnecessary output that makes log files big.
Configuring default log level in `importing` and with `Open Connection`
are new features in Robot Framework 2.7.6. In earlier versions only
`Set Default Log Level` could be used.
== Terminal type ==
By default the Telnet library does not negotiate any specific terminal type
with the server. If a specific terminal type, for example ``vt100``, is
desired, the terminal type can be configured in `importing` and with
`Open Connection`.
New in Robot Framework 2.8.2.
== Window size ==
Window size for negotiation with the server can be configured when
`importing` the library and with `Open Connection`.
New in Robot Framework 2.8.2.
== USER environment variable ==
The Telnet protocol allows the ``USER`` environment variable to be sent when
connecting to the server. Some servers do not present a login prompt at all;
in those cases this configuration option still makes it possible to define
the desired username. The option ``environ_user`` can be used in
`importing` and with `Open Connection`.
New in Robot Framework 2.8.2.
= Terminal emulation =
Starting from Robot Framework 2.8.2, Telnet library supports terminal
emulation with [https://github.com/selectel/pyte|Pyte]. Terminal emulation
will process the output in a virtual screen. This means that ANSI escape
codes, like cursor movements, and also control characters, like
carriage returns and backspaces, have the same effect on the result as they
would have on a normal terminal screen. For example the sequence
``acdc\\x1b[3Dbba`` will result in output ``abba``.
Terminal emulation is taken into use by giving ``terminal_emulation``
argument a true value (see `Boolean arguments`) either in the library
initialization or with `Open Connection`.
As Pyte approximates a vt-style terminal, you may also want to set the
terminal type as ``vt100``. We also recommend that you increase the window
size, as the terminal emulation will break all lines that are longer than
the window row length.
When terminal emulation is used, the `newline` and `encoding` can not be
changed anymore after opening the connection.
As a prerequisite for using terminal emulation you need to have
[https://github.com/selectel/pyte|Pyte] installed. This is easiest done
with [http://pip-installer.org|pip] by running ``pip install pyte``.
Examples:
| `Open Connection` | lolcathost | terminal_emulation=True | terminal_type=vt100 | window_size=400x100 |
= Logging =
All keywords that read something log the output. These keywords take the
log level to use as an optional argument, and if no log level is specified
they use the [#Configuration|configured] default value.
The valid log levels to use are ``TRACE``, ``DEBUG``, ``INFO`` (default),
and ``WARN``. Levels below ``INFO`` are not shown in log files by default
whereas warnings are shown more prominently.
The [http://docs.python.org/2/library/telnetlib.html|telnetlib module]
used by this library has a custom logging system for logging content it
sends and receives. By default these messages are written using ``TRACE``
level. Starting with Robot Framework 2.8.7 the level is configurable
with the ``telnetlib_log_level`` option either in the library initialization,
to the `Open Connection` or by using the `Set Telnetlib Log Level`
keyword to the active connection. Special level ``NONE`` can be used to
disable the logging altogether.
= Time string format =
Timeouts and other times used must be given as a time string using format
like ``15 seconds`` or ``1min 10s``. If the timeout is given as just
a number, for example, ``10`` or ``1.5``, it is considered to be seconds.
The time string format is described in more detail in an appendix of
[http://robotframework.org/robotframework/#user-guide|Robot Framework User Guide].
= Boolean arguments =
Some keywords accept arguments that are handled as Boolean values true or
false. If such an argument is given as a string, it is considered false if
it is either empty or case-insensitively equal to ``false`` or ``no``.
Other strings are considered true regardless of their value, and other
argument types are tested using the same
[http://docs.python.org/2/library/stdtypes.html#truth-value-testing|rules
as in Python].
True examples:
| `Open Connection` | lolcathost | terminal_emulation=True | # Strings are generally true. |
| `Open Connection` | lolcathost | terminal_emulation=yes | # Same as the above. |
| `Open Connection` | lolcathost | terminal_emulation=${TRUE} | # Python ``True`` is true. |
| `Open Connection` | lolcathost | terminal_emulation=${42} | # Numbers other than 0 are true. |
False examples:
| `Open Connection` | lolcathost | terminal_emulation=False | # String ``false`` is false. |
| `Open Connection` | lolcathost | terminal_emulation=no | # Also string ``no`` is false. |
| `Open Connection` | lolcathost | terminal_emulation=${EMPTY} | # Empty string is false. |
| `Open Connection` | lolcathost | terminal_emulation=${FALSE} | # Python ``False`` is false. |
Note that prior to Robot Framework 2.9 some keywords considered all
non-empty strings, including ``false`` and ``no``, to be true.
"""
ROBOT_LIBRARY_SCOPE = 'TEST_SUITE'
ROBOT_LIBRARY_VERSION = get_version()
def __init__(self, timeout='3 seconds', newline='CRLF',
prompt=None, prompt_is_regexp=False,
encoding='UTF-8', encoding_errors='ignore',
default_log_level='INFO', window_size=None,
environ_user=None, terminal_emulation=False,
terminal_type=None, telnetlib_log_level='TRACE'):
"""Telnet library can be imported with optional configuration parameters.
Configuration parameters are used as default values when new
connections are opened with `Open Connection` keyword. They can also be
overridden after opening the connection using the `Set ...` `keywords`.
See these keywords as well as `Configuration`, `Terminal emulation` and
`Logging` sections above for more information about these parameters
and their possible values.
See `Time string format` and `Boolean arguments` sections for
information about using arguments accepting times and Boolean values,
respectively.
Examples (use only one of these):
| = Setting = | = Value = | = Value = | = Value = | = Value = | = Comment = |
| Library | Telnet | | | | # default values |
| Library | Telnet | 5 seconds | | | # set only timeout |
| Library | Telnet | newline=LF | encoding=ISO-8859-1 | | # set newline and encoding using named arguments |
| Library | Telnet | prompt=$ | | | # set prompt |
| Library | Telnet | prompt=(> |# ) | prompt_is_regexp=yes | | # set prompt as a regular expression |
| Library | Telnet | terminal_emulation=True | terminal_type=vt100 | window_size=400x100 | # use terminal emulation with defined window size and terminal type |
| Library | Telnet | telnetlib_log_level=NONE | | | # disable logging messages from the underlying telnetlib |
"""
self._timeout = timeout or 3.0
self._newline = newline or 'CRLF'
self._prompt = (prompt, prompt_is_regexp)
self._encoding = encoding
self._encoding_errors = encoding_errors
self._default_log_level = default_log_level
self._window_size = window_size
self._environ_user = environ_user
self._terminal_emulation = terminal_emulation
self._terminal_type = terminal_type
self._telnetlib_log_level = telnetlib_log_level
self._cache = ConnectionCache()
self._conn = None
self._conn_kws = self._lib_kws = None
def get_keyword_names(self):
return self._get_library_keywords() + self._get_connection_keywords()
def _get_library_keywords(self):
if self._lib_kws is None:
self._lib_kws = self._get_keywords(self, ['get_keyword_names'])
return self._lib_kws
def _get_keywords(self, source, excluded):
return [name for name in dir(source)
if self._is_keyword(name, source, excluded)]
def _is_keyword(self, name, source, excluded):
return (name not in excluded and
not name.startswith('_') and
name != 'get_keyword_names' and
inspect.ismethod(getattr(source, name)))
def _get_connection_keywords(self):
if self._conn_kws is None:
conn = self._get_connection()
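# Expose only the methods TelnetConnection adds, plus the overridden
# write/read/read_until inherited from telnetlib.Telnet.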
excluded = [name for name in dir(telnetlib.Telnet())
if name not in ['write', 'read', 'read_until']]
self._conn_kws = self._get_keywords(conn, excluded)
return self._conn_kws
def __getattr__(self, name):
if name not in self._get_connection_keywords():
raise AttributeError(name)
# If no connection is initialized, get attributes from a non-active
# connection. This makes it possible for Robot to create keyword
# handlers when it imports the library.
return getattr(self._conn or self._get_connection(), name)
def open_connection(self, host, alias=None, port=23, timeout=None,
newline=None, prompt=None, prompt_is_regexp=False,
encoding=None, encoding_errors=None,
default_log_level=None, window_size=None,
environ_user=None, terminal_emulation=None,
terminal_type=None, telnetlib_log_level=None):
"""Opens a new Telnet connection to the given host and port.
The ``timeout``, ``newline``, ``prompt``, ``prompt_is_regexp``,
``encoding``, ``default_log_level``, ``window_size``, ``environ_user``,
``terminal_emulation``, ``terminal_type`` and ``telnetlib_log_level``
arguments get default values when the library is [#Importing|imported].
Setting them here overrides those values for the opened connection.
See `Configuration`, `Terminal emulation` and `Logging` sections for
more information about these parameters and their possible values.
Possible already opened connections are cached and it is possible to
switch back to them using `Switch Connection` keyword. It is possible to
switch either using explicitly given ``alias`` or using index returned
by this keyword. Indexing starts from 1 and is reset back to it by
`Close All Connections` keyword.
"""
timeout = timeout or self._timeout
newline = newline or self._newline
encoding = encoding or self._encoding
encoding_errors = encoding_errors or self._encoding_errors
default_log_level = default_log_level or self._default_log_level
window_size = self._parse_window_size(window_size or self._window_size)
environ_user = environ_user or self._environ_user
if terminal_emulation is None:
terminal_emulation = self._terminal_emulation
terminal_type = terminal_type or self._terminal_type
telnetlib_log_level = telnetlib_log_level or self._telnetlib_log_level
if not prompt:
prompt, prompt_is_regexp = self._prompt
logger.info('Opening connection to %s:%s with prompt: %s'
% (host, port, prompt))
self._conn = self._get_connection(host, port, timeout, newline,
prompt, is_truthy(prompt_is_regexp),
encoding, encoding_errors,
default_log_level,
window_size,
environ_user,
is_truthy(terminal_emulation),
terminal_type,
telnetlib_log_level)
return self._cache.register(self._conn, alias)
def _parse_window_size(self, window_size):
if not window_size:
return None
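# e.g. '400x100' -> (400, 100), i.e. columns first, then rows.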
try:
cols, rows = window_size.split('x', 1)
return int(cols), int(rows)
except ValueError:
raise ValueError("Invalid window size '%s'. Should be "
"x." % window_size)
def _get_connection(self, *args):
"""Can be overridden to use a custom connection."""
return TelnetConnection(*args)
def switch_connection(self, index_or_alias):
"""Switches between active connections using an index or an alias.
Aliases can be given to `Open Connection` keyword which also always
returns the connection index.
This keyword returns the index of previous active connection.
Example:
| `Open Connection` | myhost.net | | |
| `Login` | john | secret | |
| `Write` | some command | | |
| `Open Connection` | yourhost.com | 2nd conn | |
| `Login` | root | password | |
| `Write` | another cmd | | |
| ${old index}= | `Switch Connection` | 1 | # index |
| `Write` | something | | |
| `Switch Connection` | 2nd conn | | # alias |
| `Write` | whatever | | |
| `Switch Connection` | ${old index} | | # back to original |
| [Teardown] | `Close All Connections` | | |
The example above expects that there were no other open
connections when opening the first one, because it used index
``1`` when switching to the connection later. If you are not
sure about that, you can store the index into a variable as
shown below.
| ${index} = | `Open Connection` | myhost.net |
| `Do Something` | | |
| `Switch Connection` | ${index} | |
"""
old_index = self._cache.current_index
self._conn = self._cache.switch(index_or_alias)
return old_index
def close_all_connections(self):
"""Closes all open connections and empties the connection cache.
If multiple connections are opened, this keyword should be used in
a test or suite teardown to make sure that all connections are closed.
It is not an error if some of the connections have already been closed
by `Close Connection`.
After this keyword, new indexes returned by `Open Connection`
keyword are reset to 1.
"""
self._conn = self._cache.close_all()
class TelnetConnection(telnetlib.Telnet):
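# NEW-ENVIRON option codes as defined in RFC 1572.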
NEW_ENVIRON_IS = chr(0)
NEW_ENVIRON_VAR = chr(0)
NEW_ENVIRON_VALUE = chr(1)
INTERNAL_UPDATE_FREQUENCY = 0.03
def __init__(self, host=None, port=23, timeout=3.0, newline='CRLF',
prompt=None, prompt_is_regexp=False,
encoding='UTF-8', encoding_errors='ignore',
default_log_level='INFO', window_size=None, environ_user=None,
terminal_emulation=False, terminal_type=None,
telnetlib_log_level='TRACE'):
telnetlib.Telnet.__init__(self, host, int(port) if port else 23)
self._set_timeout(timeout)
self._set_newline(newline)
self._set_prompt(prompt, prompt_is_regexp)
self._set_encoding(encoding, encoding_errors)
self._set_default_log_level(default_log_level)
self._window_size = window_size
self._environ_user = environ_user
self._terminal_emulator = self._check_terminal_emulation(terminal_emulation)
self._terminal_type = str(terminal_type) if terminal_type else None
self.set_option_negotiation_callback(self._negotiate_options)
self._set_telnetlib_log_level(telnetlib_log_level)
self._opt_responses = list()
def set_timeout(self, timeout):
"""Sets the timeout used for waiting output in the current connection.
Read operations that expect some output to appear (`Read Until`, `Read
Until Regexp`, `Read Until Prompt`, `Login`) use this timeout and fail
if the expected output does not appear before this timeout expires.
The ``timeout`` must be given in `time string format`. The old timeout
is returned and can be used to restore the timeout later.
Example:
| ${old} = | `Set Timeout` | 2 minute 30 seconds |
| `Do Something` |
| `Set Timeout` | ${old} |
See `Configuration` section for more information about global and
connection specific configuration.
"""
self._verify_connection()
old = self._timeout
self._set_timeout(timeout)
return secs_to_timestr(old)
def _set_timeout(self, timeout):
self._timeout = timestr_to_secs(timeout)
def set_newline(self, newline):
"""Sets the newline used by `Write` keyword in the current connection.
The old newline is returned and can be used to restore the newline later.
See `Set Timeout` for a similar example.
If terminal emulation is used, the newline can not be changed on an open
connection.
See `Configuration` section for more information about global and
connection specific configuration.
"""
self._verify_connection()
if self._terminal_emulator:
raise AssertionError("Newline can not be changed when terminal emulation is used.")
old = self._newline
self._set_newline(newline)
return old
def _set_newline(self, newline):
newline = str(newline).upper()
self._newline = newline.replace('LF', '\n').replace('CR', '\r')
def set_prompt(self, prompt, prompt_is_regexp=False):
"""Sets the prompt used by `Read Until Prompt` and `Login` in the current connection.
If ``prompt_is_regexp`` is given a true value (see `Boolean arguments`),
the given ``prompt`` is considered to be a regular expression.
The old prompt is returned and can be used to restore the prompt later.
Example:
| ${prompt} | ${regexp} = | `Set Prompt` | $ |
| `Do Something` |
| `Set Prompt` | ${prompt} | ${regexp} |
See the documentation of
[http://docs.python.org/2/library/re.html|Python re module]
for more information about the supported regular expression syntax.
Notice that possible backslashes need to be escaped in Robot Framework
test data.
See `Configuration` section for more information about global and
connection specific configuration.
"""
self._verify_connection()
old = self._prompt
self._set_prompt(prompt, prompt_is_regexp)
if old[1]:
return old[0].pattern, True
return old
def _set_prompt(self, prompt, prompt_is_regexp):
if is_truthy(prompt_is_regexp):
self._prompt = (re.compile(prompt), True)
else:
self._prompt = (prompt, False)
def _prompt_is_set(self):
return self._prompt[0] is not None
def set_encoding(self, encoding=None, errors=None):
"""Sets the encoding to use for `writing and reading` in the current connection.
The given ``encoding`` specifies the encoding to use when written/read
text is encoded/decoded, and ``errors`` specifies the error handler to
use if encoding/decoding fails. Either of these can be omitted and in
that case the old value is not affected. Use string ``NONE`` to disable
encoding altogether.
See `Configuration` section for more information about encoding and
error handlers, as well as global and connection specific configuration
in general.
The old values are returned and can be used to restore the encoding
and the error handler later. See `Set Prompt` for a similar example.
If terminal emulation is used, the encoding can not be changed on an open
connection.
Setting encoding in general is a new feature in Robot Framework 2.7.6.
Specifying the error handler and disabling encoding were added in 2.7.7.
"""
self._verify_connection()
if self._terminal_emulator:
raise AssertionError("Encoding can not be changed when terminal emulation is used.")
old = self._encoding
self._set_encoding(encoding or old[0], errors or old[1])
return old
def _set_encoding(self, encoding, errors):
self._encoding = (encoding.upper(), errors)
def _encode(self, text):
if is_bytes(text):
return text
if self._encoding[0] == 'NONE':
return str(text)
return text.encode(*self._encoding)
def _decode(self, bytes):
if self._encoding[0] == 'NONE':
return bytes
return bytes.decode(*self._encoding)
def set_telnetlib_log_level(self, level):
"""Sets the log level used for `logging` in the underlying ``telnetlib``.
Note that ``telnetlib`` can be very noisy, thus using the level ``NONE``
can shut down the messages generated by it.
New in Robot Framework 2.8.7.
"""
self._verify_connection()
old = self._telnetlib_log_level
self._set_telnetlib_log_level(level)
return old
def _set_telnetlib_log_level(self, level):
if level.upper() != 'NONE' and not self._is_valid_log_level(level):
raise AssertionError("Invalid log level '%s'" % level)
self._telnetlib_log_level = level.upper()
def set_default_log_level(self, level):
"""Sets the default log level used for `logging` in the current connection.
The old default log level is returned and can be used to restore the
log level later.
See `Configuration` section for more information about global and
connection specific configuration.
"""
self._verify_connection()
old = self._default_log_level
self._set_default_log_level(level)
return old
def _set_default_log_level(self, level):
if level is None or not self._is_valid_log_level(level):
raise AssertionError("Invalid log level '%s'" % level)
self._default_log_level = level.upper()
def _is_valid_log_level(self, level):
if level is None:
return True
if not is_string(level):
return False
return level.upper() in ('TRACE', 'DEBUG', 'INFO', 'WARN')
def close_connection(self, loglevel=None):
"""Closes the current Telnet connection.
Remaining output in the connection is read, logged, and returned.
It is not an error to close an already closed connection.
Use `Close All Connections` if you want to make sure all opened
connections are closed.
See `Logging` section for more information about log levels.
"""
self.close()
output = self._decode(self.read_all())
self._log(output, loglevel)
return output
def login(self, username, password, login_prompt='login: ',
password_prompt='Password: ', login_timeout='1 second',
login_incorrect='Login incorrect'):
"""Logs in to the Telnet server with the given user information.
This keyword reads from the connection until the ``login_prompt`` is
encountered and then types the given ``username``. Then it reads until
the ``password_prompt`` and types the given ``password``. In both cases
        a newline is appended automatically and the connection specific
        timeout is used when waiting for outputs.
How logging status is verified depends on whether a prompt is set for
this connection or not:
        1) If the prompt is set, this keyword reads the output until the prompt
        is found using the normal timeout. If no prompt is found, the login is
        considered failed and this keyword fails as well. Note that in this
        case both ``login_timeout`` and ``login_incorrect`` arguments are
        ignored.
        2) If the prompt is not set, this keyword sleeps for ``login_timeout``
        and then reads all the output available on the connection. If the
        output contains the ``login_incorrect`` text, the login is considered
        failed and this keyword fails as well. Both of these configuration
        parameters were added in Robot Framework 2.7.6. In earlier versions
        they were hard-coded.
See `Configuration` section for more information about setting
newline, timeout, and prompt.
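        Example:
        | `Login` | john | secret |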
"""
output = self._submit_credentials(username, password, login_prompt,
password_prompt)
if self._prompt_is_set():
success, output2 = self._read_until_prompt()
else:
success, output2 = self._verify_login_without_prompt(
login_timeout, login_incorrect)
output += output2
self._log(output)
if not success:
raise AssertionError('Login incorrect')
return output
def _submit_credentials(self, username, password, login_prompt, password_prompt):
# Using write_bare here instead of write because don't want to wait for
# newline: https://github.com/robotframework/robotframework/issues/1371
output = self.read_until(login_prompt, 'TRACE')
self.write_bare(username + self._newline)
output += self.read_until(password_prompt, 'TRACE')
self.write_bare(password + self._newline)
return output
def _verify_login_without_prompt(self, delay, incorrect):
time.sleep(timestr_to_secs(delay))
output = self.read('TRACE')
success = incorrect not in output
return success, output
def write(self, text, loglevel=None):
"""Writes the given text plus a newline into the connection.
The newline character sequence to use can be [#Configuration|configured]
both globally and per connection basis. The default value is ``CRLF``.
This keyword consumes the written text, until the added newline, from
the output and logs and returns it. The given text itself must not
contain newlines. Use `Write Bare` instead if either of these features
causes a problem.
*Note:* This keyword does not return the possible output of the executed
command. To get the output, one of the `Read ...` `keywords` must be
used. See `Writing and reading` section for more details.
See `Logging` section for more information about log levels.
"""
if self._newline in text:
raise RuntimeError("'Write' keyword cannot be used with strings "
"containing newlines. Use 'Write Bare' instead.")
self.write_bare(text + self._newline)
# Can't read until 'text' because long lines are cut strangely in the output
return self.read_until(self._newline, loglevel)
def write_bare(self, text):
"""Writes the given text, and nothing else, into the connection.
This keyword does not append a newline nor consume the written text.
Use `Write` if these features are needed.
"""
self._verify_connection()
telnetlib.Telnet.write(self, self._encode(text))
def write_until_expected_output(self, text, expected, timeout,
retry_interval, loglevel=None):
"""Writes the given ``text`` repeatedly, until ``expected`` appears in the output.
``text`` is written without appending a newline and it is consumed from
the output before trying to find ``expected``. If ``expected`` does not
appear in the output within ``timeout``, this keyword fails.
        ``retry_interval`` defines the time to wait for ``expected`` to appear
        before writing the ``text`` again. Consuming the written ``text`` is
        subject to the normal [#Configuration|configured timeout].
Both ``timeout`` and ``retry_interval`` must be given in `time string
format`. See `Logging` section for more information about log levels.
Example:
| Write Until Expected Output | ps -ef| grep myprocess\\r\\n | myprocess |
| ... | 5 s | 0.5 s |
The above example writes command ``ps -ef | grep myprocess\\r\\n`` until
``myprocess`` appears in the output. The command is written every 0.5
seconds and the keyword fails if ``myprocess`` does not appear in
the output in 5 seconds.
"""
timeout = timestr_to_secs(timeout)
retry_interval = timestr_to_secs(retry_interval)
maxtime = time.time() + timeout
while time.time() < maxtime:
self.write_bare(text)
self.read_until(text, loglevel)
try:
with self._custom_timeout(retry_interval):
return self.read_until(expected, loglevel)
except AssertionError:
pass
raise NoMatchError(expected, timeout)
def write_control_character(self, character):
"""Writes the given control character into the connection.
        The control character is prepended with an IAC (interpret as command)
        character.
The following control character names are supported: BRK, IP, AO, AYT,
EC, EL, NOP. Additionally, you can use arbitrary numbers to send any
control character.
Example:
| Write Control Character | BRK | # Send Break command |
| Write Control Character | 241 | # Send No operation command |
"""
self._verify_connection()
self.sock.sendall(telnetlib.IAC + self._get_control_character(character))
def _get_control_character(self, int_or_name):
try:
return chr(int(int_or_name))
except ValueError:
return self._convert_control_code_name_to_character(int_or_name)
def _convert_control_code_name_to_character(self, name):
code_names = {
'BRK' : telnetlib.BRK,
'IP' : telnetlib.IP,
'AO' : telnetlib.AO,
'AYT' : telnetlib.AYT,
'EC' : telnetlib.EC,
'EL' : telnetlib.EL,
'NOP' : telnetlib.NOP
}
try:
return code_names[name]
except KeyError:
raise RuntimeError("Unsupported control character '%s'." % name)
def read(self, loglevel=None):
"""Reads everything that is currently available in the output.
Read output is both returned and logged. See `Logging` section for more
information about log levels.
"""
self._verify_connection()
output = self.read_very_eager()
if self._terminal_emulator:
self._terminal_emulator.feed(output)
output = self._terminal_emulator.read()
else:
output = self._decode(output)
self._log(output, loglevel)
return output
def read_until(self, expected, loglevel=None):
"""Reads output until ``expected`` text is encountered.
Text up to and including the match is returned and logged. If no match
is found, this keyword fails. How much to wait for the output depends
on the [#Configuration|configured timeout].
See `Logging` section for more information about log levels. Use
`Read Until Regexp` if more complex matching is needed.
"""
success, output = self._read_until(expected)
self._log(output, loglevel)
if not success:
raise NoMatchError(expected, self._timeout, output)
return output
def _read_until(self, expected):
self._verify_connection()
if self._terminal_emulator:
return self._terminal_read_until(expected)
expected = self._encode(expected)
output = telnetlib.Telnet.read_until(self, expected, self._timeout)
return output.endswith(expected), self._decode(output)
@property
def _terminal_frequency(self):
return min(self.INTERNAL_UPDATE_FREQUENCY, self._timeout)
def _terminal_read_until(self, expected):
max_time = time.time() + self._timeout
out = self._terminal_emulator.read_until(expected)
if out:
return True, out
while time.time() < max_time:
input_bytes = telnetlib.Telnet.read_until(self, expected,
self._terminal_frequency)
self._terminal_emulator.feed(input_bytes)
out = self._terminal_emulator.read_until(expected)
if out:
return True, out
return False, self._terminal_emulator.read()
def _read_until_regexp(self, *expected):
self._verify_connection()
if self._terminal_emulator:
return self._terminal_read_until_regexp(expected)
expected = [self._encode(exp) if is_unicode(exp) else exp
for exp in expected]
return self._telnet_read_until_regexp(expected)
def _terminal_read_until_regexp(self, expected_list):
max_time = time.time() + self._timeout
regexp_list = [re.compile(rgx) for rgx in expected_list]
out = self._terminal_emulator.read_until_regexp(regexp_list)
if out:
return True, out
while time.time() < max_time:
output = self.expect(regexp_list, self._terminal_frequency)[-1]
self._terminal_emulator.feed(output)
out = self._terminal_emulator.read_until_regexp(regexp_list)
if out:
return True, out
return False, self._terminal_emulator.read()
def _telnet_read_until_regexp(self, expected_list):
try:
index, _, output = self.expect(expected_list, self._timeout)
except TypeError:
index, output = -1, ''
return index != -1, self._decode(output)
def read_until_regexp(self, *expected):
"""Reads output until any of the ``expected`` regular expressions match.
This keyword accepts any number of regular expressions patterns or
compiled Python regular expression objects as arguments. Text up to
and including the first match to any of the regular expressions is
returned and logged. If no match is found, this keyword fails. How much
to wait for the output depends on the [#Configuration|configured timeout].
If the last given argument is a [#Logging|valid log level], it is used
as ``loglevel`` similarly as with `Read Until` keyword.
See the documentation of
[http://docs.python.org/2/library/re.html|Python re module]
for more information about the supported regular expression syntax.
Notice that possible backslashes need to be escaped in Robot Framework
test data.
Examples:
| `Read Until Regexp` | (#|$) |
| `Read Until Regexp` | first_regexp | second_regexp |
| `Read Until Regexp` | \\\\d{4}-\\\\d{2}-\\\\d{2} | DEBUG |
"""
if not expected:
raise RuntimeError('At least one pattern required')
if self._is_valid_log_level(expected[-1]):
loglevel = expected[-1]
expected = expected[:-1]
else:
loglevel = None
success, output = self._read_until_regexp(*expected)
self._log(output, loglevel)
if not success:
expected = [exp if is_string(exp) else exp.pattern
for exp in expected]
raise NoMatchError(expected, self._timeout, output)
return output
def read_until_prompt(self, loglevel=None, strip_prompt=False):
"""Reads output until the prompt is encountered.
This keyword requires the prompt to be [#Configuration|configured]
either in `importing` or with `Open Connection` or `Set Prompt` keyword.
By default, text up to and including the prompt is returned and logged.
If no prompt is found, this keyword fails. How much to wait for the
output depends on the [#Configuration|configured timeout].
If you want to exclude the prompt from the returned output, set
``strip_prompt`` to a true value (see `Boolean arguments`). If your
prompt is a regular expression, make sure that the expression spans the
whole prompt, because only the part of the output that matches the
regular expression is stripped away.
See `Logging` section for more information about log levels.
Optionally stripping prompt is a new feature in Robot Framework 2.8.7.
"""
if not self._prompt_is_set():
raise RuntimeError('Prompt is not set.')
success, output = self._read_until_prompt()
self._log(output, loglevel)
if not success:
prompt, regexp = self._prompt
raise AssertionError("Prompt '%s' not found in %s."
% (prompt if not regexp else prompt.pattern,
secs_to_timestr(self._timeout)))
if is_truthy(strip_prompt):
output = self._strip_prompt(output)
return output
def _read_until_prompt(self):
prompt, regexp = self._prompt
read_until = self._read_until_regexp if regexp else self._read_until
return read_until(prompt)
def _strip_prompt(self, output):
prompt, regexp = self._prompt
if not regexp:
length = len(prompt)
else:
match = prompt.search(output)
length = match.end() - match.start()
return output[:-length]
def execute_command(self, command, loglevel=None, strip_prompt=False):
"""Executes the given ``command`` and reads, logs, and returns everything until the prompt.
This keyword requires the prompt to be [#Configuration|configured]
either in `importing` or with `Open Connection` or `Set Prompt` keyword.
This is a convenience keyword that uses `Write` and `Read Until Prompt`
internally. Following two examples are thus functionally identical:
| ${out} = | `Execute Command` | pwd |
| `Write` | pwd |
| ${out} = | `Read Until Prompt` |
See `Logging` section for more information about log levels and `Read
Until Prompt` for more information about the ``strip_prompt`` parameter.
"""
self.write(command, loglevel)
return self.read_until_prompt(loglevel, strip_prompt)
@contextmanager
def _custom_timeout(self, timeout):
old = self.set_timeout(timeout)
try:
yield
finally:
self.set_timeout(old)
def _verify_connection(self):
if not self.sock:
raise RuntimeError('No connection open')
def _log(self, msg, level=None):
msg = msg.strip()
if msg:
logger.write(msg, level or self._default_log_level)
def _negotiate_options(self, sock, cmd, opt):
# We don't have state changes in our accepted telnet options.
# Therefore, we just track if we've already responded to an option. If
# this is the case, we must not send any response.
if cmd in (telnetlib.DO, telnetlib.DONT, telnetlib.WILL, telnetlib.WONT):
if (cmd, opt) in self._opt_responses:
return
else:
self._opt_responses.append((cmd, opt))
# This is supposed to turn server side echoing on and turn other options off.
if opt == telnetlib.ECHO and cmd in (telnetlib.WILL, telnetlib.WONT):
self._opt_echo_on(opt)
elif cmd == telnetlib.DO and opt == telnetlib.TTYPE and self._terminal_type:
self._opt_terminal_type(opt, self._terminal_type)
elif cmd == telnetlib.DO and opt == telnetlib.NEW_ENVIRON and self._environ_user:
self._opt_environ_user(opt, self._environ_user)
elif cmd == telnetlib.DO and opt == telnetlib.NAWS and self._window_size:
self._opt_window_size(opt, *self._window_size)
elif opt != telnetlib.NOOPT:
self._opt_dont_and_wont(cmd, opt)
def _opt_echo_on(self, opt):
return self.sock.sendall(telnetlib.IAC + telnetlib.DO + opt)
def _opt_terminal_type(self, opt, terminal_type):
self.sock.sendall(telnetlib.IAC + telnetlib.WILL + opt)
self.sock.sendall(telnetlib.IAC + telnetlib.SB + telnetlib.TTYPE
+ self.NEW_ENVIRON_IS + terminal_type
+ telnetlib.IAC + telnetlib.SE)
def _opt_environ_user(self, opt, environ_user):
self.sock.sendall(telnetlib.IAC + telnetlib.WILL + opt)
self.sock.sendall(telnetlib.IAC + telnetlib.SB + telnetlib.NEW_ENVIRON
+ self.NEW_ENVIRON_IS + self.NEW_ENVIRON_VAR
+ "USER" + self.NEW_ENVIRON_VALUE + environ_user
+ telnetlib.IAC + telnetlib.SE)
def _opt_window_size(self, opt, window_x, window_y):
self.sock.sendall(telnetlib.IAC + telnetlib.WILL + opt)
self.sock.sendall(telnetlib.IAC + telnetlib.SB + telnetlib.NAWS
+ struct.pack('!HH', window_x, window_y)
+ telnetlib.IAC + telnetlib.SE)
def _opt_dont_and_wont(self, cmd, opt):
if cmd in (telnetlib.DO, telnetlib.DONT):
self.sock.sendall(telnetlib.IAC + telnetlib.WONT + opt)
elif cmd in (telnetlib.WILL, telnetlib.WONT):
self.sock.sendall(telnetlib.IAC + telnetlib.DONT + opt)
def msg(self, msg, *args):
# Forward telnetlib's debug messages to log
if self._telnetlib_log_level != 'NONE':
logger.write(msg % args, self._telnetlib_log_level)
def _check_terminal_emulation(self, terminal_emulation):
if not terminal_emulation:
return False
if not pyte:
raise RuntimeError("Terminal emulation requires pyte module!\n"
"https://pypi.python.org/pypi/pyte/")
return TerminalEmulator(window_size=self._window_size,
newline=self._newline, encoding=self._encoding)
class TerminalEmulator(object):
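    """Emulates a terminal by feeding raw output through ``pyte``.
    Control sequences in the fed bytes (cursor movement, erasing, etc.) are
    rendered by a ``pyte.HistoryScreen`` before the text is read back out.
    """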
def __init__(self, window_size=None, newline="\r\n",
encoding=('UTF-8', 'ignore')):
self._rows, self._columns = window_size or (200, 200)
self._newline = newline
self._stream = pyte.ByteStream(encodings=[encoding])
self._screen = pyte.HistoryScreen(self._rows,
self._columns,
history=100000)
self._stream.attach(self._screen)
self._screen.set_charset('B', '(')
self._buffer = ''
self._whitespace_after_last_feed = ''
@property
def current_output(self):
return self._buffer + self._dump_screen()
def _dump_screen(self):
return self._get_history() + \
self._get_screen(self._screen) + \
self._whitespace_after_last_feed
def _get_history(self):
if self._screen.history.top:
return self._get_history_screen(self._screen.history.top) + self._newline
return ''
def _get_history_screen(self, deque):
return self._newline.join(''.join(c.data for c in row).rstrip()
for row in deque).rstrip(self._newline)
def _get_screen(self, screen):
return self._newline.join(row.rstrip() for row in screen.display).rstrip(self._newline)
def feed(self, input_bytes):
self._stream.feed(input_bytes)
self._whitespace_after_last_feed = input_bytes[len(input_bytes.rstrip()):]
def read(self):
current_out = self.current_output
self._update_buffer('')
return current_out
def read_until(self, expected):
current_out = self.current_output
exp_index = current_out.find(expected)
if exp_index != -1:
self._update_buffer(current_out[exp_index+len(expected):])
return current_out[:exp_index+len(expected)]
return None
def read_until_regexp(self, regexp_list):
current_out = self.current_output
for rgx in regexp_list:
match = rgx.search(current_out)
if match:
self._update_buffer(current_out[match.end():])
return current_out[:match.end()]
return None
def _update_buffer(self, terminal_buffer):
self._buffer = terminal_buffer
self._whitespace_after_last_feed = ''
self._screen.reset()
self._screen.set_charset('B', '(')
class NoMatchError(AssertionError):
ROBOT_SUPPRESS_NAME = True
def __init__(self, expected, timeout, output=None):
self.expected = expected
self.timeout = secs_to_timestr(timeout)
self.output = output
AssertionError.__init__(self, self._get_message())
def _get_message(self):
expected = "'%s'" % self.expected \
if is_string(self.expected) \
else seq2str(self.expected, lastsep=' or ')
msg = "No match found for %s in %s." % (expected, self.timeout)
if self.output is not None:
msg += ' Output:\n%s' % self.output
return msg
# -*- encoding: utf8 -*-
import re
from setuptools import find_packages, setup
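# Convert README.md to reST for PyPI; fall back to None if pypandoc or the
# conversion is unavailable.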
def _read_long_description():
try:
import pypandoc
return pypandoc.convert('README.md', 'rst', format='markdown')
except Exception:
return None
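# Read __version__ from the package source without importing the package.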
version = ''
with open('frigg_settings/__init__.py', 'r') as fd:
version = re.search(
r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
fd.read(),
re.MULTILINE
).group(1)
setup(
name='frigg-settings',
version=version,
description='A module for parsing and discovery of frigg settings file',
long_description=_read_long_description(),
    packages=find_packages(exclude=['tests']),
author='The frigg team',
author_email='hi@frigg.io',
license='MIT',
url='https://github.com/frigg/frigg-settings',
py_modules=['frigg_test_discovery'],
include_package_data=True,
install_requires=[
'pyyaml==3.11',
'frigg-test-discovery>1.0,<2.0',
],
classifiers=[
'Programming Language :: Python :: 3',
]
)
#
# ASN.1 subtype constraints classes.
#
# Constraints are relatively rare, but every ASN1 object
# is doing checks all the time for whether they have any
# constraints and whether they are applicable to the object.
#
# What we're going to do is define objects/functions that
# can be called unconditionally if they are present, and that
# are simply not present if there are no constraints.
#
# Original concept and code by Mike C. Fletcher.
#
import sys
from pyasn1.type import error
class AbstractConstraint:
"""Abstract base-class for constraint objects
Constraints should be stored in a simple sequence in the
namespace of their client Asn1Item sub-classes.
"""
def __init__(self, *values):
self._valueMap = {}
self._setValues(values)
self.__hashedValues = None
def __call__(self, value, idx=None):
try:
self._testValue(value, idx)
except error.ValueConstraintError:
raise error.ValueConstraintError(
'%s failed at: \"%s\"' % (self, sys.exc_info()[1])
)
def __repr__(self):
return '%s(%s)' % (
self.__class__.__name__,
', '.join([repr(x) for x in self._values])
)
    def __eq__(self, other):
        return self is other or self._values == other
def __ne__(self, other): return self._values != other
def __lt__(self, other): return self._values < other
def __le__(self, other): return self._values <= other
def __gt__(self, other): return self._values > other
def __ge__(self, other): return self._values >= other
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(self._values)
else:
def __bool__(self): return bool(self._values)
def __hash__(self):
if self.__hashedValues is None:
self.__hashedValues = hash((self.__class__.__name__, self._values))
return self.__hashedValues
def _setValues(self, values): self._values = values
def _testValue(self, value, idx):
raise error.ValueConstraintError(value)
# Constraints derivation logic
def getValueMap(self): return self._valueMap
def isSuperTypeOf(self, otherConstraint):
return self in otherConstraint.getValueMap() or \
otherConstraint is self or otherConstraint == self
def isSubTypeOf(self, otherConstraint):
return otherConstraint in self._valueMap or \
otherConstraint is self or otherConstraint == self
class SingleValueConstraint(AbstractConstraint):
"""Value must be part of defined values constraint"""
def _testValue(self, value, idx):
# XXX index vals for performance?
if value not in self._values:
raise error.ValueConstraintError(value)
class ContainedSubtypeConstraint(AbstractConstraint):
"""Value must satisfy all of defined set of constraints"""
def _testValue(self, value, idx):
for c in self._values:
c(value, idx)
class ValueRangeConstraint(AbstractConstraint):
"""Value must be within start and stop values (inclusive)"""
def _testValue(self, value, idx):
if value < self.start or value > self.stop:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 2:
raise error.PyAsn1Error(
'%s: bad constraint values' % (self.__class__.__name__,)
)
self.start, self.stop = values
if self.start > self.stop:
            raise error.PyAsn1Error(
                '%s: malformed constraint values (start > stop): %s > %s' % (
                    self.__class__.__name__,
                    self.start, self.stop
                )
            )
AbstractConstraint._setValues(self, values)
class ValueSizeConstraint(ValueRangeConstraint):
"""len(value) must be within start and stop values (inclusive)"""
    def _testValue(self, value, idx):
        length = len(value)
        if length < self.start or length > self.stop:
            raise error.ValueConstraintError(value)
class PermittedAlphabetConstraint(SingleValueConstraint):
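    """Value must consist only of characters from the permitted alphabet"""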
def _setValues(self, values):
self._values = ()
for v in values:
self._values = self._values + tuple(v)
def _testValue(self, value, idx):
for v in value:
if v not in self._values:
raise error.ValueConstraintError(value)
# This is a bit kludgy: it supports two modes of operation within a single
# constraint
class InnerTypeConstraint(AbstractConstraint):
    """Value must satisfy type and presence constraints"""
def _testValue(self, value, idx):
if self.__singleTypeConstraint:
self.__singleTypeConstraint(value)
elif self.__multipleTypeConstraint:
if idx not in self.__multipleTypeConstraint:
raise error.ValueConstraintError(value)
constraint, status = self.__multipleTypeConstraint[idx]
            if status == 'ABSENT':  # XXX presence is not checked!
raise error.ValueConstraintError(value)
constraint(value)
def _setValues(self, values):
self.__multipleTypeConstraint = {}
self.__singleTypeConstraint = None
for v in values:
if isinstance(v, tuple):
self.__multipleTypeConstraint[v[0]] = v[1], v[2]
else:
self.__singleTypeConstraint = v
AbstractConstraint._setValues(self, values)
# Boolean ops on constraints
class ConstraintsExclusion(AbstractConstraint):
"""Value must not fit the single constraint"""
def _testValue(self, value, idx):
try:
self._values[0](value, idx)
except error.ValueConstraintError:
return
else:
raise error.ValueConstraintError(value)
def _setValues(self, values):
if len(values) != 1:
raise error.PyAsn1Error('Single constraint expected')
AbstractConstraint._setValues(self, values)
class AbstractConstraintSet(AbstractConstraint):
    """Abstract base class for sets of constraints"""
def __getitem__(self, idx): return self._values[idx]
def __add__(self, value): return self.__class__(self, value)
def __radd__(self, value): return self.__class__(self, value)
def __len__(self): return len(self._values)
# Constraints inclusion in sets
def _setValues(self, values):
self._values = values
for v in values:
self._valueMap[v] = 1
self._valueMap.update(v.getValueMap())
class ConstraintsIntersection(AbstractConstraintSet):
"""Value must satisfy all constraints"""
def _testValue(self, value, idx):
for v in self._values:
v(value, idx)
class ConstraintsUnion(AbstractConstraintSet):
"""Value must satisfy at least one constraint"""
def _testValue(self, value, idx):
for v in self._values:
try:
v(value, idx)
except error.ValueConstraintError:
pass
else:
return
raise error.ValueConstraintError(
'all of %s failed for \"%s\"' % (self._values, value)
)
# XXX
# add tests for type check
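# Illustrative usage sketch (not part of the original module): constraint
# objects are callable and raise error.ValueConstraintError on violation.
if __name__ == '__main__':
    spec = ConstraintsIntersection(
        ValueRangeConstraint(0, 100),
        SingleValueConstraint(1, 2, 3)
    )
    spec(2)         # passes silently: 2 is in range and among permitted values
    try:
        spec(42)    # in range, but not a permitted single value
    except error.ValueConstraintError as exc:
        print(exc)  # reports which constraint failed for 42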
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""training python module.
Importing from tensorflow.python.estimator is unsupported
and will soon break!
"""
# pylint: disable=unused-import,g-bad-import-order,g-import-not-at-top,wildcard-import
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow_estimator.python.estimator import training
# Include attrs that start with single underscore.
_HAS_DYNAMIC_ATTRIBUTES = True
training.__all__ = [s for s in dir(training) if not s.startswith('__')]
from tensorflow_estimator.python.estimator.training import *
"""The tests for the TCP binary sensor platform."""
from copy import copy
from unittest.mock import patch, Mock
from homeassistant.components.sensor import tcp
from homeassistant.components.binary_sensor import tcp as bin_tcp
from tests.common import get_test_home_assistant
from tests.components.sensor import test_tcp
@patch('homeassistant.components.sensor.tcp.Sensor.update')
def test_setup_platform_valid_config(mock_update):
"""Should check the supplied config and call add_entities with Sensor."""
add_entities = Mock()
ret = bin_tcp.setup_platform(None, test_tcp.TEST_CONFIG, add_entities)
assert ret is None, "setup_platform() should return None if successful."
assert add_entities.called
assert isinstance(add_entities.call_args[0][0][0], bin_tcp.BinarySensor)
def test_setup_platform_invalid_config():
"""Should check the supplied config and return False if it is invalid."""
config = copy(test_tcp.TEST_CONFIG)
del config[tcp.CONF_HOST]
assert bin_tcp.setup_platform(None, config, None) is False
class TestTCPBinarySensor():
"""Test the TCP Binary Sensor."""
def setup_class(cls):
"""Setup things to be run when tests are started."""
cls.hass = get_test_home_assistant()
def teardown_class(cls):
"""Stop down everything that was started."""
cls.hass.stop()
def test_requires_additional_values(self):
"""Should require the additional config values specified."""
config = copy(test_tcp.TEST_CONFIG)
for key in bin_tcp.BinarySensor.required:
del config[key]
assert len(config) != len(test_tcp.TEST_CONFIG)
assert not bin_tcp.BinarySensor.validate_config(config)
@patch('homeassistant.components.sensor.tcp.Sensor.update')
def test_is_on_true(self, mock_update):
"""Should return True if _state is the same as value_on."""
sensor = bin_tcp.BinarySensor(self.hass, test_tcp.TEST_CONFIG)
sensor._state = test_tcp.TEST_CONFIG[tcp.CONF_VALUE_ON]
assert sensor.is_on
@patch('homeassistant.components.sensor.tcp.Sensor.update')
def test_is_on_false(self, mock_update):
"""Should return False if _state is not the same as value_on."""
sensor = bin_tcp.BinarySensor(self.hass, test_tcp.TEST_CONFIG)
sensor._state = "%s abc" % test_tcp.TEST_CONFIG[tcp.CONF_VALUE_ON]
assert not sensor.is_on
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.modules.net_tools.nios import nios_cname_record
from ansible.module_utils.net_tools.nios import api
from ansible.compat.tests.mock import patch, MagicMock, Mock
from .test_nios_module import TestNiosModule, load_fixture
class TestNiosCNameRecordModule(TestNiosModule):
module = nios_cname_record
def setUp(self):
super(TestNiosCNameRecordModule, self).setUp()
self.module = MagicMock(name='ansible.modules.net_tools.nios.nios_cname_record.WapiModule')
self.module.check_mode = False
self.module.params = {'provider': None}
self.mock_wapi = patch('ansible.modules.net_tools.nios.nios_cname_record.WapiModule')
self.exec_command = self.mock_wapi.start()
        self.mock_wapi_run = patch('ansible.modules.net_tools.nios.nios_cname_record.WapiModule.run')
        self.load_config = self.mock_wapi_run.start()
def tearDown(self):
super(TestNiosCNameRecordModule, self).tearDown()
self.mock_wapi.stop()
self.mock_wapi_run.stop()
def _get_wapi(self, test_object):
wapi = api.WapiModule(self.module)
wapi.get_object = Mock(name='get_object', return_value=test_object)
wapi.create_object = Mock(name='create_object')
wapi.update_object = Mock(name='update_object')
wapi.delete_object = Mock(name='delete_object')
return wapi
def load_fixtures(self, commands=None):
self.exec_command.return_value = (0, load_fixture('nios_result.txt').strip(), None)
self.load_config.return_value = dict(diff=None, session='session')
    def test_nios_cname_record_create(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'cname.ansible.com',
'canonical': 'realhost.ansible.com', 'comment': None, 'extattrs': None}
test_object = None
test_spec = {
"name": {"ib_req": True},
"canonical": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
print("WAPI: ", wapi.__dict__)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.create_object.assert_called_once_with('testobject', {'name': self.module._check_type_dict().__getitem__(),
'canonical': 'realhost.ansible.com'})
    def test_nios_cname_record_update_comment(self):
self.module.params = {'provider': None, 'state': 'present', 'name': 'cname.ansible.com',
'canonical': 'realhost.ansible.com', 'comment': 'updated comment', 'extattrs': None}
test_object = [
{
"comment": "test comment",
"_ref": "cnamerecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/true",
"name": "cname.ansible.com",
"canonical": "realhost.ansible.com",
"extattrs": {}
}
]
test_spec = {
"name": {"ib_req": True},
"canonical": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
    def test_nios_cname_record_remove(self):
self.module.params = {'provider': None, 'state': 'absent', 'name': 'cname.ansible.com',
'canonical': 'realhost.ansible.com', 'comment': None, 'extattrs': None}
ref = "cnamerecord/ZG5zLm5ldHdvcmtfdmlldyQw:default/false"
test_object = [{
"comment": "test comment",
"_ref": ref,
"name": "cname.ansible.com",
"canonical": "realhost.ansible.com",
"extattrs": {'Site': {'value': 'test'}}
}]
test_spec = {
"name": {"ib_req": True},
"canonical": {"ib_req": True},
"comment": {},
"extattrs": {}
}
wapi = self._get_wapi(test_object)
res = wapi.run('testobject', test_spec)
self.assertTrue(res['changed'])
wapi.delete_object.assert_called_once_with(ref)
from unittest import skipIf
from django.test import TestCase, override_settings
from django.db import connection
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.recorder import MigrationRecorder
from django.utils import six
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
recorder.applied_migrations(),
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
recorder.applied_migrations(),
set([("myapp", "0432_ponies")]),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
recorder.applied_migrations(),
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new Apps.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[
("migrations", "0001_initial"),
("migrations", "0002_second"),
],
)
# Now render it out!
project_state = migration_loader.graph.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_unmigdep"})
def test_load_unmigrated_dependency(self):
"""
Makes sure the loader can load migrations with a dependency on an unmigrated app.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0001_initial")),
[
("auth", "__first__"),
("migrations", "0001_initial"),
],
)
# Now render it out!
project_state = migration_loader.graph.project_state(("migrations", "0001_initial"))
self.assertEqual(len([m for a, m in project_state.models if a == "migrations"]), 1)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "user"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
def test_load_import_error(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.import_error"}):
with self.assertRaises(ImportError):
MigrationLoader(connection)
def test_load_module_file(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.file"}):
MigrationLoader(connection)
@skipIf(six.PY2, "PY2 doesn't load empty dirs.")
def test_load_empty_dir(self):
with override_settings(MIGRATION_MODULES={"migrations": "migrations.faulty_migrations.namespace"}):
MigrationLoader(connection)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations_squashed"})
def test_loading_squashed(self):
"Tests loading a squashed migration"
migration_loader = MigrationLoader(connection)
recorder = MigrationRecorder(connection)
# Loading with nothing applied should just give us the one node
self.assertEqual(
len(migration_loader.graph.nodes),
1,
)
# However, fake-apply one migration and it should now use the old two
recorder.record_applied("migrations", "0001_initial")
migration_loader.build_graph()
self.assertEqual(
len(migration_loader.graph.nodes),
2,
)
recorder.flush()
"""Common pathname manipulations, JDK version.
Instead of importing this module directly, import os and refer to this
module as os.path.
"""
# Incompletely implemented:
# ismount -- How?
# normcase -- How?
# Missing:
# sameopenfile -- Java doesn't have fstat nor file descriptors?
# samestat -- How?
import stat
import sys
from java.io import File
import java.io.IOException
from java.lang import System
import os
from org.python.core.Py import newString as asPyString
import warnings
warnings.warn('The javapath module is deprecated. Use the os.path module.',
DeprecationWarning, 2)
def _tostr(s, method):
if isinstance(s, basestring):
return s
    raise TypeError("%s() argument must be a str or unicode object, not %s" % (
        method, _type_name(s)))
def _type_name(obj):
TPFLAGS_HEAPTYPE = 1 << 9
type_name = ''
obj_type = type(obj)
is_heap = obj_type.__flags__ & TPFLAGS_HEAPTYPE == TPFLAGS_HEAPTYPE
if not is_heap and obj_type.__module__ != '__builtin__':
type_name = '%s.' % obj_type.__module__
type_name += obj_type.__name__
return type_name
def dirname(path):
"""Return the directory component of a pathname"""
path = _tostr(path, "dirname")
result = asPyString(File(path).getParent())
if not result:
if isabs(path):
result = path # Must be root
else:
result = ""
return result
def basename(path):
"""Return the final component of a pathname"""
path = _tostr(path, "basename")
return asPyString(File(path).getName())
def split(path):
"""Split a pathname.
Return tuple "(head, tail)" where "tail" is everything after the
final slash. Either part may be empty.
"""
path = _tostr(path, "split")
return (dirname(path), basename(path))
def splitext(path):
"""Split the extension from a pathname.
Extension is everything from the last dot to the end. Return
"(root, ext)", either part may be empty.
"""
i = 0
n = -1
for c in path:
if c == '.': n = i
i = i+1
if n < 0:
return (path, "")
else:
return (path[:n], path[n:])
def splitdrive(path):
"""Split a pathname into drive and path specifiers.
Returns a 2-tuple "(drive,path)"; either part may be empty.
"""
# Algorithm based on CPython's ntpath.splitdrive and ntpath.isabs.
if path[1:2] == ':' and path[0].lower() in 'abcdefghijklmnopqrstuvwxyz' \
and (path[2:] == '' or path[2] in '/\\'):
return path[:2], path[2:]
return '', path
def exists(path):
"""Test whether a path exists.
Returns false for broken symbolic links.
"""
path = _tostr(path, "exists")
return File(sys.getPath(path)).exists()
def isabs(path):
"""Test whether a path is absolute"""
path = _tostr(path, "isabs")
return File(path).isAbsolute()
def isfile(path):
"""Test whether a path is a regular file"""
path = _tostr(path, "isfile")
return File(sys.getPath(path)).isFile()
def isdir(path):
"""Test whether a path is a directory"""
path = _tostr(path, "isdir")
return File(sys.getPath(path)).isDirectory()
def join(path, *args):
"""Join two or more pathname components, inserting os.sep as needed"""
path = _tostr(path, "join")
f = File(path)
for a in args:
a = _tostr(a, "join")
g = File(a)
if g.isAbsolute() or len(f.getPath()) == 0:
f = g
else:
if a == "":
a = os.sep
f = File(f, a)
return asPyString(f.getPath())
def normcase(path):
"""Normalize case of pathname.
XXX Not done right under JDK.
"""
path = _tostr(path, "normcase")
return asPyString(File(path).getPath())
def commonprefix(m):
"Given a list of pathnames, return the longest common leading component"
if not m: return ''
prefix = m[0]
for item in m:
for i in range(len(prefix)):
            if prefix[:i+1] != item[:i+1]:
prefix = prefix[:i]
if i == 0: return ''
break
return prefix
def islink(path):
"""Test whether a path is a symbolic link"""
try:
st = os.lstat(path)
except (os.error, AttributeError):
return False
return stat.S_ISLNK(st.st_mode)
def samefile(path, path2):
"""Test whether two pathnames reference the same actual file"""
path = _tostr(path, "samefile")
path2 = _tostr(path2, "samefile")
return _realpath(path) == _realpath(path2)
def ismount(path):
"""Test whether a path is a mount point.
XXX This incorrectly always returns false under JDK.
"""
return 0
def walk(top, func, arg):
"""Walk a directory tree.
walk(top,func,args) calls func(arg, d, files) for each directory
"d" in the tree rooted at "top" (including "top" itself). "files"
is a list of all the files and subdirs in directory "d".
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
for name in names:
name = join(top, name)
if isdir(name) and not islink(name):
walk(name, func, arg)
def expanduser(path):
if path[:1] == "~":
c = path[1:2]
if not c:
return gethome()
if c == os.sep:
return asPyString(File(gethome(), path[2:]).getPath())
return path
def getuser():
return System.getProperty("user.name")
def gethome():
return System.getProperty("user.home")
# normpath() from Python 1.5.2, with Java appropriate generalizations
# Normalize a path, e.g. A//B, A/./B and A/foo/../B all become A/B.
# It should be understood that this may change the meaning of the path
# if it contains symbolic links!
def normpath(path):
"""Normalize path, eliminating double slashes, etc."""
sep = os.sep
if sep == '\\':
path = path.replace("/", sep)
curdir = os.curdir
pardir = os.pardir
import string
# Treat initial slashes specially
slashes = ''
while path[:1] == sep:
slashes = slashes + sep
path = path[1:]
comps = string.splitfields(path, sep)
i = 0
while i < len(comps):
if comps[i] == curdir:
del comps[i]
while i < len(comps) and comps[i] == '':
del comps[i]
elif comps[i] == pardir and i > 0 and comps[i-1] not in ('', pardir):
del comps[i-1:i+1]
i = i-1
        elif comps[i] == '' and i > 0 and comps[i-1] != '':
del comps[i]
else:
i = i+1
# If the path is now empty, substitute '.'
if not comps and not slashes:
comps.append(curdir)
return slashes + string.joinfields(comps, sep)
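# For example (illustrative): normpath('A//B/./C/../D') returns 'A/B/D' on a
# platform where os.sep is '/'.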
def abspath(path):
"""Return an absolute path normalized but symbolic links not eliminated"""
path = _tostr(path, "abspath")
return _abspath(path)
def _abspath(path):
# Must use normpath separately because getAbsolutePath doesn't normalize
# and getCanonicalPath would eliminate symlinks.
return normpath(asPyString(File(sys.getPath(path)).getAbsolutePath()))
def realpath(path):
"""Return an absolute path normalized and symbolic links eliminated"""
path = _tostr(path, "realpath")
return _realpath(path)
def _realpath(path):
try:
return asPyString(File(sys.getPath(path)).getCanonicalPath())
except java.io.IOException:
return _abspath(path)
def getsize(path):
path = _tostr(path, "getsize")
f = File(sys.getPath(path))
size = f.length()
# Sadly, if the returned length is zero, we don't really know if the file
# is zero sized or does not exist.
if size == 0 and not f.exists():
raise OSError(0, 'No such file or directory', path)
return size
def getmtime(path):
path = _tostr(path, "getmtime")
f = File(sys.getPath(path))
if not f.exists():
raise OSError(0, 'No such file or directory', path)
return f.lastModified() / 1000.0
def getatime(path):
# We can't detect access time so we return modification time. This
# matches the behaviour in os.stat().
path = _tostr(path, "getatime")
f = File(sys.getPath(path))
if not f.exists():
raise OSError(0, 'No such file or directory', path)
return f.lastModified() / 1000.0
# expandvars is stolen from CPython-2.1.1's Lib/ntpath.py:
# Expand paths containing shell variable substitutions.
# The following rules apply:
# - no expansion within single quotes
# - no escape character, except for '$$' which is translated into '$'
# - ${varname} is accepted.
# - varnames can be made out of letters, digits and the character '_'
# XXX With COMMAND.COM you can use any characters in a variable name,
# XXX except '^|<>='.
def expandvars(path):
"""Expand shell variables of form $var and ${var}.
Unknown variables are left unchanged."""
if '$' not in path:
return path
import string
varchars = string.letters + string.digits + '_-'
res = ''
index = 0
pathlen = len(path)
while index < pathlen:
c = path[index]
if c == '\'': # no expansion within single quotes
path = path[index + 1:]
pathlen = len(path)
try:
index = path.index('\'')
res = res + '\'' + path[:index + 1]
except ValueError:
res = res + path
index = pathlen - 1
elif c == '$': # variable or '$$'
if path[index + 1:index + 2] == '$':
res = res + c
index = index + 1
elif path[index + 1:index + 2] == '{':
path = path[index+2:]
pathlen = len(path)
try:
index = path.index('}')
var = path[:index]
                    if var in os.environ:
                        res = res + os.environ[var]
except ValueError:
res = res + path
index = pathlen - 1
else:
var = ''
index = index + 1
c = path[index:index + 1]
while c != '' and c in varchars:
var = var + c
index = index + 1
c = path[index:index + 1]
                if var in os.environ:
                    res = res + os.environ[var]
if c != '':
res = res + c
else:
res = res + c
index = index + 1
return res
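# Illustrative behaviour, assuming os.environ contains HOME='/home/user':
#   expandvars('$HOME/src')   -> '/home/user/src'
#   expandvars('${HOME}/src') -> '/home/user/src'
#   expandvars('cost: $$5')   -> 'cost: $5'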
from dimagi.ext.couchdbkit import Document
from django.db import models
COUCH_UUID_MAX_LEN = 50
class DeviceReportEntry(models.Model):
xform_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True)
i = models.IntegerField()
msg = models.TextField()
type = models.CharField(max_length=32, db_index=True)
date = models.DateTimeField(db_index=True)
domain = models.CharField(max_length=100, db_index=True)
device_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True,
null=True)
app_version = models.TextField(null=True)
username = models.CharField(max_length=100, db_index=True, null=True)
user_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True, null=True)
class Meta:
unique_together = [('xform_id', 'i')]
class UserEntry(models.Model):
xform_id = models.CharField(max_length=COUCH_UUID_MAX_LEN, db_index=True)
i = models.IntegerField()
user_id = models.CharField(max_length=COUCH_UUID_MAX_LEN)
sync_token = models.CharField(max_length=COUCH_UUID_MAX_LEN)
username = models.CharField(max_length=100, db_index=True)
class Meta:
unique_together = [('xform_id', 'i')]
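# Intentionally empty subclass: it keeps the couchdbkit ``Document`` import
# in use (presumably so automated import cleanup does not remove it).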
class _(Document):
pass
# Copyright 2017 The Sonnet Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Implementation of convolutional Sonnet modules.
Classes defining convolutional operations, inheriting from `snt.Module`, with
easy weight sharing.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import math
import numbers
# Dependency imports
import numpy as np
from sonnet.python.modules import base
from sonnet.python.modules import util
import tensorflow as tf
# Strings for TensorFlow convolution padding modes. See the following
# documentation for an explanation of VALID versus SAME:
# https://www.tensorflow.org/api_guides/python/nn#Convolution
SAME = "SAME"
VALID = "VALID"
ALLOWED_PADDINGS = {SAME, VALID}
DATA_FORMAT_NCHW = "NCHW"
DATA_FORMAT_NHWC = "NHWC"
SUPPORTED_DATA_FORMATS = {DATA_FORMAT_NCHW, DATA_FORMAT_NHWC}
def _default_transpose_size(input_shape, stride, kernel_shape=None,
padding=SAME):
"""Returns default (maximal) output shape for a transpose convolution.
In general, there are multiple possible output shapes that a transpose
convolution with a given `input_shape` can map to. This function returns the
output shape which evenly divides the stride to produce the input shape in
a forward convolution, i.e. the maximal valid output shape with the given
configuration:
if the padding type is SAME then: output_shape = input_shape * stride
if the padding type is VALID then: output_shape = input_shape * stride +
kernel_shape - 1
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/versions/r0.8/api_docs/python/nn.html#convolution
Args:
input_shape: Sequence of sizes of each dimension of the input, excluding
batch and channel dimensions.
stride: Sequence or integer of kernel strides, excluding batch and channel
dimension strides.
kernel_shape: Sequence or integer of kernel sizes.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
Returns:
output_shape: A tuple of sizes for a transposed convolution that divide
evenly with the given strides, kernel shapes, and padding algorithm.
Raises:
TypeError: if `input_shape` is not a Sequence;
"""
if not isinstance(input_shape, collections.Sequence):
if input_shape is None:
raise TypeError("input_shape is None; if using Sonnet, are you sure you "
"have connected the module to inputs?")
raise TypeError("input_shape is of type {}, must be a sequence."
.format(type(input_shape)))
input_length = len(input_shape)
stride = _fill_and_verify_parameter_shape(stride, input_length, "stride")
padding = _verify_padding(padding)
output_shape = tuple(x * y for x, y in zip(input_shape, stride))
if padding == VALID:
kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, input_length,
"kernel")
output_shape = tuple(x + y - 1 for x, y in zip(output_shape, kernel_shape))
return output_shape
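# Worked example (illustrative): _default_transpose_size((4, 4), 2,
# kernel_shape=3, padding=SAME) returns (8, 8), while padding=VALID gives
# (10, 10), i.e. input_shape * stride + kernel_shape - 1 per dimension.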
def _fill_shape(x, n):
"""Idempotentally converts an integer to a tuple of integers of a given size.
This is used to allow shorthand notation for various configuration parameters.
A user can provide either, for example, `2` or `[2, 2]` as a kernel shape, and
this function returns `(2, 2)` in both cases. Passing `[1, 2]` will return
`(1, 2)`.
Args:
x: An integer or an iterable of integers
n: An integer, the size of the desired output list
Returns:
If `x` is an integer, a tuple of size `n` containing `n` copies of `x`.
If `x` is an iterable of integers of size `n`, it returns `tuple(x)`.
Raises:
TypeError: If n is not a positive integer;
or if x is neither integer nor an iterable of size n.
"""
if not isinstance(n, numbers.Integral) or n < 1:
raise TypeError("n must be a positive integer")
if isinstance(x, numbers.Integral) and x > 0:
return (x,) * n
elif (isinstance(x, collections.Iterable) and len(x) == n and
all(isinstance(v, numbers.Integral) for v in x) and
all(v > 0 for v in x)):
return tuple(x)
else:
raise TypeError("x is {}, must be either a positive integer "
"or an iterable of positive integers of size {}"
.format(x, n))
def _fill_and_verify_parameter_shape(x, n, parameter_label):
"""Expands x if necessary into a `n`-D kernel shape and reports errors."""
try:
return _fill_shape(x, n)
except TypeError as e:
raise base.IncompatibleShapeError("Invalid " + parameter_label + " shape: "
"{}".format(e))
def _verify_padding(padding):
"""Verifies that the provided padding is supported. Returns padding."""
if padding not in ALLOWED_PADDINGS:
raise ValueError(
"Padding must be member of '{}', not {}".format(
ALLOWED_PADDINGS, padding))
return padding
def _fill_and_one_pad_stride(stride, n):
"""Expands the provided stride to size n and pads it with 1s."""
if isinstance(stride, numbers.Integral) or (
isinstance(stride, collections.Iterable) and len(stride) <= n):
return (1,) + _fill_shape(stride, n) + (1,)
elif isinstance(stride, collections.Iterable) and len(stride) == n + 2:
return stride
else:
raise base.IncompatibleShapeError(
"stride is {} ({}), must be either a positive integer or an iterable of"
" positive integers of size {}".format(stride, type(stride), n))
def create_weight_initializer(fan_in_shape):
"""Returns a default initializer for the weights of a convolutional module."""
stddev = 1 / math.sqrt(np.prod(fan_in_shape))
return tf.truncated_normal_initializer(stddev=stddev)
def create_bias_initializer(unused_bias_shape):
"""Returns a default initializer for the biases of a convolutional module."""
return tf.zeros_initializer()
class Conv2D(base.AbstractModule, base.Transposable):
"""Spatial convolution and dilated convolution module, including bias.
This acts as a light wrapper around the TensorFlow ops `tf.nn.convolution`
abstracting away variable creation and sharing.
"""
def __init__(self, output_channels, kernel_shape, stride=1, rate=1,
padding=SAME, use_bias=True, initializers=None,
partitioners=None, regularizers=None, mask=None,
data_format=DATA_FORMAT_NHWC, custom_getter=None,
name="conv_2d"):
"""Constructs a Conv2D module.
See the following documentation for an explanation of VALID versus SAME
padding modes:
https://www.tensorflow.org/api_guides/python/nn#Convolution
Args:
output_channels: Number of output channels. `output_channels` can be
either a number or a callable. In the latter case, since the function
invocation is deferred to graph construction time, the user must only
ensure that output_channels can be called, returning an integer,
when `build` is called.
kernel_shape: Sequence of kernel sizes (of size 2), or integer that is
used to define kernel size in all dimensions.
stride: Sequence of kernel strides (of size 2), or integer that is used to
define stride in all dimensions.
rate: Sequence of dilation rates (of size 2), or integer that is used to
define dilation rate in all dimensions. 1 corresponds to standard 2D
convolution, `rate > 1` corresponds to dilated convolution. Cannot be
> 1 if any of `stride` is also > 1.
padding: Padding algorithm, either `snt.SAME` or `snt.VALID`.
use_bias: Whether to include bias parameters. Default `True`.
initializers: Optional dict containing ops to initialize the filters (with
key 'w') or biases (with key 'b'). The default initializer for the
weights is a truncated normal initializer, which is commonly used
when the inputs are zero centered (see
https://arxiv.org/pdf/1502.03167v3.pdf). The default initializer for
the bias is a zero initializer.
partitioners: Optional dict containing partitioners to partition
weights (with key 'w') or biases (with key 'b'). As a default, no
partitioners are used.
regularizers: Optional dict containing regularizers for the filters
(with key 'w') and the biases (with key 'b'). As a default, no
regularizers are used. A regularizer should be a function that takes
a single `Tensor` as an input and returns a scalar `Tensor` output, e.g.
the L1 and L2 regularizers in `tf.contrib.layers`.
mask: Optional 2D or 4D array, tuple or numpy array containing values to
multiply the weights by component-wise.
data_format: A string. Specifies whether the channel dimension
of the input and output is the last dimension (default, NHWC), or the
second dimension ("NCHW").
custom_getter: Callable or dictionary of callables to use as
custom getters inside the module. If a dictionary, the keys
correspond to regexes to match variable names. See the `tf.get_variable`
documentation for information about the custom_getter API.
name: Name of the module.
Raises:
base.IncompatibleShapeError: If the given kernel shape is neither an
integer nor a sequence of two integers.
base.IncompatibleShapeError: If the given stride is neither an integer
nor a sequence of two integers.
base.IncompatibleShapeError: If the given rate is neither an integer
nor a sequence of two integers.
base.IncompatibleShapeError: If a mask is given and its rank is neither 2
nor 4.
base.NotSupportedError: If the rate in any dimension and the stride in
any dimension are simultaneously > 1.
ValueError: If the given padding is not `snt.VALID` or `snt.SAME`.
ValueError: If the given data_format is not a supported format (see
SUPPORTED_DATA_FORMATS).
KeyError: If `initializers`, `partitioners` or `regularizers` contain any
keys other than 'w' or 'b'.
TypeError: If any of the given initializers, partitioners or regularizers
are not callable.
TypeError: If a mask is given and is not a list, tuple or numpy array.
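A minimal usage sketch (names are illustrative; assumes a float32 NHWC
input tensor `inputs` is already available):
    conv = Conv2D(output_channels=64, kernel_shape=3, stride=1,
                  padding=SAME, name="conv_2d_example")
    outputs = conv(inputs)  # Shape [batch, height, width, 64] under SAME.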
"""
super(Conv2D, self).__init__(custom_getter=custom_getter, name=name)
self._output_channels = output_channels
self._input_shape = None
self._kernel_shape = _fill_and_verify_parameter_shape(kernel_shape, 2,
"kernel")
if data_format not in SUPPORTED_DATA_FORMATS:
raise ValueError("Invalid data_format {:s}. Allowed formats "
"{:s}".format(data_format, SUPPORTED_DATA_FORMATS))
self._data_format = data_format
# The following is for backwards-compatibility from when we used to accept
# 4-strides of the form [1, m, n, 1].
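# For example, a legacy stride of [1, 2, 2, 1] is interpreted as (2, 2).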
if isinstance(stride, collections.Iterable) and len(stride) == 4:
self._stride = tuple(stride)[1:-1]
else:
self._stride = _fill_and_verify_parameter_shape(stride, 2, "stride")
self._rate = _fill_and_verify_parameter_shape(rate, 2, "rate")
if any(x > 1 for x in self._stride) and any(x > 1 for x in self._rate):
raise base.NotSupportedError(
"Cannot have stride > 1 with rate > 1")
self._padding = _verify_padding(padding)
self._use_bias = use_bias
self.possible_keys = self.get_possible_initializer_keys(use_bias=use_bias)
self._initializers = util.check_initializers(
initializers, self.possible_keys)
self._partitioners = util.check_partitioners(
partitioners, self.possible_keys)
self._regularizers = util.check_regularizers(
regularizers, self.possible_keys)
if mask is not None:
if not isinstance(mask, (list, tuple, np.ndarray)):
raise TypeError("Invalid type for mask: {}".format(type(mask)))
self._mask = np.asanyarray(mask)
# Use the converted array: a plain list or tuple `mask` has no `ndim`.
mask_rank = self._mask.ndim
if mask_rank not in (2, 4):
raise base.IncompatibleShapeError(
"Invalid mask rank: {}".format(mask_rank))
else:
self._mask = None
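# An illustrative masked construction (hypothetical values): a 2D mask for a
# 3x3 kernel that zeroes out a subset of the taps, broadcast over the channel
# dimensions in `_build`:
#
#   mask = [[1, 1, 1],
#           [1, 1, 0],
#           [1, 0, 0]]
#   conv = Conv2D(output_channels=8, kernel_shape=3, mask=mask)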
@classmethod
def get_possible_initializer_keys(cls, use_bias=True):
return {"w", "b"} if use_bias else {"w"}
def _build(self, inputs):
"""Connects the Conv2D module into the graph, with input Tensor `inputs`.
If this is not the first time the module has been connected to the graph,
the input Tensor provided here must have the same final 3 dimensions, in
order for the existing variables to be the correct size for the
multiplication. The batch size may differ for each connection.
Args:
inputs: A 4D Tensor of shape [batch_size, input_height, input_width,
input_channels] (NHWC) or [batch_size, input_channels, input_height,
input_width] (NCHW).
Returns:
A 4D Tensor of shape [batch_size, output_height, output_width,
output_channels] (NHWC) or [batch_size, output_channels, output_height,
output_width] (NCHW).
Raises:
ValueError: If connecting the module into the graph any time after the
first time and the inferred size of the input does not match previous
invocations.
base.IncompatibleShapeError: If the input tensor has the wrong number
of dimensions.
base.IncompatibleShapeError: If a mask is present and its shape is
incompatible with the shape of the weights.
base.UnderspecifiedError: If the input tensor has an unknown
`input_channels`.
TypeError: If input Tensor dtype is not compatible with `tf.float32`.
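For example (illustrative shapes): with `output_channels=16`,
`kernel_shape=3` and `snt.SAME` padding, an NHWC input of shape
[8, 32, 32, 3] yields an output of shape [8, 32, 32, 16].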
"""
# Record the input shape; some dimensions may be unknown (None) while the
# graph is being constructed.
self._input_shape = tuple(inputs.get_shape().as_list())
if len(self._input_shape) != 4:
raise base.IncompatibleShapeError(
"Input Tensor must have shape (batch_size, input_height, input_"
"width, input_channels) or (batch_size, input_channels, input_height,"
" input_width) but was {}.".format(self._input_shape))
if self._data_format == DATA_FORMAT_NCHW:
input_channels = self._input_shape[1]
else:
input_channels = self._input_shape[3]
if input_channels is None:
raise base.UnderspecifiedError(
"Number of input channels must be known at module build time")
self._input_channels = input_channels
if not tf.float32.is_compatible_with(inputs.dtype):
raise TypeError(
"Input must have dtype tf.float32, but dtype was {}".format(
inputs.dtype))
weight_shape = (
self._kernel_shape[0],
self._kernel_shape[1],
self._input_channels,
self.output_channels)
bias_shape = (self.output_channels,)
if "w" not in self._initializers:
self._initializers["w"] = create_weight_initializer(weight_shape[:3])
if "b" not in self._initializers and self._use_bias:
self._initializers["b"] = create_bias_initializer(bias_shape)
self._w = tf.get_variable("w",
shape=weight_shape,
initializer=self._initializers["w"],
partitioner=self._partitioners.get("w", None),
regularizer=self._regularizers.get("w", None))
w = self._w
if self._mask is not None:
mask_rank = self._mask.ndim
mask_shape = self._mask.shape
if mask_rank == 2:
if mask_shape != self._kernel_shape:
raise base.IncompatibleShapeError(
"Invalid mask shape: {}".format(mask_shape))
mask = np.reshape(self._mask, self._kernel_shape + (1, 1))
elif mask_rank == 4:
if mask_shape != tuple(weight_shape):
raise base.IncompatibleShapeError(
"Invalid mask shape: {}".format(mask_shape))
mask = self._mask
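# Apply the mask component-wise; a rank-2 mask was reshaped above to
# (kernel_height, kernel_width, 1, 1) so that it broadcasts over the
# input and output channel dimensions.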
w *= mask
outputs = tf.nn.convolution(inputs, w, strides=self._stride,
padding=self._padding, dilation_rate=self._rate,
data_format=self._data_format)
if self._use_bias:
self._b = tf.get_variable("b",
shape=bias_shape,
initializer=self._initializers["b"],
partitioner=self._partitioners.get("b", None),
regularizer=self._regularizers.get("b", None))
outputs = tf.nn.bias_add(outputs, self._b, data_format=self._data_format)
return outputs
@property
def output_channels(self):
"""Returns the number of output channels."""
if callable(self._output_channels):
self._output_channels = self._output_channels()
return self._output_channels
@property
def kernel_shape(self):
"""Returns the kernel shape."""
return self._kernel_shape